diff --git "a/2878.jsonl" "b/2878.jsonl" new file mode 100644--- /dev/null +++ "b/2878.jsonl" @@ -0,0 +1,733 @@ +{"seq_id":"349850394","text":"import os\nimport numpy as np\nfrom babilim import warn, info\n\n\nclass Checkpoint(object):\n def __init__(self, checkpoint_path):\n self.checkoint_path = checkpoint_path\n self.data = {}\n if os.path.exists(checkpoint_path):\n self.data = np.load(checkpoint_path, allow_pickle=False)\n try:\n if \"model_state_dict\" in self.data:\n raise ValueError\n except ValueError:\n warn(\"Checkpoint format deprecated. Save this checkpoint to update the format.\")\n self.data = {}\n checkpoint = np.load(checkpoint_path, allow_pickle=True)\n if \"epoch\" in checkpoint:\n self.set_epoch(checkpoint[\"epoch\"][()])\n if \"model_state_dict\" in checkpoint:\n self.set_state_dict(\"model\", checkpoint[\"model_state_dict\"][()])\n if \"optimizer_state_dict\" in checkpoint:\n self.set_state_dict(\"optimizer\", checkpoint[\"optimizer_state_dict\"][()])\n if \"loss_state_dict\" in checkpoint:\n self.set_state_dict(\"loss\", checkpoint[\"loss_state_dict\"][()])\n if \"metrics_state_dict\" in checkpoint:\n self.set_state_dict(\"metrics\", checkpoint[\"metrics_state_dict\"][()])\n if \"lr_schedule_state_dict\" in checkpoint:\n self.set_state_dict(\"lr_schedule\", checkpoint[\"lr_schedule_state_dict\"][()])\n\n def print(self):\n info(\"Checkpoint: {}\".format(self.checkoint_path))\n for k in self.data:\n if isinstance(self.data[k], np.ndarray):\n info(\" {}: {}\".format(k, self.data[k].shape))\n else:\n info(\" {}: {}\".format(k, self.data[k]))\n\n def save(self, checkpoint_path=None):\n if checkpoint_path is None:\n checkpoint_path = self.checkoint_path\n np.savez_compressed(checkpoint_path, **self.data)\n\n def set_epoch(self, epoch):\n self.data[\"epoch\"] = epoch\n\n def get_epoch(self):\n return self.data[\"epoch\"]\n\n def get_state_dict(self, prefix):\n tmp = self.data\n tmp = {\"{}\".format(\"/\".join(k.split(\"/\")[1:])): tmp[k] for k in tmp if k.startswith(prefix)}\n return tmp\n\n def set_state_dict(self, prefix, state_dict):\n tmp = {\"{}/{}\".format(prefix, k): state_dict[k] for k in state_dict}\n self.data.update(tmp)\n","sub_path":"babilim/core/checkpoint.py","file_name":"checkpoint.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53372948","text":"from dijkstra import Dijkstra\nimport pandas as pd\nimport numpy as np\n\n\nmat = pd.read_csv(\"example.csv\", index_col = 0)\nmat = np.array(mat)\n\nmat_size = len(mat)\n\nwhile(1):\n start, finish = map(int, input().split())\n w, p = Dijkstra(mat_size, start, mat)\n \n path = []\n ans = 0\n while(finish != start):\n path.append(finish)\n ans += mat[p[finish]][finish]\n finish = p[finish]\n\n path = path[::-1]\n \n print(\"weight path\")\n print(ans)\n print(\"weight\")\n print(w)\n print(\"path\")\n print(path)\n print()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216799214","text":"import os\nimport platform\nimport random\nimport threading\nfrom tkinter import *\nimport Board\nimport xml.dom.minidom\n\ndef thread_function(client):\n \"\"\"\n Keepalive topic publisher, in very 30sec to the broker.\n \"\"\"\n threading.Timer(30.0, thread_function).start()\n client.publish(\"serverCommand/keepalive\", \"0\")\n print(\"Message Sent. 
(keepalive)\")\n\n\nclass PuzzleGame:\n \"\"\"\n Puzzle Game main object, with the game parameters, and methods.\n \"\"\"\n\n def __init__(self, parent, client):\n \"\"\"\n Constructor of the Puzzle Game, with\n a parent parameter, which is the root Tk() object.\n \"\"\"\n self.parent = parent\n self.client = client\n self.grid = 3 # 3x3 board game, for picture cutting.\n # self.send_config_xml_to_broker() # send the xml config file about this game to the mqtt broker.\n self.board = Frame(self.parent)\n self.start()\n # keepalive topic writer thread in every 30sec, to the broker (this device is online)\n # keepalive_thread = threading.Thread(target=thread_function, args=(self.client,))\n # keepalive_thread.start()\n\n def send_config_xml_to_broker(self):\n \"\"\"\n Read the config xml, convert to string, and send it to the mqtt broker.\n \"\"\"\n # xmlObject = xml.dom.minidom.parse(\"config_setup.xml\")\n # pretty_xml_as_string = xmlObject.toprettyxml()\n # print(pretty_xml_as_string)\n \"\"\"\n Read the config xml, convert to string, and send it to the mqtt broker.\n \"\"\"\n xmlObject = xml.dom.minidom.parse(\"config_setup.xml\")\n pretty_xml_as_string = xmlObject.toprettyxml()\n self.client.publish(\"users/everyone/inbox/server/deviceList\", pretty_xml_as_string)\n print(\"XML config sent.\")\n\n def pick_random_picture(self):\n \"\"\"\n Pick a random picture from the project script path's picture folder.\n Return with the path of the picture.\n \"\"\"\n path_project = os.path.dirname(os.path.realpath(__file__))\n system = platform.system()\n if system == 'Linux' or system == 'Darwin':\n path_pictures = path_project + \"/pictures\"\n return str(path_pictures + \"/\" + self.random_picture(path_pictures))\n elif system == 'Windows':\n path_pictures = path_project + \"\\\\pictures\"\n return str(path_pictures + \"\\\\\" + self.random_picture(path_pictures))\n\n def random_picture(self, place):\n \"\"\"\n Pick a random picture from the directory of \"place\" parameter.\n Return with the picked file name with extension. 
For example: Picture.jpg\n \"\"\"\n random_filename = random.choice([\n x for x in os.listdir(place)\n if os.path.isfile(os.path.join(place, x))\n ])\n return random_filename\n\n def start(self):\n \"\"\"\n Start the game, Initialize the board and start.\n \"\"\"\n image = self.pick_random_picture()\n grid = self.grid\n if os.path.exists(image):\n self.board = Board.Board(parent=self.parent, image=image, grid=grid, win=self.win)\n self.board.pack()\n\n def win(self, moves):\n \"\"\"\n moves - parameter number of moves until the win.\n Handle the end of the game, write out the steps and the winning message,\n and start a new game with a new random picture.\n \"\"\"\n self.board.pack_forget()\n win_text = (\"You are win, with {0} moves.\".format(moves))\n print(win_text)\n self.play_again()\n\n def play_again(self):\n \"\"\"\n Restart the game.\n \"\"\"\n self.start()\n","sub_path":"PuzzleGame.py","file_name":"PuzzleGame.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"233025949","text":"import pyaudio\nimport wave\nsound1 = wave.open(\"sound3.wav\", 'rb')\nsound2 = wave.open(\"sound2.wav\", 'rb')\n\ndef callback(in_data, frame_count, time_info, status):\n data1 = sound1.readframes(frame_count)\n data2 = sound2.readframes(frame_count)\n decodeddata1 = numpy.fromstring(data1, numpy.int16)\n decodeddata2 = numpy.fromstring(data2, numpy.int16)\n newdata = (decodeddata1 * 0.5 + decodeddata2* 0.5).astype(numpy.int16)\n return (result.tostring(), pyaudio.paContinue)\n","sub_path":"multi_audio1.py","file_name":"multi_audio1.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"194576896","text":"from flask import Flask, render_template\nfrom flask_user import SQLAlchemyAdapter, UserManager\nfrom flask_mail import Mail\nfrom scheman.database import db, db_seed, compute_freehours\nimport click\n\napp = Flask(__name__)\napp.config.from_object('settings')\napp.config.from_envvar('SCHEMAN_SETTINGS', silent=True)\n\n# Database\n\ndb.init_app(app)\n\n# Mail\n\nmain = Mail(app)\n\nfrom scheman.database.models import User\n\n# User management\n\ndb_adapter = SQLAlchemyAdapter(db, User)\nuser_manager = UserManager(db_adapter, app)\n\n# Shut down database\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n db.session.remove()\n\n# Command line\n\n@app.cli.command()\ndef migrate():\n \"\"\"Initialize the database.\"\"\"\n db.create_all()\n click.echo('Database migrated')\n\n@app.cli.command()\ndef seed():\n \"\"\"Seed the database\"\"\"\n db_seed()\n click.echo('Database seeded')\n\n@app.cli.command()\ndef free():\n \"\"\"Compute the free hours\"\"\"\n click.echo('Computing... 
please wait')\n compute_freehours()\n click.echo('Done!')\n\n# Load views\n\nimport scheman.views\nimport scheman.views.api\n","sub_path":"scheman/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"639109587","text":"import math\n\nfrom util import knmidata, geocoding\n\n\nLOCATION_START = \"Utrecht, Netherlands\"\nLOCATION_END = \"Nieuwegein, Netherlands\"\n\n\ndef test():\n bearing_rad, distance_m = geocoding.get_bearing_between_locations_rad(LOCATION_START, LOCATION_END)\n bearing_deg = bearing_rad * 180.0/math.pi\n stations = knmidata.get_actual_stations()\n for station in stations:\n print(station)\n station = stations[0]\n print(station)\n wind_direction_deg = station.wind_direction\n print(wind_direction_deg)\n print(bearing_deg)\n print(bearing_deg+wind_direction_deg)\n\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"util/winddirection.py","file_name":"winddirection.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"600038134","text":"n=int(input())\nm=int(input())\n\narray=[[] for _ in range(n+1)]\nswitch=[0]*(n+1)\n\n\nfor i in range(m):\n x,y=map(int,input().split())\n array[x].append(y)\n array[y].append(x)\n\ndef dfs(i):\n switch[i]=1\n for j in array[i]:\n if not(switch[j]):\n dfs(j)\n\ndfs(1)\nprint(switch.count(1)-1)\n","sub_path":"백준/Python/2609.py","file_name":"2609.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"92275594","text":"\"\"\"Blogs URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\n\nfrom bm import views\n\nurlpatterns = [\n\t# 博客管理\n\turl(r'^blog/list/$', views.blog_list, name='blog_list'),\n\turl(r'^blog/add/$', views.blog_change, name='blog_add'),\n\turl(r'^blog/edit/(\\d+)$', views.blog_change, name='blog_edit'),\n\turl(r'^(blog|article)/delete/(\\d+)$', views.delete, name='delete'),\n\t# 文章管理\n\turl(r'^article/list/$', views.article_list, name='article_list'),\n\turl(r'^article/add/$', views.article_change, name='article_add'),\n\turl(r'^article/edit/(\\d+)$', views.article_change, name='article_edit'),\n\n]\n","sub_path":"练习/练习题/test/test6 博客管理+权限组件/Blogs/bm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236636101","text":"\"\"\"\nwinda jest 8-osobowa\nna dole stoi n ludzi\nna gorze m ludzi\n\nile kursow musi zrobic winda?\n\"\"\"\n\nn = int(input(\"To ile ludzi stoi na dole?\\nn: \"))\nm = int(input(\"A na gorze?\\nm: \"))\nx = 0\n\nif n<0 or m<0:\n print(\"Bledne n albo m. 
:(\")\n exit()\n\nif (n//8 <= m//8) and m!=0:\n x+=1\n\n x+=(((m//8)+int(bool(m%8)))*2)-1\nelif n!=0:\n x=((n//8)+int(bool(n%8)))*2-1\nelse:\n x=0\n\nprint(x)","sub_path":"PPPD pierwsze zajecia punktowane/zadanie_1_06.py","file_name":"zadanie_1_06.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644267083","text":"import datetime\nimport os\nfrom csv import DictReader\nfrom csv import DictWriter\n\nfrom termcolor import colored\nfrom terminaltables import DoubleTable\n\nDONE = 'done'\nANALYZE = 'analyze'\nPROGREES = 'in progres'\nNEW = 'new'\n\nTASK_IDX = 1\nSTATUS_IDX = 2\nDATE_IDX = 3\n\nTASK_STATUSES_COLOR = (\n (DONE, 'green'),\n (ANALYZE, 'blue'),\n (PROGREES, 'yellow'),\n (NEW, 'red'),\n)\nHEADER = ('id', 'task', 'status', 'date')\n\nBASE_DIR = '/'.join(__file__.split('/')[:-1])\nPRJ_DIR_NAME = '.prjs'\n\n\ndef _make_prjs_dir():\n prj_path = os.path.join(BASE_DIR, PRJ_DIR_NAME)\n if not os.path.exists(prj_path):\n os.mkdir(prj_path)\n\n\ndef init_prj(prj_name: str):\n prj_file_name = '.'.join((prj_name, 'csv'))\n file_path = os.path.join(BASE_DIR, PRJ_DIR_NAME, prj_file_name)\n if not os.path.exists(file_path):\n os.mknod(file_path)\n\n\ndef add_task(prj_name: str, task: str):\n prj_file_name = '.'.join((prj_name, 'csv'))\n file_path = os.path.join(BASE_DIR, PRJ_DIR_NAME, prj_file_name)\n\n task_row = {\n 'task': task,\n 'status': 'new',\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n }\n with open(file_path, 'a') as project:\n writer = DictWriter(project, fieldnames=HEADER, delimiter=';')\n writer.writerow(task_row)\n\n\ndef _status_color(status: str):\n status_color = dict(TASK_STATUSES_COLOR)\n return colored(status, status_color[status])\n\n\ndef list_task(prj_name: str):\n table_data = []\n colord_header = [\n colored(h.capitalize(), 'magenta', attrs=['bold', 'underline'])\n for h in HEADER\n ]\n table_data.append(colord_header)\n prj_file_name = '.'.join((prj_name, 'csv'))\n file_path = os.path.join(BASE_DIR, PRJ_DIR_NAME, prj_file_name)\n with open(file_path, 'r') as f_table:\n reader = DictReader(\n f_table,\n fieldnames=HEADER,\n delimiter=';'\n )\n t_row = []\n for row in reader:\n t_row.append(row['task'])\n t_row.append(_status_color(row['status']))\n t_row.append(row['date'])\n table_data.append(t_row)\n t_row = []\n table = DoubleTable(table_data)\n table.title = \"Список дел\"\n print(table.table)\n\n\ndef main():\n while True:\n cmd = input('Input command: ')\n print(cmd)\n if cmd == 'init':\n prj_name = input('Input project name: ')\n init_prj(prj_name)\n print('Project is created!')\n elif cmd == 'add':\n prj_name = input('Input project name: ')\n task = input('Input task: ')\n add_task(prj_name, task)\n print('Task is created!')\n elif cmd == 'list':\n prj_name = input('Input project name: ')\n list_task(prj_name)\n elif cmd == 'exit':\n break\n else:\n print('I don\\'t know this command =/')\n print('Try this instead:')\n print('list')\n print('add')\n print('init')\n\n\n# if __name__ == '__main__':\n# try:\n# main()\n# except KeyboardInterrupt:\n# print('\\nGood bye!')\n\ndef show_table(tasks):\n table_data = []\n colored_header = [\n colored(h.capitalize(), 'magenta', attrs=['bold', 'underline'])\n for h in HEADER\n ]\n table_data.append(colored_header)\n t_row = []\n for task in tasks:\n t_row.append(task['id'])\n t_row.append(task['task'])\n t_row.append(_status_color(task['status']))\n t_row.append(task['date'])\n table_data.append(t_row)\n 
t_row = []\n table = DoubleTable(table_data)\n table.title = \"Список дел\"\n print(table.table)\n\n\nclass Project:\n\n def __init__(self, project_name):\n self.name = project_name\n self._saved_tasks = []\n self._new_tasks = []\n prj_file_name = '.'.join((self.name, 'csv'))\n file_path = os.path.join(BASE_DIR, PRJ_DIR_NAME, prj_file_name)\n with open(file_path, 'r') as f_table:\n reader = DictReader(\n f_table,\n fieldnames=HEADER,\n delimiter=';'\n )\n for row in reader:\n t_row = {\n 'id': row['id'],\n 'task': row['task'],\n 'status': row['status'],\n 'date': row['date'],\n }\n self._saved_tasks.append(t_row)\n self.current_id = max([int(i['id']) for i in self._saved_tasks])\n\n def get_task_by_keyword(self, task_keyword):\n return [\n task\n for task in self._saved_tasks\n if task_keyword in task['task']\n ]\n\n def get_task_by_id(self, task_id):\n for task in self.all_task:\n if task_id == task['id']:\n return task\n\n def add_task(self, task_description):\n self.current_id += 1\n new_task = {\n 'id': str(self.current_id),\n 'task': task_description,\n 'status': 'new',\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n }\n self._new_tasks.append(new_task)\n\n @property\n def all_task(self):\n return self._saved_tasks + self._new_tasks\n\n def save(self):\n prj_file_name = '.'.join((self.name, 'csv'))\n file_path = os.path.join(BASE_DIR, PRJ_DIR_NAME, prj_file_name)\n\n task_rows = []\n for task in self._new_tasks:\n task_row = {\n 'id': task['id'],\n 'task': task['task'],\n 'status': task['status'],\n 'date': task['date']\n }\n task_rows.append(task_row)\n\n with open(file_path, 'a') as project:\n writer = DictWriter(project, fieldnames=HEADER, delimiter=';')\n for row in task_rows:\n writer.writerow(row)\n self._saved_tasks += self._new_tasks\n self._new_tasks = []\n\n def delete_task(self, task_id):\n pass\n\np = Project('my_prj')\nshow_table(p.all_task)\nshow_table(p.get_task_by_keyword('баг'))\np.add_task('Проверка работы метода add_task')\nshow_table(p.all_task)\np.save()\nprint(p.get_task_by_id('1'))\n","sub_path":"todo/todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"267145622","text":"from setuptools import setup\nimport numpy.distutils.misc_util, os\n\nmeta = dict(\\\n description=\"Module to apply tracking algorithm of rainfall data\",\n url = 'https://github.com/antarcticrainforest/tintV2',\n author = 'Martin Bergemann',\n author_email = 'martin.bergemann@met.fu-berlin.de',\n license = 'GPL',\n version = '1.0',\n install_requires=[\n 'pandas',\n 'numpy',\n 'netCDF4',\n 'scipy',\n 'matplotlib',\n 'basemap',\n ])\n\nsetup(name='tint', packages=['tint'], **meta)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70507149","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import _, api, exceptions, fields, models\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n @api.model\n def _get_reconcile(self, account):\n return account.reconcile\n\n @api.multi\n def _get_reconcile_msg(self):\n for move_line in self:\n reconcile = self._get_reconcile(move_line.account_id)\n if reconcile and not move_line.partner_id:\n return _(\"Reconcilation is set on account \"\n \"%s '%s' so you must select partner on \"\n \"the account move line with label '%s'.\"\n ) % 
(move_line.account_id.code,\n move_line.account_id.name,\n move_line.name)\n\n @api.constrains('partner_id', 'account_id')\n def _check_partner_required(self):\n for rec in self:\n message = rec._get_reconcile_msg()\n if message:\n raise exceptions.ValidationError(message)\n","sub_path":"custom/dvit-odoo-10.0/dvit_account_partner_required/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"215734597","text":"#!/usr/bin/env python Daniel Ho\n\nfrom itertools import cycle\n#from sets import Set\n#from wikipathways_api_client import WikipathwaysApiClient\nimport argparse,ast,bisect,configparser,csv,json,multiprocessing,os,pandas,pybedtools,re,requests,shutil,sqlite3,time\nfrom operator import itemgetter\n\nitems = []\ntable = {}\ninterlines = []\nintralines = []\ncol = []\nPriGene = { }\nGeneid = []\nMaRfrid = { }\nMaRCor = { }\n\ndef load_table(infile, outfile ):\n\titems = [ ]\n\tsitems = [ ]\n\ttitems = [ ]\n\tfitems = [ ]\n\tf2items = [ ]\n\tPriGene = { }\n\tSNPid = {}\n\tMaRfrid = { }\n\tMaRCor = { }\n\tinterlines = []\n\tcount = 0\n\tsnp_db = sqlite3.connect(\"/home/ubuntu/MyVolumeStore/dho760_project_raw/Daniel_CodeS3D/lib/snp_index_dbSNP_b151.db\")\n\tsnp_db.text_factory = str\n\tsnp_index = snp_db.cursor()\n\toutputfile = open(outfile, 'w')\n\tinputfile = open(infile, 'r')\n\twhile True:\n\t\tinput_line = inputfile.readline()\n\t\tinput_line = input_line[:-1]\n#\t\tprint (\"1\")\n\t\tif not input_line:\n\t\t\tbreak\n\t\telse:\n\t\t\titems = input_line.split('\\t')\n\t\t\tif items[1].strip() == \".\" :\n\t\t\t\tchr = items[0].strip()\n#\t\t\t\tlocus = int(items[3])\n\t\t\t\tlocus = int(items[2])\n\t\t\t\tlocus2 = locus - 1\n#\t\t\t\tprint (\"2\")\n#\t\t\t\tprint (chr)\n#\t\t\t\tprint (locus)\n\t\t\t\tsnp_index.execute(\"SELECT * FROM snps WHERE chr=? and locus=?\",[chr,locus2])\n\t\t\t\ttag_snp = snp_index.fetchone()\n#\t\t\t\tprint (\"3\")\t\n#\t\t\t\tif tag_snp == None :\n#\t\t\t\t\tsnp_index.execute(\"SELECT * FROM snps WHERE chr=? and locus=?\",[chr,locus2])\n#\t\t\t\t\ttag_snp = snp_index.fetchone()\n#\t\t\t\tprint (\"3-a\")\t\t\n\t\t\t\tif tag_snp == None :\n#\t\t\t\t\trsID = \"chr\" + chr + \":\" + items[3]\n\t\t\t\t\trsID = \"chr\" + chr + \":\" + items[2]\n\t\t\t\telse:\n\t\t\t\t\trsID = tag_snp[0]\n\t\t\telse:\n\t\t\t\trsID = items[1].strip()\n\t\t\t\n#\t\t\tline = items[0] + \"\\t\" + rsID + \"\\t\" + items[2] + \"\\t\" + items[3] + \"\\t\" + items[4] + \"\\t\" + items[5])\n\t\t\tline = items[0] + \"\\t\" + rsID + \"\\t\" + items[2] + \"\\t\" + items[3] + \"\\t\" + items[4] + \"\\t\" + items[5] + \"\\t\" + items[6] + \"\\t\" + items[7] + \"\\t\" + items[8]\n#\t\t\tprint (\"4\")\n\t\t\toutputfile.write(line)\n\t\t\toutputfile.write('\\n')\n\t\t\n\n\n\n\tinputfile.close()\n\toutputfile.close()\n#\tprint \"Read Items #: \" + str(len(table.keys()))\n\t\n\n\n\t\n\t\ndef save_items(newfile,lines):\n\toutputfile = open(newfile, 'w')\n#\tprint \"Output Items #: \" + str(len(items))\n#\tlines.sort()\n\tfor x in lines:\n\t\toutputfile.write(x)\n\t\toutputfile.write('\\n')\n\toutputfile.close()\n\t\n\n#file_name = raw_input(\"Input file name: \")\n\n\n\n\n\ninterlines = []\n\nload_table(\"WTCCCT1D_full_imputed_SNPs_below0.3.txt\", \"WTCCCT1D_full_imputed_SNPs_below0.3_rsID.txt\")\n\nprint (\"Finish !!\")\n#save_items(\"WTCCC_rsID_position_dbSNP_b151.txt\",interlines)\n#interlines = []\n\n\n\n\n\n","sub_path":"WTCCC/3_After_imputation_data_cleaning_processing/4b_remove_below0.03_SNPs/find_rsID_for_dot_SNP_Input2_scores_checked0.3.py","file_name":"find_rsID_for_dot_SNP_Input2_scores_checked0.3.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"376189207","text":"import os\nimport glob\nimport numpy as np\nfrom skimage.io import imread, imshow, imsave\nfrom skimage.transform import resize\nfrom skimage.color import gray2rgb\nfrom skimage.filters import gaussian\n# import matplotlib.pyplot as plt\n\nfrom collections import defaultdict\n\n\ndef bot_crop(image):\n # Since the top & bottom have the scales and the marks,\n # which is useless for the future image representation,\n # they are cut in this part.
The shape is usually around 75 pixels#\n x, y = np.array(image.shape).astype(int)\n imaged = image[0:x - 75, :]\n\n return imaged\n\n\ndef getBackgroundColor(im, edge=None):\n if edge is None:\n edge = 5\n\n x, y = im.shape\n\n top = im[:edge, :]\n bottom = im[x - edge:, :]\n left = im[:, :edge]\n right = im[:, y - edge:]\n\n colors = defaultdict(lambda: 0)\n for img in (left, right, top, bottom):\n w, h = img.shape\n for i in range(w):\n for j in range(h):\n c = img[i, j]\n colors[c] += 1\n\n colors_reversed = [(count, color) for (color, count) in colors.items()]\n background_color = max(colors_reversed)[1]\n return background_color\n\n\ndef imgshape(image):\n x, y = image.shape\n (estimatex, estimatey) = (int(x * 0.2), int(y * 0.2))\n average = np.average(image)\n estimate1 = image[:estimatex, :estimatey]\n estimate2 = image[x - estimatex:, :estimatey]\n estimate3 = image[:estimatex, y - estimatey:]\n estimate4 = image[x - estimatex:, y - estimatey:]\n estimateaverage = np.average(\n estimate1 + estimate2 + estimate3 + estimate4) / 4.0\n # 1 = white, 0 = black#\n if (1 - estimateaverage) < 0.3 * (1 - average):\n return \"Circle\"\n else:\n return \"Rectangular\"\n\n\ndef autocrop(image, threshold, imgshape, heldout, bgc, error_rate):\n x, y = image.shape\n top = int(heldout * x)\n bottom = int(x - int(heldout * x))\n left = int(heldout * y)\n right = int(y - heldout * y)\n edge = []\n for start, end, step in ((top, 0, -1), (bottom, x, 1)):\n for i in range(start, end, step):\n count = 0\n for j in image[i, :]:\n if j not in range(int(bgc - threshold), int(bgc + threshold)):\n count = count + 1\n error = count * 1.0 / y\n if error < error_rate:\n break\n edge.append(i)\n\n for start, end, step in ((left, 0, -1), (right, y, 1)):\n for i in range(start, end, step):\n count = 0\n for j in image[:, i]:\n if j not in range(int(bgc - threshold), int(bgc + threshold)):\n count = count + 1\n error = count * 1.0 / x\n if error < error_rate:\n break\n edge.append(i)\n if imgshape == \"Rectangular\":\n auto_crop = image[edge[0]:edge[1], edge[2]:edge[3]]\n\n else:\n centerx = int((edge[0] + edge[1]) / 2.0)\n centery = int((edge[2] + edge[3]) / 2.0)\n dltx = int((edge[1] - edge[0]) / (2.0 * 1.8))\n dlty = int((edge[3] - edge[2]) / (2.0 * 1.5))\n\n auto_crop = image[centerx - dltx:centerx +\n dltx, centery - dlty:centery + dlty]\n return auto_crop\n\n\nif __name__ == '__main__':\n\n # Showing Overview for testimg\n def overview():\n n = 4\n paths = glob.glob(os.path.join('testimg', '*.*'))\n fig, axes = plt.subplots(int((len(paths) - 1) / n) * 2 + 2, n, figsize=(32,72))\n for a in axes:\n for b in a:\n b.axis('off')\n i = 0\n for path in paths:\n im = np.array(imread(path, as_grey=True), dtype=float)\n\n im = im * 1.0 / np.max(im)\n bgc = getBackgroundColor(im, edge=100)\n im_b = bot_crop(im)\n auto_cropimg = autocrop(im_b, threshold=15, imgshape=imgshape(im_b), heldout=0.2,\n bgc=bgc, error_rate=0.1)\n j = int(i / n) * 2\n k = i % n\n axes[j, k].imshow(im, cmap='gray')\n axes[j+1, k].imshow(auto_cropimg, cmap='gray')\n i = i + 1\n # plt.show()\n plt.savefig('overview.png')\n\n return 0\n\n # Scaling for every img in the Micrographs floder\n\n\n def scaling(main_mt):\n paths = glob.glob(os.path.join('../Micrographs', main_mt, '*.*'))\n for path in paths:\n (floder, filename) = os.path.split(path)\n print('Formatting {}'.format(filename))\n im = np.array(imread(path, as_grey=True), dtype=float)\n bgc = getBackgroundColor(im, edge=100)\n im = bot_crop(im)\n auto_cropimg = autocrop(im, threshold=15, 
imgshape=imgshape(im), heldout=0.2,\n bgc=bgc, error_rate=0.1)\n scaled = resize(auto_cropimg, [224, 224])\n colored = gray2rgb(scaled).astype(np.float32)\n colored = colored/np.max(colored)\n imsave(os.path.join('../Micrographs_scaled', main_mt, filename), colored)\n\n scaling('al')\n scaling('as')\n scaling('cc')\n scaling('ci')\n scaling('co')\n scaling('cs')\n scaling('cu')\n scaling('hs')\n scaling('lz')\n scaling('mg')\n scaling('ni')\n scaling('pl')\n scaling('rf')\n scaling('sc')\n scaling('sp')\n scaling('ss')\n scaling('ti')\n scaling('ts')\n scaling('un')\n","sub_path":"code/code_1/scale.py","file_name":"scale.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"361164809","text":"import rospy\nfrom sensor_msgs.msg import LaserScan\nimport time\nimport math\nfrom angle import angle\n\nclass obstacle_avoidance:\n\n def __init__(self):\n\n self.sub2 = rospy.Subscriber('/laser/scan', LaserScan, self.o_void)\n# rospy.init_node('obstacle_values')\n self.rate = rospy.Rate(10)\n self.get = 0\n self.turn = \"\"\n\n def o_void(self, msg):\n\n o1 = 0\n o2 = 0\n a = angle()\n ang = int(a.get_angle())\n\n if msg.ranges[ang] <25:\n print(\"OBSTACLE AHEAD\")\n for i in range(ang, ang-10, -1):\n if msg.ranges[i] > 20 or msg.ranges[i] == float(\"inf\"):\n o1 = i-1\n break\n\n for i in range(ang, ang+10):\n if msg.ranges[i] > 20 or msg.ranges[i] == float(\"inf\"):\n o2 = i-1\n break\n\n if 90-o1 < 90-o2:\n self.turn = \"right\"\n else:\n self.turn = \"left\"\n else:\n self.turn = \"straight\"\n self.get = 1\n\n def get_turn(self):\n while True:\n while True:\n if self.get != 0:\n break\n return self.turn\n self.get = 0\n\nif __name__ == \"__main__\":\n\n while True:\n ob = obstacle_avoidance()\n o = ob.get_turn()\n print(o)\n","sub_path":"testScripts/Combined_code/obstacle_avoidance.py","file_name":"obstacle_avoidance.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"380571238","text":"import torch\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n# from lenet5 import LeNet5\nfrom Resnet import ResNet18\nfrom torch import nn as nn\n\ndef main():\n batch_size = 32\n cifar_train = datasets.CIFAR10('cifar',True,transform=transforms.Compose([\n transforms.Resize(32,32),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485,0.456,0.406],\n std=[0.229,0.224,0.225]),\n ]),download=True)\n cifar_train = DataLoader(cifar_train, batch_size=batch_size, shuffle=True)\n\n cifar_test = datasets.CIFAR10('cifar',False,transform=transforms.Compose([\n transforms.Resize(32,32),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485,0.456,0.406],\n std=[0.229,0.224,0.225]),\n ]),download=True)\n cifar_test = DataLoader(cifar_test, batch_size=batch_size, shuffle=True)\n\n # x, label = iter(cifar_train).__next__()\n # print('x:',x.shape, 'label:',label.shape)\n\n device = torch.device('cuda')\n # model = LeNet5().to(device)\n model = ResNet18().to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n\n print(model)\n\n for epoch in range(5):\n for batch_idx,(x,label) in enumerate(cifar_train):\n #[b, 3, 32, 32]\n x,label = x.to(device), label.to(device)\n out = model(x)\n loss = criterion(out,label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print('loss:',loss.item())\n\n # test\n 
model.eval()\n with torch.no_grad():\n total_correct = 0\n total_num = 0\n for x, label in cifar_test:\n x, label = x.to(device), label.to(device)\n out = model(x)\n pred = out.argmax(dim=1)\n total_correct += torch.eq(pred,label).float().sum()\n total_num += x.size(0)\n acc = total_correct/total_num\n print('acc:',acc)\n\nif __name__ == '__main__':\n main()","sub_path":"第十章:CIFAR10分类/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379492943","text":"hairstyles = [\"bouffant\", \"pixie\", \"dreadlocks\", \"crew\", \"bowl\", \"bob\", \"mohawk\", \"flattop\"]\n\nprices = [30, 25, 40, 20, 20, 35, 50, 35]\n\nlast_week = [2, 3, 5, 8, 4, 4, 6, 2]\n\n#code starts here:\ntotal_price = 0\nfor i in prices:\n total_price = total_price + i\naverage_price = total_price / len(prices)\n#print(\"Average Haircut Price: \"+ str(average_price))\nnew_prices = [i - 5 for i in prices]\nprint(new_prices)\ntotal_revenue = 0\nfor i in range(len(prices)):\n total_revenue = last_week[i] * prices[i] + total_revenue\n#print('Total Revenue: '+str(total_revenue))\naverage_daily_revenue = total_revenue / 7\n#print(average_daily_revenue)\ncuts_under_30 = [hairstyles[i] for i in range(len(new_prices)) if new_prices[i] < 30]\nprint(cuts_under_30)\n","sub_path":"loops_3.py","file_name":"loops_3.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417784400","text":"import numpy as nm\n\n\ndef loadDataSet():\n dataMat = []\n labelMat = []\n fr = open(\"data/testSet.txt\")\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n return 1.0 / (1 + nm.exp(-inX))\n\n\ndef gradAscent(dataMatIn, classLabels):\n dataMatrix = nm.mat(dataMatIn)\n labelMat = nm.mat(classLabels).transpose()\n m, n = nm.shape(dataMatrix)\n alpha = 0.001\n maxCycles = 500\n weights = nm.ones((n, 1))\n\n print(\"dataMatrix:\\n\", dataMatrix, \"\\nlabelMat:\\n\", labelMat)\n print(\"m:\", m, \"n:\", n, \"alpha:\", alpha, \"maxCycles:\", maxCycles, \"\\nweights:\\n\", weights)\n\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = (labelMat - h)\n weights = weights + alpha * dataMatrix.transpose() * error\n print(\"===========iter:\", k, \"\\nh:\\n\", h, \"\\nerror:\\n\", error, \"\\nweights:\\n\", weights)\n\n return weights\n\n\ndataMat, labelMat = loadDataSet()\nw = gradAscent(dataMat, labelMat)\n\nprint(\"result:\\ndataMat:\", dataMat, \"\\nlabelMat:\", labelMat, \"\\nw:\\n\", w)\n","sub_path":"workspace/tensorflow-work/machine_learning_in_action/5_1.py","file_name":"5_1.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"218610676","text":"import unittest\nfrom mycroft.skills.intent_services.padatious_service import PadatiousService, FallbackIntentContainer\nfrom mycroft_bus_client.message import Message\nfrom ovos_utils.messagebus import FakeBus\n\n\nclass UtteranceIntentMatchingTest(unittest.TestCase):\n def get_service(self, regex_only=False, fuzz=True):\n intent_service = PadatiousService(FakeBus(),\n {\"regex_only\": regex_only,\n \"intent_cache\": \"~/.local/share/mycroft/intent_cache\",\n \"train_delay\": 1,\n \"fuzz\": fuzz,\n \"single_thread\": True,\n 
})\n # register test intents\n filename = \"/tmp/test.intent\"\n with open(filename, \"w\") as f:\n f.write(\"this is a test\\ntest the intent\\nexecute test\")\n rxfilename = \"/tmp/test2.intent\"\n with open(rxfilename, \"w\") as f:\n f.write(\"tell me about {thing}\\nwhat is {thing}\")\n data = {'file_name': filename, 'lang': 'en-US', 'name': 'test'}\n intent_service.register_intent(Message(\"padatious:register_intent\", data))\n data = {'file_name': rxfilename, 'lang': 'en-US', 'name': 'test2'}\n intent_service.register_intent(Message(\"padatious:register_intent\", data))\n intent_service.train()\n\n return intent_service\n\n def test_padatious_intent(self):\n intent_service = self.get_service()\n\n # assert padatious is loaded not padacioso\n self.assertFalse(intent_service.is_regex_only)\n for container in intent_service.containers.values():\n self.assertFalse(isinstance(container, FallbackIntentContainer))\n\n # exact match\n intent = intent_service.calc_intent(\"this is a test\", \"en-US\")\n self.assertEqual(intent.name, \"test\")\n\n # fuzzy match\n intent = intent_service.calc_intent(\"this test\", \"en-US\")\n self.assertEqual(intent.name, \"test\")\n self.assertTrue(intent.conf <= 0.8)\n\n # regex match\n intent = intent_service.calc_intent(\"tell me about Mycroft\", \"en-US\")\n self.assertEqual(intent.name, \"test2\")\n self.assertEqual(intent.matches, {'thing': 'Mycroft'})\n\n # fuzzy regex match - success\n intent = intent_service.calc_intent(\"tell me everything about Mycroft\", \"en-US\")\n self.assertEqual(intent.name, \"test2\")\n # TODO - why are extracted entities lower case ???\n # i think case depends on padaos vs padatious matching internally\n # padaos (exact matches only) -> keep case\n # padatious -> lower case\n self.assertEqual(intent.matches, {'thing': 'mycroft'})\n self.assertTrue(intent.conf <= 0.9)\n\n def test_regex_intent(self):\n intent_service = self.get_service(regex_only=True, fuzz=False)\n\n # assert padacioso is loaded not padatious\n self.assertTrue(intent_service.is_regex_only)\n for container in intent_service.containers.values():\n self.assertTrue(isinstance(container, FallbackIntentContainer))\n\n # exact match\n intent = intent_service.calc_intent(\"this is a test\", \"en-US\")\n self.assertEqual(intent.name, \"test\")\n\n # regex match\n intent = intent_service.calc_intent(\"tell me about Mycroft\", \"en-US\")\n self.assertEqual(intent.name, \"test2\")\n self.assertEqual(intent.matches, {'thing': 'Mycroft'})\n\n # fuzzy match - failure case (no fuzz)\n intent = intent_service.calc_intent(\"this is test\", \"en-US\")\n self.assertTrue(intent.name is None)\n\n # fuzzy regex match - failure case (no fuzz)\n intent = intent_service.calc_intent(\"tell me everything about Mycroft\", \"en-US\")\n self.assertTrue(intent.name is None)\n\n def test_regex_fuzz_intent(self):\n intent_service = self.get_service(regex_only=True, fuzz=True)\n\n # fuzzy match - success\n intent = intent_service.calc_intent(\"this is test\", \"en-US\")\n self.assertEqual(intent.name, \"test\")\n self.assertTrue(intent.conf <= 0.8)\n\n # fuzzy regex match - success\n intent = intent_service.calc_intent(\"tell me everything about Mycroft\", \"en-US\")\n self.assertEqual(intent.name, \"test2\")\n self.assertEqual(intent.matches, {'thing': 'Mycroft'})\n self.assertTrue(intent.conf <= 
0.8)\n\n\n","sub_path":"test/unittests/skills/test_utterance_intents.py","file_name":"test_utterance_intents.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590418518","text":"from flask import Flask, flash, request, redirect, render_template\nfrom flask_bootstrap import Bootstrap\nfrom datastore import *\nfrom userdb import *\nfrom flask_login import LoginManager, login_required, login_user, logout_user, current_user\n\napp = Flask(__name__)\nBootstrap(app)\n\nlm = LoginManager()\nlm.session_protection = 'strong'\nlm.login_view = 'login'\n\nlm.init_app(app)\n\napp.secret_key = 'hard to guess'\n\n\n@lm.user_loader\ndef load_user(user_id):\n user = User(user_id)\n return user\n\n\n@app.route('/')\n@login_required\ndef index():\n owner = current_user.username\n docs = get_catalogs(owner)\n links_list = []\n for doc in docs:\n links = get_links(doc['name'], owner)\n item = {'ca': doc['name'], 'caid': doc['_id'], 'links': links}\n links_list.append(item)\n\n return render_template('index.html', cas=docs, links=links_list)\n\n\n@app.route('/search', methods=['POST'])\n@login_required\ndef search_wildcard_links():\n key = request.form['searchkey']\n owner = current_user.username\n allcas = get_catalogs(owner)\n alllinks = get_links_wildcard(key, owner)\n alllinks = list(alllinks)\n cas = []\n for doc in alllinks:\n if doc['catalog'] not in cas:\n cas.append(doc['catalog'])\n links_list = []\n for ca in cas:\n links = [link for link in alllinks if ca == link['catalog']]\n item = {'ca': ca, 'caid': 'unknown', 'links': links}\n links_list.append(item)\n return render_template('index.html', cas=allcas, links=links_list)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash('You have successfully logout')\n return redirect('/login')\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n from forms import LoginForm\n form = LoginForm()\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n if check_password(username, password):\n user = User(username)\n login_user(user)\n return redirect('/')\n else:\n flash('Invalid username or password')\n return render_template('login.html', form=form)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n from forms import RegisterForm\n rform = RegisterForm()\n if rform.validate_on_submit():\n r = add_user(rform.username.data, rform.password.data)\n if not r:\n flash('Username is token by others')\n else:\n flash('Register success')\n return render_template('register.html', form=rform)\n\n\n@app.route('/newcatalog', methods=['POST'])\n@login_required\ndef new_catalog():\n if request.form['catalog']:\n owner = current_user.username\n doc = {'name': request.form['catalog'], 'owner': owner, 'priority': 0}\n save_catalog(doc)\n return redirect('/')\n\n\n@app.route('/newbookmark', methods=['POST'])\n@login_required\ndef new_bookmark():\n catalog_select = request.form['catalogselect']\n markname = request.form['markname']\n linkadd = request.form['linkadd']\n owner = current_user.username\n\n if catalog_select and markname and linkadd:\n doc = {'name': markname, 'catalog': catalog_select,\n 'link': linkadd, 'owner': owner, 'comments': '', 'priority': 0}\n save_links(doc)\n return redirect('/')\n\n\n@app.route('/delbookmark/')\n@login_required\ndef del_bookmark(objid):\n delete_bookmark(objid)\n return redirect('/')\n\n\n@app.route('/editbookmark/', methods=['GET', 
'POST'])\n@login_required\ndef edit_bookmark(objid):\n owner = current_user.username\n docs = get_catalogs(owner)\n link = get_link_by_id(objid)\n\n from forms import EditBookmarkForm\n eform = EditBookmarkForm(catalogs_choice=link['catalog'], alias=link['name'],\n link=link['link'], comments=link['comments'])\n eform.catalogs_choice.choices = [(d['name'], d['name']) for d in docs]\n\n action = '/editbookmark/%s' % (objid)\n\n if eform.validate_on_submit():\n update_bookmark(objid, eform.catalogs_choice.data, eform.alias.data, eform.link.data, eform.comments.data)\n return redirect('/')\n\n return render_template('editbookmark.html', form=eform, action=action)\n\n\n@app.route('/delcatalog/')\n@login_required\ndef del_catalog(ca):\n owner = current_user.username\n delete_bookmark(None, ca, owner)\n delete_catalogs(ca, owner)\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=3000)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339713330","text":"from knock30 import mecab_data\nfrom collections import defaultdict\n\nword_counts = defaultdict(lambda: 0)\nMlist = mecab_data()\n\nfor line in Mlist:\n for word in line:\n word_counts[ word['surface']] += 1\n \n \n\nfor y,w in sorted(word_counts.items(), key=lambda x: x[1], reverse=True):\n print(y,w)\n\n\n","sub_path":"arai/chapter04/knock36.py","file_name":"knock36.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544137441","text":"'''\n测试用例标题:送仓时间变更测试\n测试场景:送仓时间变更业务流程测试\n创建者:Tim\n创建日期:2018-11-20\n最后修改日期:2018-11-20\n输入数据:审批流程各个角色账号\n输出数据:无\n\n'''\n\n# -*- coding: utf-8 -*-\nimport sys,os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\n#sys.path.append(rootPath)\n\nimport unittest\nfrom cgitb import text\nimport selenium.webdriver.support.ui as ui\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time,unittest,configparser\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoAlertPresentException\nfrom selenium.common.exceptions import NoSuchElementException\n\nimport random\nimport json\nimport datetime\n\n'''\n加载配置选项\n'''\ncfg = configparser.ConfigParser()\ncfg.read(rootPath + '/core/config.ini')\n\n'''\n测试用例\n'''\nclass ChangeOrderQty(unittest.TestCase):\n base_url = cfg.get(\"projects\", \"base_url\")\n project_path = cfg.get(\"projects\", \"project_path\")\n log_path = cfg.get(\"webdriver\", \"log\") + '/' + cfg.get(\"webdriver\", \"logfile\") + '-%s.log' % time.strftime(\"%Y-%m-%d %H_%M_%S\")\n\n def loadvendername(self):\n\n global result\n file = open(rootPath + '/data/ChangeOrderQty.json', encoding='utf-8')\n data = json.load(file)\n result = [(d['username'], d['password']) for d in data['login']]\n\n return result\n\n def loadvendernames(self):\n\n global results\n file = open(rootPath + '/data/ChangeOrderQty.json', encoding='utf-8')\n data = json.load(file)\n results = [(d['name']) for d in data['keywords']]\n\n return results\n\n\n def setUp(self):\n # 脚本标识-标题\n self.script_name = '下单量变更单'\n # 脚本标识-ID\n self.script_id = 'ChangeOrderQty'\n self.target_url = self.base_url + self.project_path\n if (cfg.get(\"webdriver\", \"enabled\") == 
\"off\"):\n # 如果使用最新firefox需要屏蔽下面这句\n self.driver = webdriver.Firefox()\n else:\n # 如果使用最新firefox需要使用下面这句\n self.driver = webdriver.Firefox(log_path=self.log_path)\n self.driver.maximize_window()\n\n # 定义登录方法\n def login(self, username, password):\n self.driver.get(self.target_url) # 登录页面\n self.driver.find_element_by_id('account-inputEl').send_keys(username)\n self.driver.find_element_by_id('password-inputEl').send_keys(password)\n self.driver.find_element_by_xpath(\"//*[@id='LoginWin']//span[contains(@class,'x-btn-icon-el')]\").click()\n\n\n\n def test_ChangeOrderQty(self):\n\n su = self.loadvendername()\n ad = self.loadvendernames()\n for i in range(0, len(su)):\n print(su[i][0])\n print(su[i][1])\n self.login(su[0][0], su[0][1])\n #self.login('Vic_cn ','123')\n\n sleep(10)\n\n try:\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").is_displayed()\n a = True\n except:\n a = False\n if a == True:\n print(\"元素存在\")\n elif a == False:\n print(\"元素不存在\")\n\n print(a)\n\n if a == True:\n\n # 关闭弹出框\n self.driver.find_element_by_xpath(\"//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]\").click()\n\n else:\n pass\n\n sleep(2)\n\n # 定位到资料档案\n self.driver.find_element_by_xpath(\"//*[@id='header-topnav']//span[contains(@class,'fa-file-o')]\").click()\n\n sleep(3)\n\n # 定位到订单资料\n self.driver.find_element_by_xpath(\"//*[@id='west-panel-body']//span[contains(text(),'订单资料')]\").click()\n\n sleep(3)\n\n # 定位到下单量变更单\n self.driver.find_element_by_xpath(\"//*[@id='west-panel-targetEl']//span[contains(text(),'下单量变更单')]\").click()\n\n sleep(3)\n\n # 定位到新建\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyView']//span[contains(@class,'fa-plus')]\").click()\n\n sleep(3)\n\n # 定位到订单编号\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormWinID-body']//input[@name = 'main.orderNumber']\").click()\n\n sleep(5)\n\n if ad[0] !='':\n\n self.driver.find_element_by_xpath(\n \"//*[@id='OrderDialogWinID-body']//input[@name = 'keywords']\").send_keys(ad[0])\n\n sleep(2)\n\n self.driver.find_element_by_xpath(\"//*[@id='OrderDialogWinID-body']//span[contains(@class,'fa-search')]\").click()\n\n sleep(2)\n\n _elementFirst =self.driver.find_element_by_xpath(\"//*[@id='OrderDialogWinGridPanelID-body']//div[text()='1']\")\n\n sleep(2)\n\n # 在此元素上双击\n ActionChains(self.driver).double_click(_elementFirst).perform()\n\n else:\n\n ul = self.driver.find_element_by_xpath(\"//*[@id='OrderDialogWinGridPanelID-body']/div/table/tbody\")\n\n lis = ul.find_elements_by_xpath('tr')\n\n\n sleep(2)\n\n _elementFiveth = (random.randint(1, len(lis)))\n\n sleep(2)\n\n _elementFirst =self.driver.find_element_by_xpath(\"//*[@id='OrderDialogWinGridPanelID-body']//div[text()='{}']\".format(_elementFiveth))\n\n sleep(2)\n\n # 在此元素上双击\n ActionChains(self.driver).double_click(_elementFirst).perform()\n\n sleep(2)\n\n # 定位添加订单信息按钮'''\n _elementFirst = self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-f-body']//img[contains(@class,'x-tool-plus')]\")\n\n sleep(2)\n\n ActionChains(self.driver).double_click(_elementFirst).perform()\n\n\n sleep(2)\n\n ul = self.driver.find_element_by_xpath(\"//*[@id='OtherProductDialogWinGridPanelID-body']/div/table/tbody\")\n\n lis = ul.find_elements_by_xpath('tr')\n\n sleep(2)\n\n _elementFiveth = (random.randint(1, len(lis)))\n\n sleep(2)\n\n _elementFirst =self.driver.find_element_by_xpath(\"//*[@id='OtherProductDialogWinGridPanelID-body']//div[text()='{}']\".format(_elementFiveth))\n\n sleep(2)\n\n # 在此元素上双击\n 
ActionChains(self.driver).double_click(_elementFirst).perform()\n\n target = self.driver.find_element_by_xpath(\"//*[@id='OtherProductDialogWinID']//span[contains(@class,'fa-check')]\")\n self.driver.execute_script(\"arguments[0].scrollIntoView();\", target)\n\n sleep(3)\n\n # 定位到关键字\n self.driver.find_element_by_xpath(\"//*[@id='OtherProductDialogWinID']//span[contains(@class,'fa-check')]\").click()\n\n sleep(2)\n\n ul = self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-normal-body']/div/table/tbody\")\n\n lis = ul.find_elements_by_xpath('tr')\n\n sleep(2)\n\n _elementFiveth = (random.randint(1, len(lis)))\n\n _elementThird = self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-normal-body']/div/table/tbody/tr[{}]/td[4]/div[contains(@class,'x-grid-cell-inner ')]\".format(_elementFiveth)).text\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-normal-body']/div/table/tbody/tr[{}]/td[7]/div[contains(@class,'x-grid-cell-inner ')]\".format(_elementFiveth)).click()\n\n sleep(2)\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-f-body']//input[@name = 'changeOrderQty']\").clear()\n\n sleep(2)\n\n _elementThird = (random.randint(1, int(_elementThird)))\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-f-body']//input[@name = 'changeOrderQty']\").send_keys(_elementThird)\n\n sleep(2)\n\n _elementFourth= self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-normal-body']/div/table/tbody/tr[{}]/td[4]/div[contains(@class,'x-grid-cell-inner ')]\".format(len(lis))).text\n\n sleep(2)\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-normal-body']/div/table/tbody/tr[{}]/td[7]/div[contains(@class,'x-grid-cell-inner ')]\".format(len(lis))).click()\n\n sleep(2)\n\n _elementFourth = (random.randint(1, int(_elementFourth)))\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormGridPanelID-f-body']//input[@name = 'changeOrderQty']\").send_keys(_elementFourth)\n\n sleep(2)\n\n target = self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormWinID']//span[contains(@class,'fa-save ')]\")\n self.driver.execute_script(\"arguments[0].scrollIntoView();\", target)\n\n sleep(2)\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormWinID']//span[contains(@class,'fa-save ')]\").click()\n\n # 获取弹窗提示:\n self.driver.implicitly_wait(120)\n a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')\n print(a)\n\n sleep(2)\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyGridPanelID-body']//div[text()='1']\").click()\n\n sleep(5)\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyTbsPanelId-body']//span[contains(@class,'fa-check')]\").click()\n\n sleep(5)\n\n self.driver.find_element_by_xpath(\"//*[@id='ChangeOrderQtyFormWinID']//span[contains(@class,'fa-check')]\").click()\n\n # 获取弹窗提示:\n self.driver.implicitly_wait(120)\n a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')\n print(a)\n\n # 点击注销\n self.driver.find_element_by_link_text('注销').click()\n\n self.driver.find_element_by_link_text('是').click()\n\n alert = self.driver.switch_to_alert()\n\n # 退出页面\n alert.accept()\n\n def tearDown(self):\n self.driver.quit()\n\nif __name__ == \"__main__\":\n 
unittest.main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"case/ChangeOrderQty.py","file_name":"ChangeOrderQty.py","file_ext":"py","file_size_in_byte":10137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220268253","text":"import pathlib\npath = current_dir = str(pathlib.Path(__file__).parent)\n\nteks = \"SAYA SUKA PYTHON\"\n\ncaesar = int(input(\"masukan n sandi caesar :\"))\nlistTeks = list(teks)\n\nresult = \"\"\nfor charTeks in listTeks:\n if(charTeks.isalpha()):\n hurufAscii = ord(charTeks)\n indexAplha = hurufAscii - ord(\"A\")\n newIndex = (indexAplha + caesar) % 26\n newCaesar = newIndex + ord(\"A\")\n newChar = chr(newCaesar)\n result += newChar \n else:\n result += charTeks\n \n \nresultFile = open(path + '/resultEncodeCaesar.txt','w')\nresultFile.write(result)\nresultFile.close","sub_path":"Praktikum 10/project6.py","file_name":"project6.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448134710","text":"from django.urls import path\nfrom .views import OrderSummaryView, OrderListView, OrderDocSummaryView\n\napp_name = 'orders'\n\nurlpatterns = [\n path('', OrderListView.as_view(), name='orderlist'),\n path('/', OrderSummaryView, name='orderdetail'),\n path('/doc/', OrderDocSummaryView, name='orderdocdetail'),\n # path('checkout/', checkout, name='checkout')\n]\n","sub_path":"backend/orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592200247","text":"import turtle,random,time,os\r\n\r\n#for create player's rock for crash bug\r\ndef rock(color,x,y,ply):\r\n ply.shape(\"square\")\r\n ply.shapesize(2,4)\r\n ply.color(color)\r\n ply.speed(1000)\r\n ply.penup()\r\n ply.setpos(x,y)\r\n ply.pendown()\r\n \r\n\r\ndef random_bug(size,dot):\r\n x= random.randint(-300,300)\r\n y= -350\r\n color=['red','pink','blue','orange','purple','yellow','cyan','green']\r\n dot.speed(random.randint(5,10)) \r\n dot.shapesize(size)\r\n dot.color(random.choice(color))\r\n dot.shape(\"turtle\")\r\n dot.penup()\r\n dot.ht()\r\n os.system(\"afplay soundBug.wav&\")\r\n dot.setpos(x,y)\r\n dot.st()\r\n #for control distance, hide bug and record score\r\n while(1):\r\n if dot.ycor() > ply1.ycor()-50 and dot.ycor() < ply1.ycor()+50 and dot.xcor()ply1.xcor()-20 :\r\n dot.ht()\r\n global xp1\r\n xp1 += 1\r\n os.system(\"afplay soundY.wav&\")\r\n scorboard()\r\n break\r\n \r\n if dot.ycor() > ply2.ycor()-50 and dot.ycor() < ply2.ycor()+50 and dot.xcor()ply2.xcor()-20 :\r\n dot.ht()\r\n global xp2\r\n xp2 += 1\r\n os.system(\"afplay soundC.wav&\")\r\n scorboard()\r\n break\r\n else:\r\n if x<0:\r\n dot.forward(1.5)\r\n if dot.xcor()>501:\r\n break\r\n if x>0:\r\n dot.backward(1.5)\r\n if dot.xcor()<-501:\r\n break\r\n dot.pendown()\r\n dot.clear()\r\n\r\n#purple borders\r\ndef sınır(lim,color):\r\n lim.color(color)\r\n lim.speed(500)\r\n lim.penup()\r\n lim.setpos(2000,350)\r\n lim.pendown()\r\n lim.setpos(-2000,350)\r\n lim.penup()\r\n lim.setpos(-2000,-350)\r\n lim.pendown()\r\n lim.setpos(2000,-350)\r\n \r\ndef scorboard():\r\n scor1.ht()\r\n scor1.speed(100)\r\n scor1.penup()\r\n scor1.goto(-180,0)\r\n scor1.pendown()\r\n scor1.clear()\r\n scor1.write(xp2, move = False, align =\"center\" , font = (\"Arial\",20,\"normal\"))\r\n \r\n scor2.ht()\r\n scor2.speed(100)\r\n scor2.penup()\r\n scor2.goto(180,0)\r\n 
scor2.pendown()\r\n scor2.clear()\r\n scor2.write(xp1, move = False, align =\"center\" , font = (\"Arial\",20,\"normal\"))\r\n \r\ndef scorboardtab(tab):\r\n tab.color(\"white\",\"white\")\r\n tab.speed(1000)\r\n tab.penup()\r\n tab.goto(-300,45)\r\n tab.pendown()\r\n tab.begin_fill()\r\n tab.forward(600)\r\n tab.setheading(270)\r\n tab.forward(55)\r\n tab.setheading(180)\r\n tab.forward(600)\r\n tab.setheading(90)\r\n tab.forward(55)\r\n tab.end_fill()\r\n tab.ht()\r\n\r\n#key controls\r\ndef ply1_right():\r\n if ply1.ycor() < -300 and ply1.ycor() > -400:\r\n ply1.penup()\r\n ply1.setheading(0)\r\n ply1.forward(0)\r\n ply1.pendown()\r\n else:\r\n ply1.penup()\r\n ply1.setheading(0)\r\n ply1.forward(50)\r\n ply1.pendown()\r\ndef ply1_left():\r\n if ply1.ycor() < -300 and ply1.ycor() > -400:\r\n ply1.penup()\r\n ply1.setheading(0)\r\n ply1.backward(0)\r\n ply1.pendown()\r\n else:\r\n ply1.penup()\r\n ply1.setheading(180)\r\n ply1.forward(50)\r\n ply1.pendown()\r\n \r\ndef ply2_right():\r\n if ply2.ycor() < -300 and ply2.ycor() > -400:\r\n ply2.penup()\r\n ply2.setheading(0)\r\n ply2.forward(0)\r\n ply2.pendown()\r\n else:\r\n ply2.penup()\r\n ply2.setheading(0)\r\n ply2.forward(50)\r\n ply2.pendown()\r\n \r\ndef ply2_left():\r\n if ply2.ycor() < -300 and ply1.ycor() > -400:\r\n ply2.penup()\r\n ply2.setheading(0)\r\n ply2.backward(0)\r\n ply2.pendown()\r\n else:\r\n ply2.penup()\r\n ply2.setheading(180)\r\n ply2.forward(50)\r\n ply2.pendown()\r\n \r\ndef ply1_down():\r\n if ply1.ycor() < -300 and ply1.ycor() > -400:\r\n ply1.setheading(270)\r\n ply1.forward(0)\r\n ply1.setheading(0)\r\n else:\r\n ply1.penup()\r\n ply1.setheading(270)\r\n ply1.forward(630)\r\n ply1.setheading(0)\r\n ply1.pendown()\r\n \r\ndef ply1_up():\r\n if ply1.ycor() < 350 and ply1.ycor() > 250:\r\n ply1.setheading(90)\r\n ply1.forward(0)\r\n ply1.setheading(0)\r\n else:\r\n ply1.penup()\r\n ply1.setheading(90)\r\n ply1.forward(630)\r\n ply1.setheading(0)\r\n ply1.pendown()\r\n \r\ndef ply2_down():\r\n if ply2.ycor() < -300 and ply2.ycor() > -400:\r\n ply2.setheading(270)\r\n ply2.forward(0)\r\n ply2.setheading(0)\r\n else:\r\n ply2.penup()\r\n ply2.setheading(270)\r\n ply2.forward(630)\r\n ply2.setheading(0)\r\n ply2.pendown()\r\n\r\ndef ply2_up():\r\n if ply2.ycor() < 350 and ply2.ycor() > 250:\r\n ply2.setheading(90)\r\n ply2.forward(0)\r\n ply2.setheading(0)\r\n else:\r\n ply2.penup()\r\n ply2.setheading(90)\r\n ply2.forward(630)\r\n ply2.setheading(0)\r\n ply2.pendown()\r\n\r\n\r\ncan=turtle.Turtle()\r\ncanvas=turtle.Screen()\r\n\r\n#login\r\ncanvas.screensize(4000,800,\"white\")\r\ncan.ht()\r\ncan.write(\"play fullscreen!\", move = False, align =\"center\" , font = (\"Arial\",30,\"normal\"))\r\ntime.sleep(2)\r\ncan.clear()\r\ncan.write(\"\\t control keys:\\n A-S-W-D(for yellow) and arrow keys(for cyan)\", move = False, align =\"center\" , font = (\"Arial\",20,\"normal\"))\r\ntime.sleep(4)\r\ncan.clear()\r\ncan.write(\"\\tcrash the bugs and gain xp, \\nthe player who reachs 15 xp first, gonna win! 
\", move = False, align =\"center\" , font = (\"Arial\",20,\"normal\"))\r\ntime.sleep(4)\r\ncan.clear()\r\ncan.write(\"3!\", move = False, align =\"center\" , font = (\"Arial\",70,\"normal\"))\r\ntime.sleep(1)\r\ncan.clear()\r\ncan.write(\"2!\", move = False, align =\"center\" , font = (\"Arial\",70,\"normal\"))\r\ntime.sleep(1)\r\ncan.clear()\r\ncan.write(\"1!\", move = False, align =\"center\" , font = (\"Arial\",70,\"normal\"))\r\ntime.sleep(1)\r\ncan.clear()\r\ncan.write(\"DEBUG!\", move = False, align =\"center\" , font = (\"Arial\",100,\"normal\"))\r\ntime.sleep(1)\r\ncan.clear()\r\ncanvas.screensize(4000,800,\"black\")\r\n\r\n\r\n#variables and turtle objects\r\nscor=turtle.Turtle()\r\nscor1=turtle.Turtle()\r\nscor2=turtle.Turtle()\r\n\r\nply1x = 300\r\nply1y = 300\r\nply2y = 300\r\nply2x = -300\r\n\r\nxp1= 0\r\nxp2= 0\r\n\r\ntab = turtle.Turtle()\r\nply1 = turtle.Turtle()\r\nply2 = turtle.Turtle()\r\nlimit = turtle.Turtle()\r\ndot = turtle.Turtle()\r\n\r\n#start of game\r\nsınır(limit,\"purple\") \r\nrock(\"cyan\",ply1x,ply1y,ply1)\r\nrock(\"yellow\",ply2x,ply2y,ply2)\r\nscorboardtab(tab)\r\nscor.penup()\r\nscor.goto(0,0)\r\nscor.pendown()\r\nscor.write(\"YELLOW vs CYAN\", move = False, align =\"center\" , font = (\"Arial\",20,\"normal\"))\r\nscor.ht()\r\n\r\n#for inputs from key\r\nturtle.listen() \r\n\r\nturtle.onkey(ply1_right, \"Right\")\r\nturtle.onkey(ply1_left, \"Left\")\r\nturtle.onkey(ply1_down, \"Down\")\r\nturtle.onkey(ply1_up, 'Up')\r\n\r\nturtle.onkey(ply2_right, 'd')\r\nturtle.onkey(ply2_left, 'a')\r\nturtle.onkey(ply2_down, 's')\r\nturtle.onkey(ply2_up, 'w')\r\n\r\n\r\n\r\nwhile(1):\r\n random_bug(1,dot)\r\n \r\n #for finish game\r\n if xp1==15: \r\n canvas.screensize(4000,800,\"white\")\r\n can.goto(0,-70)\r\n can.clear()\r\n can.color(\"green\")\r\n can.write(\"CYAN WIN!\", move = False, align =\"center\" , font = (\"Arial\",120,\"normal\"))\r\n time.sleep(3) \r\n exit(1)\r\n if xp2==15:\r\n canvas.screensize(4000,800,\"white\")\r\n can.goto(0,-70)\r\n can.clear()\r\n can.color(\"green\")\r\n can.write(\"YELLOW WIN!\", move = False, align =\"center\" , font = (\"Arial\",120,\"normal\"))\r\n time.sleep(3)\r\n exit(1)\r\n\r\n\r\n\r\nturtle.mainloop()\r\n \r\n\r\nturtle.done()","sub_path":"DEBUGGER[mac].py","file_name":"DEBUGGER[mac].py","file_ext":"py","file_size_in_byte":7562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469570139","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[2]:\n\n\ndf = pd.DataFrame({'A':[1,2,np.nan],\n 'B':[5,np.nan,np.nan],\n 'C':[1,2,3]})\n\n\n# In[3]:\n\n\ndf\n\n\n# In[6]:\n\n\ndf.dropna()\n\n\n# In[7]:\n\n\ndf.dropna(axis=1)\n\n\n# In[60]:\n\n\ndf.dropna(thresh=2)\n\n\n# In[9]:\n\n\ndf\n\n\n# In[14]:\n\n\ndf.fillna(value=df.mean())\n\n\n# In[16]:\n\n\ndf['A']\n\n\n# In[17]:\n\n\ndf['A'].fillna(value=df[\"A\"].mean())\n\n\n# In[18]:\n\n\nimport pandas as pd\n# Create dataframe\ndata = {'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'],\n 'Person':['Sam','Charlie','Amy','Vanessa','Carl','Sarah'],\n 'Sales':[200,120,340,124,243,350]}\n\n\n# In[19]:\n\n\ndata\n\n\n# In[20]:\n\n\ndf1=pd.DataFrame(data)\n\n\n# In[21]:\n\n\ndf1\n\n\n# In[22]:\n\n\ndf1.groupby(\"Company\")\n\n\n# In[23]:\n\n\ngrp=df1.groupby(\"Company\")\n\n\n# In[24]:\n\n\ngrp\n\n\n# In[25]:\n\n\ngrp.sum()\n\n\n# In[26]:\n\n\ndf1\n\n\n# In[27]:\n\n\ngrp.count()\n\n\n# In[28]:\n\n\ngrp.describe()\n\n\n# In[33]:\n\n\ngrp.describe().transpose()[\"FB\"]\n\n\n# 
In[34]:\n\n\ngrp.sum()\n\n\n# In[35]:\n\n\ngrp.mean()\n\n\n# In[36]:\n\n\ngrp.max()\n\n\n# In[37]:\n\n\ngrp.min()\n\n\n# In[38]:\n\n\ngrp.median()\n\n\n# In[39]:\n\n\ngrp.mad()\n\n\n# In[40]:\n\n\ngrp.var()\n\n\n# In[41]:\n\n\ngrp.std()\n\n\n# In[42]:\n\n\ngrp.skew()\n\n\n# In[44]:\n\n\ngrp.cumsum()\n\n\n# In[45]:\n\n\n\n\n\n# In[46]:\n\n\ndf\n\n\n# In[47]:\n\n\ndf.hist()\n\n\n# In[50]:\n\n\nimport matplotlib.pyplot as plt\n\n\n# In[51]:\n\n\ndf.hist()\n\n\n# In[61]:\n\n\nimport pandas as pd\ndf = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})\ndf.head()\n\n\n# In[64]:\n\n\ndf[\"col2\"].unique()\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Python/DataAnalysis/MIS401/MIS401(Spring2019)/Class Resource/MIS 401_2nd MID/Missing Value _ GroupBy.py","file_name":"Missing Value _ GroupBy.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565892829","text":"import math\r\n\r\ndef Conversion(exp):\r\n    reales=0\r\n    complejo=0\r\n    signo=1\r\n    for i in range(len(exp)):\r\n        if exp[i]== \"-\":\r\n            signo=-1\r\n        elif exp[i] == \"+\" :\r\n            signo=1\r\n        elif exp[i].isnumeric():\r\n            reales+=int(exp[i])*signo\r\n        else:\r\n            if (exp[i][0] == \"+\"):\r\n                complejo+=int(exp[i][1])\r\n            elif (exp[i][0] == \"-\"):\r\n                complejo-=int(exp[i][1])\r\n            elif exp[i][0] == \"i\":\r\n                complejo+=1\r\n            else:\r\n                complejo+=int(exp[i][0])*signo\r\n    return (reales,complejo)\r\n\r\n\r\ndef Producto (exp1,exp2):\r\n    resultado=(exp1[0]*exp2[0]-exp1[1]*exp2[1],exp1[0]*exp2[1]+exp1[1]*exp2[0])\r\n    return resultado\r\n\r\n\r\ndef Suma (exp1,exp2):\r\n    resultado=(exp1[0]+exp2[0],exp1[1]+exp2[1])\r\n    return resultado\r\n\r\ndef Resta (exp1,exp2):\r\n    exp2=(exp2[0]*-1,exp2[1]*-1)\r\n    respuesta=Suma(exp1,exp2)\r\n    return respuesta\r\n\r\ndef Conjugado (exp):\r\n    resultado =(exp[0],exp[1]*-1)\r\n    return resultado\r\n\r\n    \r\ndef Division (exp1,exp2):\r\n    conj=Conjugado(exp2)\r\n    numerador=Producto(exp1,conj)\r\n    denominador= (exp2[0]**2)+(exp2[1]**2)\r\n    resultado = (numerador[0]/denominador,numerador[1]/denominador)\r\n    return resultado\r\n\r\ndef Modulo(real,imaginario):\r\n    modulo= (real**2 + imaginario**2)**(0.5)\r\n    return modulo\r\n\r\ndef polarCartesiano(n,grado):\r\n    ladoX=n*math.cos(math.radians(grado))\r\n    ladoY=n*math.sin(math.radians(grado))\r\n    cordenada=(ladoX,ladoY)\r\n    return cordenada\r\ndef cartesianoPolar(real,imaginario):\r\n    \r\n    n=Modulo(real,imaginario)\r\n    grado= math.degrees(math.atan2(imaginario,real))\r\n    respuesta=(n,grado)\r\n    return respuesta\r\n\r\ndef Fase(real,imaginario):\r\n\r\n    fase = math.atan2(imaginario,real)\r\n    return fase\r\ndef invertir (c):\r\n    return (-c[0],-c[1])\r\n\r\n\r\n#------------------------------------------------------------------------------------#\r\n\r\ndef restaVectores(vec1,vec2):\r\n    res = []\r\n    for i in range(len(vec1)):\r\n        res.append(Resta(vec1[i], vec2[i]))\r\n    return res\r\n\r\ndef sumaVectores(vec1,vec2):\r\n    res = []\r\n    for i in range(len(vec1)):\r\n        res.append(Suma(vec1[i],vec2[i]))\r\n    return res\r\n\r\ndef inversoVector(vec):\r\n    res = []\r\n    for i in range(len(vec)):\r\n        res.append(invertir(vec[i]))\r\n    return res\r\n\r\ndef productoScalarV(escalar,vector):\r\n    res = []\r\n    for i in vector:\r\n        res.append(Producto((escalar,0),i))\r\n    return res\r\n\r\ndef multMatrices (matA,matB):\r\n    filas = len(matA)\r\n    columnas = len(matB[0])\r\n    matriz = [[(0, 0) for x in range(columnas)] for x in range(filas)]\r\n    for i in range(filas):\r\n        for j 
in range(columnas):\r\n            for k in range(len(matB)):\r\n                matriz[i][j] = Suma(Producto(matA[i][k], matB[k][j]),matriz[i][j])\r\n    return matriz\r\n    \r\ndef productoInterno(matA,matB):\r\n    # <A,B> = sum of conj(a_ij) * b_ij, kept as a (real, imaginary) tuple\r\n    real = 0\r\n    complejo = 0\r\n    for i in range(len(matA)):\r\n        for j in range(len(matA[0])):\r\n            real += (matA[i][j][0]*matB[i][j][0] + matA[i][j][1]*matB[i][j][1])\r\n            complejo += (matA[i][j][0]*matB[i][j][1] - matA[i][j][1]*matB[i][j][0])\r\n    return (real,complejo)\r\n\r\ndef sumaMatriz(matA,matB):\r\n    resultado =[[0 for i in range (len(matA[0]))] for j in range(len(matA))]\r\n    for i in range(len(matA)):\r\n        for j in range(len(matA[0])):\r\n            resultado[i][j]= Suma(matA[i][j], matB[i][j])\r\n    return resultado\r\n    \r\ndef productoEscalarMatriz(escalar,matriz):\r\n    for i in range(len(matriz)):\r\n        for j in range(len(matriz[0])):\r\n            matriz[i][j]= Producto((escalar,0),matriz[i][j])\r\n    return matriz\r\n\r\ndef matInversa(matriz):\r\n    for i in range(len(matriz)):\r\n        for j in range(len(matriz[0])):\r\n            matriz[i][j]= invertir(matriz[i][j])\r\n    return matriz\r\n\r\ndef maTranspuesta (matriz):\r\n    \r\n    resultado =[[(0,0) for i in range (len(matriz))] for j in range(len(matriz))]\r\n    for i in range(len(matriz)):\r\n        for j in range(len(matriz)):\r\n            resultado[j][i]= matriz[i][j]\r\n    return resultado\r\n\r\ndef matConjugada(matriz):\r\n    for i in range(len(matriz)):\r\n        for j in range(len(matriz[0])):\r\n            matriz[i][j]= Conjugado(matriz[i][j])\r\n    return matriz\r\n\r\ndef matAdjunta(matriz):\r\n    \r\n    return matConjugada(maTranspuesta(matriz))\r\n\r\ndef Accion(matA,vec):\r\n    vectorResp=[]\r\n    for j in range(len(matA)):\r\n        fila=(0,0)\r\n        for k in range(len(matA[0])):\r\n            fila=Suma(fila,Producto(matA[j][k],vec[k]))\r\n        vectorResp.append(fila)\r\n    return vectorResp\r\n    #return multMatrices(matA,(maTranspuesta([vec])))\r\n\r\ndef matNorma(Mat):\r\n    return productoInterno(Mat,Mat)\r\ndef distanciaMatrices(mat1,mat2):\r\n    matriz = sumaMatriz(mat1,matInversa(mat2))\r\n    distancia = matNorma(matriz)\r\n    return distancia\r\n\r\ndef identidad(n):\r\n    identidad = [[0 for x in range(n)]for y in range(n)]\r\n    for i in range(n):\r\n        identidad[i][i]=(1,0)\r\n    return identidad\r\n\r\n\r\ndef unitaria(matriz):\r\n    if len(matriz)!=len(matriz[0]):\r\n        return False\r\n    fin = multMatrices(matriz ,matAdjunta(matriz))\r\n    for i in range(len(fin)):\r\n        for j in range(len(fin[0])):\r\n            if ((i==j and (fin[i][j][0]!=1 or fin[i][j][1]!=0)) or (i!=j and (fin[i][j][0]!=0 or fin[i][j][1]!=0))):\r\n                return False\r\n    return True\r\n\r\ndef hermitian(mat):\r\n    return (matAdjunta(mat) == mat)\r\n\r\n\r\n","sub_path":"cnyt/Complejo.py","file_name":"Complejo.py","file_ext":"py","file_size_in_byte":5415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594643424","text":"# Copyright 2018 Jetperch LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nJoulescope python setuptools module.\n\nSee:\nhttps://packaging.python.org/en/latest/distributing.html\nhttps://github.com/pypa/sampleproject\n\"\"\"\n\n# Always prefer setuptools 
over distutils\nimport setuptools\nfrom distutils.command.build import build as build_orig\nimport os\nimport sys\n\nVERSION = '0.6.8' # CHANGE THIS VERSION!\nMYPATH = os.path.abspath(os.path.dirname(__file__))\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = os.path.isfile(os.path.join(MYPATH, 'joulescope', 'stream_buffer.pyx'))\nexcept ImportError:\n USE_CYTHON = False\n\ndef update_version_py():\n path = os.path.join(MYPATH, 'joulescope', 'version.py')\n with open(path, 'wt') as fv:\n fv.write('# AUTOMATICALLY GENERATED BY setup.py\\n')\n fv.write(f'VERSION = \"{VERSION}\"\\n')\n\nupdate_version_py()\n\next = '.pyx' if USE_CYTHON else '.c'\nextensions = [\n setuptools.Extension('joulescope.stream_buffer',\n sources=['joulescope/stream_buffer' + ext],\n include_dirs=[],\n ),\n setuptools.Extension('joulescope.pattern_buffer',\n sources=['joulescope/pattern_buffer' + ext],\n include_dirs=[],\n ),\n]\n\nif USE_CYTHON:\n from Cython.Build import cythonize\n extensions = cythonize(extensions, compiler_directives={'language_level': '3'}) # , annotate=True)\n\n\n# Get the long description from the README file\nwith open(os.path.join(MYPATH, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nif sys.platform.startswith('win'):\n PLATFORM_INSTALL_REQUIRES = ['pypiwin32>=223']\nelse:\n PLATFORM_INSTALL_REQUIRES = []\n\n\n# Hack to install numpy before numpy.get_include()\n# https://stackoverflow.com/questions/54117786/add-numpy-get-include-argument-to-setuptools-without-preinstalled-numpy\nclass Build(build_orig):\n\n def finalize_options(self):\n super().finalize_options()\n # I stole this line from ead's answer:\n __builtins__.__NUMPY_SETUP__ = False\n import numpy\n for extension in self.distribution.ext_modules:\n extension.include_dirs.append(numpy.get_include())\n self.distribution.include_dirs.append(numpy.get_include())\n\n\nsetuptools.setup(\n name='joulescope',\n version=VERSION,\n description='Joulescope™ host driver and utilities',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://www.joulescope.com',\n author='Jetperch LLC',\n author_email='joulescope-dev@jetperch.com',\n license='Apache',\n\n # Classifiers help users find your project by categorizing it.\n #\n # For a list of valid classifiers, see https://pypi.org/classifiers/\n classifiers=[ # Optional\n 'Development Status :: 4 - Beta',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Topic :: Software Development :: Build Tools',\n\n # Pick your license as you wish\n 'License :: OSI Approved :: Apache Software License',\n\n # Supported Python versions\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n\n keywords='joulescope driver',\n\n packages=setuptools.find_packages(exclude=['native', 'docs', 'test', 'dist', 'build']),\n ext_modules=extensions,\n cmdclass={\n 'build': Build,\n },\n include_dirs=[],\n \n # See https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires\n python_requires='~=3.6',\n\n setup_requires=[\n 'numpy>=1.15.2',\n 'Cython>=0.29.3',\n ],\n\n # See https://packaging.python.org/en/latest/requirements.html\n install_requires=[\n 'numpy>=1.15.2',\n 'python-dateutil>=2.7.3',\n 'pymonocypher>=0.1.3',\n ] + PLATFORM_INSTALL_REQUIRES,\n\n extras_require={\n 'dev': ['check-manifest', 'Cython', 'coverage', 'wheel'],\n }, \n\n entry_points={\n 'console_scripts': [\n 
'joulescope_cmd=joulescope.command.runner:run',\n        ],\n    },\n    \n    project_urls={\n        'Bug Reports': 'https://github.com/jetperch/pyjoulescope/issues',\n        'Funding': 'https://www.joulescope.com',\n        'Twitter': 'https://twitter.com/joulescope',\n        'Source': 'https://github.com/jetperch/pyjoulescope/',\n    },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189386281","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Dec  8 17:56:31 2020\r\n\r\n@author: eardi\r\n\"\"\"\r\nimport csv\r\n#filepath = \"C:\\\\Users\\\\eardi\\\\OneDrive\\\\Documents\\\\Bootcamp\\\\Python\\\\Resource\\\\budget_data.csv\"\r\nfilepath = '..\\\\Resources\\\\budget_data.csv'\r\n \r\nwith open(filepath, 'r') as text:\r\n    reader = csv.reader(text, delimiter=\",\") \r\n    next(reader)\r\n    DateCnt = 0\r\n    Net = 0\r\n    Profit = 0\r\n    Loss = 0\r\n    Prev_Tl = None\r\n    Change_Sum = 0\r\n    TL_Difference = 0\r\n    for row in reader: \r\n        Date_Set = row[0] \r\n        DateCnt += 1 \r\n        \r\n        Monthly_Tl = float(row[1])\r\n        Net += Monthly_Tl \r\n        if Monthly_Tl > 0:\r\n            Profit += Monthly_Tl\r\n        if Monthly_Tl < 0:\r\n            Loss += Monthly_Tl\r\n        # track the month-over-month change for the average\r\n        if Prev_Tl is not None:\r\n            Change_Sum += Monthly_Tl - Prev_Tl\r\n        Prev_Tl = Monthly_Tl\r\n    \r\n    # n months yield n-1 month-over-month changes\r\n    TL_Difference = Change_Sum / (DateCnt - 1)\r\n    print (TL_Difference)\r\n    Total_Net = '${:,.2f}'.format(Net)\r\n    Mean = '${:,.2f}'.format((TL_Difference)) \r\n    \r\n    print(\"Total Months: \",DateCnt) \r\n    print(\"Net Total : \",Total_Net)\r\n    print(\"Average of Changes: \", Mean) \r\n    #print(\"Greatest Increase in Profits: \", max(row) )\r\n    #print(\"Greatest Decrease in Profits: \", min(row)) \r\n","sub_path":"pyBank.py","file_name":"pyBank.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148798282","text":"import argparse\nimport os\nfrom train import Train\nfrom test import Test\nimport torch\n\ndescription = 'T-CAM example using pytorch'\nparser = argparse.ArgumentParser(description=description)\n\nparser.add_argument('--dataDir', metavar='Directory', type=str, default='../../DB/VR_VIMS_PARSE', help='saved data directory ex:../../DB/VR_VIMS_PARSE/augmentation/video')\nparser.add_argument('--model', metavar='MODEL', type=str, default='model1_0', help='network architecture. Default T-CAM')\n\nparser.add_argument('--lr', metavar='L', type=float, default=1e-5, help='learning rate used for training. Default 1e-5')\nparser.add_argument('--batchSize', metavar='BATCH', type=int, default=1, help='batch size')\nparser.add_argument('--epochs', metavar='N', type=int, default=50, help='number of training epochs. 
Default 50')\nparser.add_argument('--gpu', metavar='GPU', type=int, default=0, help='set gpu')\n\nparser.add_argument('--fineTune', default=False, action='store_true', help='Fine-tuning')\nparser.add_argument('--isTest', default=False, action='store_true', help='Train & Test mode')\n\nargs = parser.parse_args()\ntorch.cuda.set_device(args.gpu)\n\ndef display_config(check_point):\n    # create the checkpoint directory\n    if not os.path.exists(check_point):\n        os.makedirs(check_point)\n    print('###################################################')\n    print('#     T-CAM example - Pytorch implementation      #')\n    print('#        by Jinwoo Kim (jw09191@gmail.com)        #')\n    print('###################################################')\n    print('')\n    print('---------- YOUR SETTINGS ----------')\n    if args.isTest or args.fineTune: mode='a'\n    else: mode='w'\n    with open(os.path.join(check_point, 'record.txt'), mode) as record:\n        for arg in vars(args):\n            print(\"{:15}: {}\".format(str(arg), str(getattr(args, arg))))\n            data = \"{:15}: {}\\n\".format(str(arg), str(getattr(args, arg)))\n            record.write(data)\n    print('')\n\ndef main():\n    check_point = os.path.join('check_point', args.model)\n    display_config(check_point)\n    if args.isTest is not True:\n        train = Train(check_point=check_point, data_directory=args.dataDir, model=args.model,\n                      num_epochs=args.epochs, learning_rate=args.lr,\n                      batchSize=args.batchSize, fineTune=args.fineTune)\n        train.train()\n\n    else:\n        test = Test(check_point=check_point, data_directory=args.dataDir, model=args.model,\n                    is_test=args.isTest)\n        test.test()\n\nif __name__ == '__main__':\n    main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232245273","text":"#!/usr/bin/env python3\n'''\n    This sample program shows how to control a motor using a joystick. In the\n    operator control part of the program, the joystick is read and the value\n    is written to the motor.\n\n    Joystick analog values range from -1 to 1 and speed controller inputs also\n    range from -1 to 1 making it easy to work together. The program also delays\n    a short time in the loop to allow other threads to run. 
This is generally\n    a good idea, especially since the joystick values are only transmitted\n    from the Driver Station once every 20ms.\n'''\n\nimport wpilib\nimport ctre\nfrom wpilib.drive import DifferentialDrive\nfrom wpilib.interfaces import GenericHID\n\nclass MyRobot(wpilib.IterativeRobot):\n    \n    def robotInit(self):\n        '''Robot initialization function'''\n        print(\"robotInit\")\n        self.leftMotor = ctre.WPI_TalonSRX(8) # initialize the left motor as a Talon on CAN ID 8\n        self.rightMotor = ctre.WPI_TalonSRX(4)\n        self.stick = wpilib.XboxController(0) # initialize the joystick on port 0\n        self.right = wpilib.SpeedControllerGroup(self.rightMotor)\n        self.left = wpilib.SpeedControllerGroup(self.leftMotor)\n\n        self.drive = DifferentialDrive(self.left, self.right)\n\n    def teleopPeriodic(self):\n        #self.drive.arcadeDrive(self.stick, True)\n        print(self.stick.getX(GenericHID.Hand.kLeft))\n        self.drive.arcadeDrive(self.stick.getY(GenericHID.Hand.kLeft), self.stick.getX(GenericHID.Hand.kLeft), squaredInputs=True)\n        print(self.stick.getX(GenericHID.Hand.kLeft))\n\nif __name__ == \"__main__\":\n    wpilib.run(MyRobot)\n","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"622241624","text":"\"\"\"Add public flags column\n\nRevision ID: 45973dacf7da\nRevises: 451f61f7f7cb\nCreate Date: 2020-09-02 10:38:18.142271\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '45973dacf7da'\ndown_revision = '451f61f7f7cb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('users', sa.Column('public_flags', sa.JSON(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('users', 'public_flags')\n    # ### end Alembic commands ###\n","sub_path":"alembic/versions/45973dacf7da_add_public_flags_column.py","file_name":"45973dacf7da_add_public_flags_column.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41373066","text":"import random\r\n\r\nrps = {1:'rock',2:'paper',3:'scissors'}\r\nwhile True:\r\n    print('''Enter \r\n    1 for rock\r\n    2 for paper\r\n    3 for scissor \r\n    Press Enter to quit''')\r\n    user_rps = input()\r\n    if user_rps == '':\r\n        break\r\n    user_rps = int(user_rps)\r\n    rand_int = random.randint(1,3)\r\n    if user_rps == rand_int:\r\n        print(\"It's a tie ! 
\" + rps[user_rps])\n else:\n print(\"Let me decide the winner later!\")\n \n","sub_path":"RockPaperScissor.py","file_name":"RockPaperScissor.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198248069","text":"class Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n dict_t = {}\n start = 0\n curr_max = 0\n for indx in range(len(s)):\n if s[indx] in dict_t: # if char is in there\n\n if dict_t[s[indx]] < start: # if the character currently isn't included\n curr_max = max(curr_max, indx - start + 1)\n else: # if it is included, don't include it and start it at the next character\n start = dict_t[s[indx]] + 1\n else:\n curr_max = max(curr_max, indx - start + 1)\n dict_t[s[indx]] = indx\n return curr_max\n","sub_path":"problems/array/longestSubstringWithoutDuplicates.py","file_name":"longestSubstringWithoutDuplicates.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"504480394","text":"import urllib.request,json\nfrom .models import Source\nfrom .models import Article\n\n\n\n# Getting api key\napi_key = None\n# Getting the movie base url\nsource_url = None\neverything_url = None\n\ndef configure_request(app):\n global api_key,sources_url,everything_url\n api_key = app.config['NEWS_API_KEY']\n sources_url = app.config[\"SOURCES_BASE_URL\"]\n everything_url= app.config[\"EVERYTHING_BASE_URL\"]\n\ndef get_sources(category):\n '''\n Function that gets the json response to our url request\n '''\n get_sources_url = sources_url.format(category,api_key)\n\n with urllib.request.urlopen(get_sources_url) as url:\n get_sources_data = url.read()\n get_sources_response = json.loads(get_sources_data)\n\n source_results = None\n\n if get_sources_response['sources']:\n source_results_list = get_sources_response['sources']\n source_results = process_results(source_results_list)\n\n\n return source_results\ndef process_results(source_list):\n '''\n Function that processes the source result and transform them to a list of Objects\n\n Args:\n source_list: A list of dictionaries that contain movie details\n\n Returns :\n source_results: A list of source objects\n '''\n source_results = []\n for source_item in source_list:\n id = source_item.get('id')\n name = source_item.get('name')\n description = source_item.get('description')\n url = source_item.get('url')\n category = source_item.get('category')\n language = source_item.get('language')\n\n source_object = Source(id,name,description,url,category,language)\n source_results.append(source_object)\n\n return source_results\n\ndef get_articles(source_id):\n '''\n Function that gets the json response to our url request\n '''\n get_articles_url = everything_url.format(category,api_key)\n\n with urllib.request.urlopen(get_articles_url) as url:\n get_articles_data = url.read()\n get_articles_response = json.loads(get_articles_data)\n\n articles_results = None\n\n if get_articles_response['articles']:\n articles_results_list = get_sources_response['articles']\n articles_results = process_results(articles_results_list)\n\n\n return articles_results\ndef process_articles(articles_list):\n '''\n Function that processes the source result and transform them to a list of Objects\n\n Args:\n articles_list: A list of dictionaries that contain movie details\n\n Returns :\n articles_results: A list of source objects\n '''\n 
articles_results = []\n    for article_item in articles_list:\n\n        title = article_item.get('title')\n        description = article_item.get('description')\n        url = article_item.get('url')\n        urlToImage = article_item.get('urlToImage')\n\n\n        if urlToImage:\n            articles_object = Article(title, description, url, urlToImage)\n            articles_results.append(articles_object)\n\n    return articles_results\n","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653057877","text":"from flask import Flask, render_template, redirect, request, url_for\nimport csv\n\napp = Flask(__name__)\n\ndef read_database(csv_database):\n    csv_reader = csv.reader(open(csv_database))\n    csv_reader_lists = []\n    for list in csv_reader:\n        csv_reader_lists.append(list)\n    return csv_reader_lists\n\n@app.route('/')\ndef route_list():\n    database= read_database(\"user_stories.csv\")\n    return render_template(\"list.html\", database=database)\n\n@app.route('/story')\ndef route_edit():\n    return render_template(\"form.html\")\n\n@app.route('/story/<story_id>', methods=[\"POST\", \"GET\"])\ndef save_story(story_id):\n    database= read_database(\"user_stories.csv\")\n    current_entry_information = []\n    for list in database:\n        if list[0] == story_id:\n            for list_item in list:\n                current_entry_information.append(list_item)\n    return render_template(\"form.html\", current_entry_information=current_entry_information)\n\n@app.route(\"/save_user_story\", methods=[\"POST\"])\ndef save_user_story():\n    csv_database = read_database(\"user_stories.csv\")\n    row_count = sum(1 for row in csv_database)\n    user_story_manager = []\n    user_story_manager.append(row_count+1)\n    user_story_manager.append(request.form[\"Story Title\"])\n    user_story_manager.append(request.form[\"User Story\"])\n    user_story_manager.append(request.form[\"Acceptance criteria\"])\n    user_story_manager.append(request.form[\"Business value\"])\n    user_story_manager.append(request.form[\"Estimation\"])\n    user_story_manager.append(request.form[\"Status\"])\n    csv_writer = csv.writer(open(\"user_stories.csv\", \"a\"))\n    csv_writer.writerow(user_story_manager)\n    return redirect('/')\n\n@app.route(\"/update_user_story\", methods=[\"POST\"])\ndef update_user_story():\n    existing_list = csv.reader(open(\"user_stories.csv\"))\n    replaced_item = [item for item in existing_list]\n    replaced_item[int(request.form[\"ID\"])-1][1] = request.form[\"Story Title\"]\n    replaced_item[int(request.form[\"ID\"])-1][2] = request.form[\"User Story\"]\n    replaced_item[int(request.form[\"ID\"])-1][3] = request.form[\"Acceptance criteria\"]\n    replaced_item[int(request.form[\"ID\"])-1][4] = request.form[\"Business value\"]\n    replaced_item[int(request.form[\"ID\"])-1][5] = request.form[\"Estimation\"]\n    replaced_item[int(request.form[\"ID\"])-1][6] = request.form[\"Status\"]\n    writer = csv.writer(open(\"user_stories.csv\", 'w'))\n    writer.writerows(replaced_item)\n    return redirect('/')\n\n\n@app.route(\"/remove_user_story\", methods=[\"POST\"])\ndef remove_user_story():\n    csv_database = read_database(\"user_stories.csv\")\n    for row in csv_database:\n        if row[0] == request.form[\"ID\"]:\n            csv_database.remove(row)\n    row_count = 1\n    for row in csv_database:\n        row[0] = row_count\n        row_count += 1\n    writer = csv.writer(open(\"user_stories.csv\", 'w'))\n    writer.writerows(csv_database)\n    return redirect('/')\n\n    \nif __name__ == \"__main__\":\n    app.run(\n        debug=True, # Allow verbose error reports\n        port=2000 # Set 
custom port\n    )","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"583284472","text":"import sys\nimport csv\nfrom PyQt5 import QtWidgets, QtCore\n# from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import (QAction, QActionGroup, QApplication, QColorDialog,\n        QComboBox, QDialog, QFontDialog, QGroupBox, QHBoxLayout, QLabel,\n        QMainWindow, QMessageBox, QPushButton, QTableWidget,\n        QTableWidgetItem, QToolBar)\n\nfrom dbconnector import db_delete\n\n\nif __name__ == '__main__':\n    print(\"INFO: db_admin_handler class import as '__main__'\")\n    import db_admin\n    # from ....dbconnector import db_select, db_insert\nelse:\n    print(\"INFO: db_admin_handler class import as '__module__'\")\n    from gui.db_admin import db_admin\n    from dbconnector import db_select, db_insert, db_selectAnalyser\n\n\nclass db_admin_handler(QtWidgets.QWidget):\n    def __init__(self):\n        super(db_admin_handler, self).__init__()\n        self.ui = db_admin.Ui_Form()\n        
self.ui.setupUi(self)\n\n        self.__dbSELECT = db_select.dbselect()\n        self.__dbSELECTAnalyser = db_selectAnalyser.dbselect()\n        self.__dbINSERT = db_insert.dbinsert()\n\n        # Button connection --> clicked\n        self.ui.btn_loadData.clicked.connect(self.event_btn_loadData)\n        self.ui.btn_delCM.clicked.connect(self.event_btn_delCM)\n\n    def event_btn_loadData(self):\n        # all label-writing functions have to be called here, so that\n        # everything is loaded when the \"load data\" button is clicked\n\n        self.__write_cm_label()\n        self.__write_eut_label()\n        self.__write_system_label()\n        self.__write_ambientTemp_lable()\n        self.__write_testLoad_lable()\n        self.__write_configuration_lable()\n        self.__write_chassis_lable()\n        self.__write_cpu_lable()\n        self.__write_hdd_lable()\n        self.__write_mem_lable()\n        self.__write_mobo_lable()\n        self.__write_psu_lable()\n        self.__write_pcieCtrlList_lable()\n        self.__write_pcieCtrl_lable()\n        self.__write_sensorName_lable()\n        self.__write_sensorMax_lable()\n        self.__write_sensorValue_lable()\n        self.__write_sensorTypList_lable()\n        self.__write_sensorTyp_lable()\n\n    def __write_cm_label(self):\n        # load the row count of the climaticmeasurement table\n        cm_count = self.__dbSELECT.count_climaticMeasurement()\n        cm_max = self.__dbSELECT.MaxId_cmeasurement()\n        # now write the label text\n        self.ui.label_cmCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(cm_count, cm_max))\n\n    def __write_eut_label(self):\n        # row count of the eut table\n        eut_count = self.__dbSELECT.count_eut()\n        eut_max = self.__dbSELECT.MaxId_eut()\n        self.ui.label_EUTCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(eut_count, eut_max))\n\n    def __write_system_label(self):\n        # row count of the system table\n        eut_count = self.__dbSELECT.count_system()\n        eut_max = self.__dbSELECT.MaxId_system()\n        self.ui.label_systemCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(eut_count, eut_max))\n\n    def __write_ambientTemp_lable(self):\n        ccount = self.__dbSELECT.count_ambientTemp()\n        maxid = self.__dbSELECT.MaxId_ambientTemp()\n        self.ui.label_ambientCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_testLoad_lable(self):\n        ccount = self.__dbSELECT.count_testLoad()\n        maxid = self.__dbSELECT.MaxId_testLoad()\n        self.ui.label_testloadCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_configuration_lable(self):\n        ccount = self.__dbSELECT.count_config()\n        maxid = self.__dbSELECT.MaxId_config()\n        self.ui.label_configurationCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_chassis_lable(self):\n        ccount = self.__dbSELECT.count_chassis()\n        maxid = self.__dbSELECT.MaxId_chassis()\n        self.ui.label_chassisCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_cpu_lable(self):\n        ccount = self.__dbSELECT.count_cpu()\n        maxid = self.__dbSELECT.MaxId_cpu()\n        self.ui.label_cpuCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_hdd_lable(self):\n        ccount = self.__dbSELECT.count_hdd()\n        maxid = self.__dbSELECT.MaxId_hdd()\n        self.ui.label_hddCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_mem_lable(self):\n        ccount = self.__dbSELECT.count_memory()\n        maxid = self.__dbSELECT.MaxId_memory()\n        self.ui.label_memCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_mobo_lable(self):\n        ccount = 
self.__dbSELECT.count_mobo()\n        maxid = self.__dbSELECT.MaxId_mobo()\n        self.ui.label_moboCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_psu_lable(self):\n        ccount = self.__dbSELECT.count_psu()\n        maxid = self.__dbSELECT.MaxId_psu()\n        self.ui.label_psuCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_pcieCtrlList_lable(self):\n        ccount = self.__dbSELECT.count_pcieCtrlList()\n        maxid = self.__dbSELECT.MaxId_pcieCtrlList()\n        self.ui.label_pcieCtrlTableCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_pcieCtrl_lable(self):\n        ccount = self.__dbSELECT.count_pcieCtrl()\n        maxid = self.__dbSELECT.MaxId_pcieCtrl()\n        self.ui.label_pcieCtrlCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_sensorName_lable(self):\n        ccount = self.__dbSELECT.count_sensorName()\n        maxid = self.__dbSELECT.MaxId_sensorName()\n        self.ui.label_sensorNameCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_sensorMax_lable(self):\n        ccount = self.__dbSELECT.count_sensorMax()\n        maxid = self.__dbSELECT.MaxId_sensorMax()\n        self.ui.label_sensorMaxCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_sensorValue_lable(self):\n        ccount = self.__dbSELECT.count_sensorValue()\n        maxid = self.__dbSELECT.MaxId_sensorValue()\n        self.ui.label_sensorValueCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_sensorTypList_lable(self):\n        ccount = self.__dbSELECT.count_sensorTypList()\n        maxid = self.__dbSELECT.MaxId_sensorTypList()\n        self.ui.label_sensorTypListCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def __write_sensorTyp_lable(self):\n        ccount = self.__dbSELECT.count_sensorTyp()\n        maxid = self.__dbSELECT.MaxId_sensorTyp()\n        self.ui.label_sensorTypCount.setText(\n            \"count: [{:05}] | max: [{:05}]\".format(ccount, maxid))\n\n    def event_btn_delCM(self):\n        cm_id = self.ui.lineEdit_delCM_id.text()\n        deleter = db_delete.dbdelete()\n        deleter.del_climaticMeasurement_byID(cm_id)\n        print(\"DEBUG: event btn del CM function is connected and running\")\n\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(sys.argv)\n    x = db_admin_handler()\n    x.show()\n    app.exec_()\n","sub_path":"taff_v1/gui/db_admin/db_admin_handler.py","file_name":"db_admin_handler.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"377162496","text":"#!/usr/bin/python\n\nimport sys, socket, array, struct\nif sys.version_info >= (3, 0):\n\tfrom functools import reduce\n\nclass UdpSocket(object):\n\t_len_pack = struct.Struct('<I')\n\t_send_big_chunk = 512\n\t\n\tdef __init__(self, addr):\n\t\t# remote (host, port) pair used by sendto; recvfrom accepts any peer\n\t\tself.addr = addr\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\n\tdef send(self, data):\n\t\t# append a one-byte checksum so that sum(packet) & 0xFF == 0xFF on receive\n\t\tpacket = array.array('B', data)\n\t\tcksum = 0xFF - (reduce(lambda x,y: x+y, packet) & 0xFF)\n\t\tpacket.append(cksum)\n\t\tsent = self.sock.sendto(packet, self.addr)\n\t\twhile sent < len(packet):\n\t\t\tassert sent > 0, \"Network unavailable\"\n\t\t\tpacket = packet[sent:]\n\t\t\tsent = self.sock.sendto(packet, self.addr)\n\t\n\tdef recv(self, length, require_full = True):\n\t\t# packet should always end with checksum, but it might be shorter than *length* if !require_full\n\t\t# Don't do validation on same address to allow flowing through NAT\n\t\tremaining = length + 1\n\t\tpacket, address = self.sock.recvfrom(remaining)\n\t\tassert len(packet) > 0, \"Network unavailable\"\n\t\tpacket = bytearray(packet)\n\t\tremaining -= len(packet)\n\t\tif require_full:\n\t\t\twhile remaining > 0:\n\t\t\t\tbuf, address = self.sock.recvfrom(remaining)\n\t\t\t\tassert len(buf) > 0, \"Network unavailable\"\n\t\t\t\tremaining -= len(buf)\n\t\t\t\tpacket.extend(buf)\n\t\t\n\t\tpacket = array.array('B', packet)\n\t\tdata = 
packet[:-1]\n\t\tcksum = packet[-1]\n\t\tif (cksum + reduce(lambda x,y: x+y, data) & 0xFF) != 0xFF:\n\t\t\treturn None # Corrupted data\n\t\treturn data\n\t\n\tdef send_big(self, data):\n\t\tdata = array.array('B', data)\n\t\ttotal_len = len(data)\n\t\ttotal_sent = 0\n\t\tself.send(UdpSocket._len_pack.pack(total_len))\n\t\twhile total_sent < total_len:\n\t\t\tchunk_len = min(total_len - total_sent, UdpSocket._send_big_chunk)\n\t\t\tchunk = data[total_sent : total_sent + chunk_len]\n\t\t\tself.send(chunk)\n\t\t\ttotal_sent += chunk_len\n\t\n\tdef recv_big(self):\n\t\tlength = self.recv(UdpSocket._len_pack.size, require_full=True)\n\t\tif length == None: return None # Corrupted data\n\t\tlength = UdpSocket._len_pack.unpack(length)[0]\n\t\tbuf = array.array('B')\n\t\twhile length > 0:\n\t\t\tpacket = self.recv(min(length, UdpSocket._send_big_chunk), require_full=True)\n\t\t\tif packet == None: return None # Corrupted data\n\t\t\tbuf.extend(packet)\n\t\t\tlength -= len(packet)\n\t\treturn buf","sub_path":"Final Project/Programming/SBC/shared/udp.py","file_name":"udp.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275779014","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchvision.models import resnet50, inception_v3\nfrom torch.hub import download_url_to_file\n\nfrom backend.face_recognition.utils.constants import PRETRAINED_URL, PRETRAINED_MODEL_DIR, MODEL_DIR\n\n\ndef load_state():\n cached_file = os.path.join(PRETRAINED_MODEL_DIR, os.path.basename(PRETRAINED_URL))\n if not os.path.exists(cached_file):\n download_url_to_file(PRETRAINED_URL, cached_file)\n\n state_dict = torch.load(cached_file)\n\n return state_dict\n\n\ndef get_model(pretrained=True):\n model = FaceNet(pretrained)\n if pretrained:\n state = load_state()\n model.load_state_dict(state['state_dict'])\n\n return model\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass FaceNet(nn.Module):\n def __init__(self, pretrained=False, num_classes=500, embedding_size=128):\n super(FaceNet, self).__init__()\n self.model = resnet50(pretrained)\n self.cnn = nn.Sequential(\n self.model.conv1,\n self.model.bn1,\n self.model.relu,\n self.model.maxpool,\n self.model.layer1,\n self.model.layer2,\n self.model.layer3,\n self.model.layer4\n )\n\n fc_dim = 2048 * 8 * 8\n if pretrained:\n fc_dim = 100352\n\n self.model.fc = nn.Sequential(\n Flatten(),\n nn.Linear(fc_dim, embedding_size)\n )\n self.model.classifier = nn.Linear(embedding_size, num_classes)\n\n def l2_norm(self, input):\n input_size = input.size()\n buffer = torch.pow(input, 2)\n normp = torch.sum(buffer, 1).add_(1e-10)\n norm = torch.sqrt(normp)\n _output = torch.div(input, norm.view(-1, 1).expand_as(input))\n output = _output.view(input_size)\n return output\n\n def forward(self, x):\n x = self.cnn(x)\n x = self.model.fc(x)\n\n features = self.l2_norm(x)\n alpha = 10\n features = features * alpha\n\n return features\n\n\nclass FaceNetInceptionV3(nn.Module):\n def __init__(self, embedding_dimension=128, pretrained=False):\n super(FaceNetInceptionV3, self).__init__()\n self.model = inception_v3(pretrained=pretrained)\n\n input_features_fc_layer = self.model.fc.in_features\n self.model.fc = nn.Sequential(\n nn.Linear(input_features_fc_layer, embedding_dimension, bias=False),\n nn.BatchNorm1d(embedding_dimension, eps=0.001, momentum=0.1, affine=True)\n )\n\n def forward(self, x):\n x = 
self.model(x)\n        x = F.normalize(x, p=2, dim=1)\n\n        return x\n\n\nclass FaceNetResnet(nn.Module):\n    def __init__(self, embedding_dimension=128, pretrained=False):\n        super(FaceNetResnet, self).__init__()\n        self.model = resnet50(pretrained=pretrained)\n\n        input_features_fc_layer = self.model.fc.in_features\n        self.model.fc = nn.Sequential(\n            nn.Linear(input_features_fc_layer, embedding_dimension, bias=False),\n            nn.BatchNorm1d(embedding_dimension, eps=0.001, momentum=0.1, affine=True)\n        )\n\n    def forward(self, x):\n        x = self.model(x)\n        x = F.normalize(x, p=2, dim=1)\n\n        return x","sub_path":"backend/face_recognition/models/FaceNet.py","file_name":"FaceNet.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207740810","text":"import datetime\nimport os\nimport random\nimport string\n\nimport math\nfrom django.utils import timezone\nfrom django.utils.text import slugify\n\n\ndef get_last_month_data(today):\n    \"\"\"\n    Simple method to get the datetime objects for the\n    start and end of last month.\n    \"\"\"\n    this_month_start = datetime.datetime(today.year, today.month, 1)\n    last_month_end = this_month_start - datetime.timedelta(days=1)\n    last_month_start = datetime.datetime(last_month_end.year, last_month_end.month, 1)\n    return (last_month_start, last_month_end)\n\n\ndef get_trial_days():\n    return timezone.now() + timezone.timedelta(days=30)\n\n\ndef addDays(date_, num_of_days):\n    return date_ + timezone.timedelta(days=num_of_days)\n\n\ndef get_fileType(filepath):\n    filename, file_extension = os.path.splitext(filepath)\n    return file_extension\n\n\ndef get_month_data_range(months_ago=1, include_this_month=False):\n    \"\"\"\n    A method that generates a list of dictionaries\n    that describe any given amount of monthly data.\n    \"\"\"\n    today = datetime.datetime.now().today()\n    dates_ = []\n    if include_this_month:\n        # get next month's data with:\n        next_month = today.replace(day=28) + datetime.timedelta(days=4)\n        # use next month's data to get this month's data breakdown\n        start, end = get_last_month_data(next_month)\n        dates_.insert(0, {\n            \"start\": start.timestamp(),\n            \"start_json\": start.isoformat(),\n            \"end\": end.timestamp(),\n            \"end_json\": end.isoformat(),\n            \"timesince\": 0,\n            \"year\": start.year,\n            \"month\": str(start.strftime(\"%B\")),\n        })\n    for x in range(0, months_ago):\n        start, end = get_last_month_data(today)\n        today = start\n        dates_.insert(0, {\n            \"start\": start.timestamp(),\n            \"start_json\": start.isoformat(),\n            \"end\": end.timestamp(),\n            \"end_json\": end.isoformat(),\n            \"timesince\": int((datetime.datetime.now() - end).total_seconds()),\n            \"year\": start.year,\n            \"month\": str(start.strftime(\"%B\"))\n        })\n    # dates_.reverse()\n    return dates_\n\n\ndef get_filename(path): # /abc/filename.mp4\n    return os.path.basename(path)\n\n\ndef random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):\n    return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef unique_id_generator(instance):\n    \"\"\"\n    This is for a Django project with an id field\n    \"\"\"\n    size = random.randint(30, 45)\n    key = random_string_generator(size=size)\n\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(id=key).exists()\n    if qs_exists:\n        return unique_id_generator(instance)\n    return key\n\n\ndef unique_key_generator(instance):\n    \"\"\"\n    This is for a Django project with a key field\n    \"\"\"\n    size = random.randint(30, 45)\n    key = 
random_string_generator(size=size)\n\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(key=key).exists()\n    if qs_exists:\n        return unique_key_generator(instance)\n    return key\n\n\ndef unique_order_id_generator(instance):\n    \"\"\"\n    This is for a Django project with an order_id field\n    \"\"\"\n    order_new_id = random_string_generator()\n\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(order_id=order_new_id).exists()\n    if qs_exists:\n        return unique_order_id_generator(instance)\n    return order_new_id\n\n\ndef unique_slug_generator(instance, new_slug=None):\n    \"\"\"\n    This is for a Django project and it assumes your instance\n    has a model with a slug field and a title character (char) field.\n    \"\"\"\n    if new_slug is not None:\n        slug = new_slug\n    else:\n        try:\n            slug = slugify(instance.user)\n        except Exception as e:\n            slug = slugify(instance.name)\n\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(slug=slug).exists()\n    if qs_exists:\n        new_slug = \"{slug}-{randstr}\".format(\n            slug=slug,\n            randstr=random_string_generator(size=4)\n        )\n        return unique_slug_generator(instance, new_slug=new_slug)\n    return slug\n\n\ndef unique_slug_generator_by_email(instance, new_slug=None):\n    \"\"\"\n    This is for a Django project and it assumes your instance\n    has a model with a slug field and a title character (char) field.\n    \"\"\"\n    if new_slug is not None:\n        slug = new_slug\n    else:\n        slug = slugify(instance.email)\n\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(email=slug).exists()\n    if qs_exists:\n        new_slug = \"{slug}-{randstr}\".format(\n            slug=slug,\n            randstr=random_string_generator(size=4)\n        )\n        return unique_slug_generator_by_email(instance, new_slug=new_slug)\n    return slug\n\n\ndef unique_slug_by_name(instance, new_slug=None):\n    \"\"\"\n    This is for a Django project and it assumes your instance\n    has a model with a slug field and a title character (char) field.\n    \"\"\"\n    if new_slug is not None:\n        slug = new_slug\n    else:\n        slug = slugify(instance.name)\n\n    Klass = instance.__class__\n    qs_exists = Klass.objects.filter(slug=slug).exists()\n    if qs_exists:\n        new_slug = \"{slug}-{randstr}\".format(\n            slug=slug,\n            randstr=random_string_generator(size=4)\n        )\n        return unique_slug_by_name(instance, new_slug=new_slug)\n    return slug\n\n\ndef digitExtract(char):\n    return ''.join(filter(str.isdigit, char))\n    # return re.findall(r'\\d+', char) - would return data as a list encapsulated data\n\n\ndef removeNCharFromString(num_of_char, string_data):\n    size = len(string_data)\n    # Slice string to remove last N characters from string\n    return string_data[:size - num_of_char]\n\n\ndef secondWordExtract(char):\n    \"\"\"\n    extract the second word from a string sequence\n    :type char: str\n    \"\"\"\n    return char.split(' ', 2)[1]\n\n\ndef armotizationLoanCalculator(pAmount, interest, nRepayment):\n    \"\"\"\n    system generated amortized loan estimate\n    \"\"\"\n    pAmount = int(pAmount)\n    interest_ = int(interest)\n    nRepayment = int(nRepayment)\n\n    sInterest = pAmount * (interest_ / 100)\n    sRepayment = math.ceil(pAmount / nRepayment)\n\n    return int(sRepayment + sInterest)\n\n\ndef switch_month(month):\n    switcher = {\n        \"January\":1,\n        \"February\":2,\n        \"March\":3,\n        \"April\":4,\n        \"May\":5,\n        \"June\":6,\n        \"July\":7,\n        \"August\":8,\n        \"September\":9,\n        \"October\":10,\n        \"November\":11,\n        \"December\":12,\n    }\n    return switcher.get(month, \"Invalid 
Argument\")\n","sub_path":"amjuLoans/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532228987","text":"\"\"\"\npicture.py\nAuthor: Sarah Dunbar\nCredit: http://cloford.com/resources/colours/500col.htm\n\nAssignment:\n\nUse the ggame library to \"paint\" a graphical picture of something (e.g. a house, a face or landscape).\n\nUse at least:\n1. Three different Color objects.\n2. Ten different Sprite objects.\n3. One (or more) RectangleAsset objects.\n4. One (or more) CircleAsset objects.\n5. One (or more) EllipseAsset objects.\n6. One (or more) PolygonAsset objects.\n\nSee:\nhttps://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/TUTORIAL:-Displaying-Graphics\nfor general information on how to use ggame.\n\nSee:\nhttp://brythonserver.github.io/ggame/\nfor detailed information on ggame.\n\n\"\"\"\nfrom ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset\n\n# add your code here \\/ \\/ \\/\nmakebrown = Color(0xff0000, 1.0)\nplum = Color(0xcc00ff, 1.0)\nwater = Color(0x0000ff, 0.5)\ngrassy = Color(0xeeff00, 1.0)\nmakebrown2 = Color(0x00ff00, 0.5)\nblack = Color(0x000000, 1.0)\ngrey = Color(0x000000, 0.5)\nwhite = Color(0x000000, 0.0)\nbrown = Color(0xA0522D, 1.0)\nskinwhite = Color(0xFFE7BA, 1.0)\ntunic = Color(0x872657, 1.0)\nfishscale = Color(0xEE1289, 1.0)\n\nthinlineblack = LineStyle (1, black)\nthinlinegrey = LineStyle (1, grey)\n\nfishhead = EllipseAsset (35, 15, thinlinegrey, fishscale)\nfishtail = PolygonAsset ([(0, 0), (0, 30), (50, 15), (0, 0)], thinlinegrey, fishscale)\nocean = RectangleAsset (600, 1600, thinlineblack, water)\nland = EllipseAsset (100, 75, thinlinegrey, grassy)\nhouse1 = RectangleAsset (50, 50, thinlineblack, plum)\nsailboat = PolygonAsset ([(0, 0), (50, 25), (0, 50), (0, 0)], thinlinegrey, white)\nroof1 = PolygonAsset ([(25, 0), (50, 50), (0, 50), (25, 0)], thinlinegrey, brown)\nship = PolygonAsset ([(0, 0), (100, 0), (75, 20), (25, 20), (0, 0)], thinlineblack, makebrown)\nship2 = PolygonAsset ([(0, 0), (100, 0), (75, 20), (25, 20), (0, 0)], thinlineblack, makebrown2)\npersonhead = CircleAsset (5, thinlinegrey, skinwhite)\npersonbody = RectangleAsset (10, 15, thinlineblack, tunic)\n\nSprite (fishhead, (385, 662.5))\nSprite (fishtail, (300, 650))\nSprite (land, (650, 500))\nSprite(ocean, (200, 500))\nSprite(house1, (650, 400))\nSprite(roof1, (650, 350))\nSprite(ship, (400, 480))\nSprite(ship2, (400, 480))\nSprite(sailboat, (450, 430))\nSprite(personbody, (420, 465))\nSprite(personhead, (425, 458))\n\n\n# add your code here /\\ /\\ /\\\n\n\nmyapp = App()\nmyapp.run()\n","sub_path":"picture.py","file_name":"picture.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289908995","text":"# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"\nsignal\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nimport numpy as np\nfrom onnx import onnx_pb, helper\nfrom onnx.numpy_helper import to_array\nfrom tf2onnx import utils\nfrom tf2onnx.handler import tf_op\nfrom tf2onnx.graph_builder import GraphBuilder\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=unused-argument,missing-docstring\n\ndef make_dft_constant(length, dtype, fft_length):\n n = np.arange(length)\n k = n.reshape((length, 
1)).astype(np.float64)\n mat = np.exp(-2j * np.pi * k * n / length)\n mat = mat[:fft_length // 2 + 1]\n both = np.empty((2,) + mat.shape, dtype=dtype)\n both[0, :, :] = np.real(mat)\n both[1, :, :] = np.imag(mat)\n return both\n\n\nclass CommonFFTOp:\n @classmethod\n def any_version(cls, const_length, opset, ctx, node, **kwargs):\n \"\"\"\n Inspired from `Python implementation of RFFT\n `_.\n\n Complex version:\n\n ::\n\n import numpy as np\n\n def _DFT_cst(N, fft_length):\n n = np.arange(N)\n k = n.reshape((N, 1)).astype(np.float64)\n M = np.exp(-2j * np.pi * k * n / N)\n return M[:fft_length // 2 + 1]\n\n def DFT(x, fft_length=None):\n if len(x.shape) == 1:\n x = x.reshape((-1, 1))\n else:\n x = x.T\n if fft_length is None:\n fft_length = x.shape[0]\n cst = _DFT_cst(x.shape[0], fft_length)\n return np.dot(cst, x).T\n\n Real version, first axis is (real, imag) part:\n\n ::\n\n import numpy as np\n\n def _DFT_real_cst(N, fft_length):\n n = np.arange(N)\n k = n.reshape((N, 1)).astype(np.float64)\n M = np.exp(-2j * np.pi * k * n / N)\n M = M[:fft_length // 2 + 1]\n both = np.empty((2,) + M.shape)\n both[0, :, :] = np.real(M)\n both[1, :, :] = np.imag(M)\n return both\n\n def DFT_real(x, fft_length=None):\n if len(x.shape) == 1:\n x = x.reshape((-1, 1))\n else:\n x = x.T\n if fft_length is None:\n fft_length = x.shape[0]\n cst = _DFT_real_cst(x.shape[0], fft_length)\n res = np.dot(cst, x)\n return np.transpose(res, (0, 2, 1))\n \"\"\"\n supported_dtypes = [\n onnx_pb.TensorProto.FLOAT,\n onnx_pb.TensorProto.FLOAT16,\n onnx_pb.TensorProto.DOUBLE,\n onnx_pb.TensorProto.COMPLEX64,\n onnx_pb.TensorProto.COMPLEX128,\n ]\n consumers = ctx.find_output_consumers(node.output[0])\n consumer_types = set(op.type for op in consumers)\n utils.make_sure(\n consumer_types == {'ComplexAbs'},\n \"Current implementation of RFFT or FFT only allows ComplexAbs as consumer not %r\",\n consumer_types)\n\n input_name = node.input[0]\n onnx_dtype = ctx.get_dtype(input_name)\n utils.make_sure(onnx_dtype in supported_dtypes, \"Unsupported input type.\")\n shape = ctx.get_shape(node.input[0])\n shape_n = shape[-1]\n\n if onnx_dtype in (onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128):\n parent = ctx.get_node_by_output_in_current_graph(node.input[0])\n utils.make_sure(\n parent.type == 'Cast' and parent.get_attr_value('to') == onnx_dtype,\n \"Current implementation of FFT or RFFT assumes the input is real or complex produced \"\n \"by a node Cast just before this one.\")\n input_name = parent.input[0]\n onnx_dtype = ctx.get_dtype(input_name)\n\n np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)\n\n if np_dtype == np.float16:\n res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float16)\n np_dtype = np.float16\n elif np_dtype in (np.float32, np.complex64):\n res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float32)\n np_dtype = np.float32\n else:\n res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float64)\n np_dtype = np.float64\n\n if const_length:\n # RFFT: length of FFT is known, some computation\n # (see function make_dft_constant)\n # can be done at conversion time and stored as constant\n utils.make_sure(len(node.input) == 2, \"Two inputs expected not %r\", len(node.input))\n\n # This input should be a constant.\n fft_length_name = node.input[1]\n node_fft_length = ctx.get_node_by_output(fft_length_name, search_in_parent_graphs=True)\n utils.make_sure(node_fft_length.type == 'Const',\n \"fft_length should be a constant, the other case is not implemented yet.\")\n value = 
node_fft_length.get_attr(\"value\")\n value_array = to_array(value.t)\n utils.make_sure(value_array.shape == (1,), \"Unexpected shape for fft_length (%r)\", value_array.shape)\n fft_length = value_array[0]\n\n # TODO: handle this parameter when onnx.helper.make_node is fixed.\n # Tcomplex = node.get_attr(\"Tcomplex\")\n\n real_imag_part = make_dft_constant(shape_n, np_dtype, fft_length)\n onx_real_imag_part = ctx.make_const(\n name=utils.make_name('cst_rfft_%d' % shape_n), np_val=real_imag_part)\n onx_real_imag_part_name = onx_real_imag_part.name\n else:\n # FFT: length of FFT is unknown, the matrix\n # created by function make_dft_constant must be\n # done in ONNX.\n dyn_shape_all = ctx.make_node(\"Shape\", inputs=[input_name],\n name=utils.make_name('CPLX_' + node.name + 'shape'))\n m1_cst = ctx.make_const(name=utils.make_name('CPLX_m1'), np_val=np.array([-1], dtype=np.int64))\n dyn_shape = ctx.make_node('Gather', inputs=[dyn_shape_all.output[0], m1_cst.name])\n one_tensor = helper.make_tensor(\"value\", res_onnx_dtype, dims=[1], vals=[1])\n cst_1 = ctx.make_node(\"ConstantOfShape\", inputs=[dyn_shape.output[0]], attr={\"value\": one_tensor})\n just_0 = ctx.make_const(name=utils.make_name('CPLX1'), np_val=np.array([0], dtype=np.int64))\n rng1 = ctx.make_node(\"CumSum\", inputs=[cst_1.output[0], just_0.name],\n name=utils.make_name('CPLX_' + node.name + 'range'))\n p1_cst = ctx.make_const(name=utils.make_name('CPLX_p1'), np_val=np.array([1], dtype=np_dtype))\n rng = ctx.make_node(\"Sub\", inputs=[rng1.output[0], p1_cst.name],\n name=utils.make_name('CPLX_' + node.name + 'range'))\n resh_cst = ctx.make_const(name=utils.make_name('CPLX_reshape'), np_val=np.array([1, -1], dtype=np.int64))\n rng_tr1 = ctx.make_node(\"Reshape\", inputs=[rng.output[0], resh_cst.name],\n name=utils.make_name('CPLX_' + node.name + 'range'))\n resh_cst = ctx.make_const(name=utils.make_name('CPLX_reshape'), np_val=np.array([-1, 1], dtype=np.int64))\n rng_tr2 = ctx.make_node(\"Reshape\", inputs=[rng.output[0], resh_cst.name],\n name=utils.make_name('CPLX_' + node.name + 'range'))\n rng_mat = ctx.make_node('MatMul', inputs=[rng_tr2.output[0], rng_tr1.output[0]],\n name=utils.make_name('CPLX_' + node.name + 'range2'))\n pi_cst = ctx.make_const(name=utils.make_name('CPLX_pi'), np_val=np.array([np.pi * 2], dtype=np_dtype))\n angle_pi = ctx.make_node(\"Mul\", inputs=[rng_mat.output[0], pi_cst.name],\n name=utils.make_name('CPLX_' + node.name + 'angle_pi'))\n shape_cast = ctx.make_node('Cast', inputs=[dyn_shape.output[0]], attr={'to': res_onnx_dtype})\n angle_pibn = ctx.make_node(\"Div\", inputs=[angle_pi.output[0], shape_cast.output[0]],\n name=utils.make_name('CPLX_' + node.name + 'angle'))\n if opset >= 13:\n angle = ctx.make_node(\"Unsqueeze\", inputs=[angle_pibn.output[0], just_0.name],\n name=utils.make_name('CPLX_' + node.name + 'angles'))\n else:\n angle = ctx.make_node(\"Unsqueeze\", inputs=[angle_pibn.output[0]],\n name=utils.make_name('CPLX_' + node.name + 'angles'),\n attr={'axes': [0]})\n rng_cos = ctx.make_node(\"Cos\", inputs=[angle.output[0]],\n name=utils.make_name('CPLX_' + node.name + 'cos'))\n rng_sin = ctx.make_node(\"Sin\", inputs=[angle.output[0]],\n name=utils.make_name('CPLX_' + node.name + 'sin'))\n onx_real_imag_part = ctx.make_node(\"Concat\", inputs=[rng_cos.output[0], rng_sin.output[0]],\n name=utils.make_name('CPLX_' + node.name + '_cst_fft'),\n attr={'axis': 0})\n onx_real_imag_part_name = onx_real_imag_part.output[0]\n\n shapei = list(np.arange(len(shape)))\n perm = shapei[:-2] + 
[shapei[-1], shapei[-2]]\n trx = ctx.make_node(\n \"Transpose\", inputs=[input_name], attr=dict(perm=perm),\n name=utils.make_name(node.name + 'tr'))\n\n ctx.remove_node(node.name)\n mult = ctx.make_node(\n \"MatMul\", inputs=[onx_real_imag_part_name, trx.output[0]],\n name=utils.make_name('CPLX_' + node.name + 'rfft'))\n\n new_shape = [2] + list(shape)\n shapei = list(np.arange(len(new_shape)))\n perm = shapei[:-2] + [shapei[-1], shapei[-2]]\n last_node = ctx.make_node(\n \"Transpose\", inputs=[mult.output[0]], attr=dict(perm=perm),\n name=utils.make_name('CPLX_' + node.name + 'rfft'))\n\n ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes()\n\n\n@tf_op(\"RFFT\")\nclass RFFTOp(CommonFFTOp):\n # support more dtype\n\n @classmethod\n def version_1(cls, ctx, node, **kwargs):\n return cls.any_version(True, 1, ctx, node, **kwargs)\n\n\n@tf_op(\"FFT\")\nclass FFTOp(CommonFFTOp):\n # support more dtype\n\n @classmethod\n def version_1(cls, ctx, node, **kwargs):\n return cls.any_version(False, 1, ctx, node, **kwargs)\n\n @classmethod\n def version_13(cls, ctx, node, **kwargs):\n return cls.any_version(False, 13, ctx, node, **kwargs)\n\n\n@tf_op(\"ComplexAbs\")\nclass ComplexAbsOp:\n # support more dtype\n\n @classmethod\n def any_version(cls, opset, ctx, node, **kwargs):\n \"\"\"\n Computes the modules of a complex.\n If the matrix dtype is not complex64 or complex128,\n it assumes the first dimension means real part (0)\n and imaginary part (1, :, :...).\n \"\"\"\n supported_dtypes = [\n onnx_pb.TensorProto.FLOAT,\n onnx_pb.TensorProto.FLOAT16,\n onnx_pb.TensorProto.DOUBLE,\n onnx_pb.TensorProto.COMPLEX64,\n onnx_pb.TensorProto.COMPLEX128,\n ]\n onnx_dtype = ctx.get_dtype(node.input[0])\n utils.make_sure(onnx_dtype in supported_dtypes, \"Unsupported input type.\")\n shape = ctx.get_shape(node.input[0])\n np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)\n utils.make_sure(shape[0] == 2, \"ComplexAbs expected the first dimension to be 2 but shape is %r\", shape)\n\n ind0 = ctx.make_const(name=utils.make_name('cst0'), np_val=np.array([0], dtype=np.int64))\n ind1 = ctx.make_const(name=utils.make_name('cst1'), np_val=np.array([1], dtype=np.int64))\n p2 = ctx.make_const(name=utils.make_name('p2'), np_val=np.array([2], dtype=np_dtype))\n\n real_part = ctx.make_node(\n 'Gather', inputs=[node.input[0], ind0.name], attr=dict(axis=0),\n name=utils.make_name('Real_' + node.name))\n imag_part = ctx.make_node(\n 'Gather', inputs=[node.input[0], ind1.name], attr=dict(axis=0),\n name=utils.make_name('Imag_' + node.name))\n\n real_part2 = ctx.make_node(\n 'Pow', inputs=[real_part.output[0], p2.name],\n name=utils.make_name(real_part.name + 'p2p'))\n\n imag_part2 = ctx.make_node(\n 'Pow', inputs=[imag_part.output[0], p2.name],\n name=utils.make_name(imag_part.name + 'p2p'))\n\n ctx.remove_node(node.name)\n add = ctx.make_node(\n \"Add\", inputs=[real_part2.output[0], imag_part2.output[0]],\n name=utils.make_name('ComplexAbs_' + node.name))\n\n squeezed = GraphBuilder(ctx).make_squeeze(\n {'data': add.output[0], 'axes': [0]}, name=utils.make_name('ComplexAbs' + node.name), return_node=True)\n\n last_node = ctx.make_node(\n \"Sqrt\", inputs=squeezed.output[:1],\n name=utils.make_name('ComplexAbs' + node.name),\n shapes=[shape[1:]], dtypes=[onnx_dtype])\n\n ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes()\n\n @classmethod\n def version_1(cls, ctx, node, **kwargs):\n cls.any_version(1, ctx, node, **kwargs)\n\n @classmethod\n def version_13(cls, ctx, 
node, **kwargs):\n cls.any_version(13, ctx, node, **kwargs)\n","sub_path":"tf2onnx/onnx_opset/signal.py","file_name":"signal.py","file_ext":"py","file_size_in_byte":13641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537591452","text":"##############################################################################\n#\n# Copyright (c) 2004-2009 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Doc tests for the pagetemplate's 'engine' module\n\"\"\"\nimport doctest\nimport re\nimport unittest\nimport zope.pagetemplate.engine\nfrom zope.testing.renormalizing import RENormalizing\nfrom zope.component.testing import PlacelessSetup\n\nclass EngineTests(PlacelessSetup,\n unittest.TestCase):\n\n def _makeOne(self):\n return zope.pagetemplate.engine._Engine()\n\n def test_function_namespaces_return_secured_proxies(self):\n # See https://bugs.launchpad.net/zope3/+bug/98323\n from zope.proxy import isProxy\n engine = self._makeOne()\n namespace = engine.getFunctionNamespace('test')\n self.assertTrue(isProxy(namespace))\n\n def test_getContext_namespace(self):\n engine = self._makeOne()\n ctx = engine.getContext({'a': 1}, b=2, request=3, context=4)\n self.assertEqual(ctx.getValue('a'), 1)\n self.assertEqual(ctx.getValue('b'), 2)\n self.assertEqual(ctx.getValue('request'), 3)\n self.assertEqual(ctx.getValue('context'), 4)\n\nclass DummyEngine(object):\n\n def getTypes(self):\n return {}\n\nclass DummyContext(object):\n\n _engine = DummyEngine()\n\n def __init__(self, **kw):\n self.vars = kw\n\nclass ZopePythonExprTests(unittest.TestCase):\n\n def test_simple(self):\n from zope.pagetemplate.engine import ZopePythonExpr\n expr = ZopePythonExpr('python', 'max(a,b)', DummyEngine())\n self.assertEqual(expr(DummyContext(a=1, b=2)), 2)\n\n def test_allowed_module_name(self):\n from zope.pagetemplate.engine import ZopePythonExpr\n expr = ZopePythonExpr('python', '__import__(\"sys\").__name__',\n DummyEngine())\n self.assertEqual(expr(DummyContext()), 'sys')\n\n @unittest.skipUnless(zope.pagetemplate.engine.HAVE_UNTRUSTED,\n \"Needs untrusted\")\n def test_forbidden_module_name(self):\n from zope.pagetemplate.engine import ZopePythonExpr\n from zope.security.interfaces import Forbidden\n expr = ZopePythonExpr('python', '__import__(\"sys\").exit',\n DummyEngine())\n self.assertRaises(Forbidden, expr, DummyContext())\n\n @unittest.skipUnless(zope.pagetemplate.engine.HAVE_UNTRUSTED,\n \"Needs untrusted\")\n def test_disallowed_builtin(self):\n from zope.pagetemplate.engine import ZopePythonExpr\n expr = ZopePythonExpr('python', 'open(\"x\", \"w\")', DummyEngine())\n self.assertRaises(NameError, expr, DummyContext())\n\n\nclass TestZopeContext(PlacelessSetup,\n unittest.TestCase):\n\n def _makeOne(self):\n return zope.pagetemplate.engine.ZopeContext(None, {})\n\n def test_translate(self):\n ctx = self._makeOne()\n self.assertEqual(ctx.translate('msgid'), 'msgid')\n\n def test_evaluate_error(self):\n ctx = self._makeOne()\n with 
self.assertRaisesRegexp(zope.pagetemplate.engine.InlineCodeError,\n \"Inline Code Evaluation is deactivated\"):\n ctx.evaluateCode('lang', 'code')\n\n def test_evaluate_interpreter_not_importable(self):\n ctx = self._makeOne()\n ctx.evaluateInlineCode = True\n with self.assertRaises(ImportError):\n ctx.evaluateCode('lang', 'code')\n\n def test_evaluate_interpreter_not_found(self):\n get = zope.pagetemplate.engine._get_iinterpreter\n from zope import interface\n class IInterpreter(interface.Interface):\n pass\n def mock_get():\n return IInterpreter\n\n ctx = self._makeOne()\n ctx.evaluateInlineCode = True\n zope.pagetemplate.engine._get_iinterpreter = mock_get\n try:\n with self.assertRaisesRegexp(zope.pagetemplate.engine.InlineCodeError,\n \"No interpreter named\"):\n ctx.evaluateCode('lang', 'code')\n finally:\n zope.pagetemplate.engine._get_iinterpreter = get\n\n def test_evaluate_interpreter_found(self):\n get = zope.pagetemplate.engine._get_iinterpreter\n from zope import interface\n from zope import component\n class IInterpreter(interface.Interface):\n pass\n def mock_get():\n return IInterpreter\n\n @interface.implementer(IInterpreter)\n class Interpreter(object):\n def evaluateRawCode(self, code, globs):\n globs['new'] = code\n return 42\n\n component.provideUtility(Interpreter(), name='lang')\n\n ctx = self._makeOne()\n ctx.evaluateInlineCode = True\n zope.pagetemplate.engine._get_iinterpreter = mock_get\n try:\n result = ctx.evaluateCode('lang', 'code')\n finally:\n zope.pagetemplate.engine._get_iinterpreter = get\n\n self.assertEqual(result, 42)\n self.assertEqual('code', ctx.getValue('new'))\n\n\nclass TestTraversableModuleImporter(unittest.TestCase):\n\n def test_traverse_fails(self):\n from zope.traversing.interfaces import TraversalError\n\n tmi = zope.pagetemplate.engine.TraversableModuleImporter()\n with self.assertRaises(TraversalError):\n tmi.traverse('zope.cannot exist', ())\n\n with self.assertRaises(TraversalError):\n tmi.traverse('zope.pagetemplate.engine.DNE', ())\n\n\n with self.assertRaises(TraversalError):\n tmi.traverse('pickle.no_sub_module', ())\n\n\nclass TestAppPT(unittest.TestCase):\n\n def test_apppt_engine(self):\n self.assertIs(zope.pagetemplate.engine.AppPT().pt_getEngine(),\n zope.pagetemplate.engine.Engine)\n\n def test_trustedapppt_engine(self):\n self.assertIs(zope.pagetemplate.engine.TrustedAppPT().pt_getEngine(),\n zope.pagetemplate.engine.TrustedEngine)\n\n\ndef test_suite():\n\n checker = RENormalizing([\n # Python 3 includes module name in exceptions\n (re.compile(r\"zope.security.interfaces.ForbiddenAttribute\"),\n \"ForbiddenAttribute\"),\n (re.compile(r\"\"),\n \"\"),\n (re.compile(r\"\"), \"\"),\n # PyPy/pure-Python implementation\n (re.compile(r\"\"),\n \"\"),\n ])\n\n suite = unittest.defaultTestLoader.loadTestsFromName(__name__)\n suite.addTest(doctest.DocTestSuite('zope.pagetemplate.engine',\n checker=checker))\n return suite\n\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n","sub_path":"day-2018-04-02/myproject/venv/lib/python2.7/site-packages/zope/pagetemplate/tests/test_engine.py","file_name":"test_engine.py","file_ext":"py","file_size_in_byte":7267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"378485437","text":"class Solution:\n # define function smallerNumbersThanCurrent, that takes in the class instance, and a list of \n # integers called nums, and returns a list of integers\n def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:\n # 
create an empty dict called indices\n indices = {}\n # sorted() sorts a list in ascending order by default\n # for each item and its index in the list nums, sorted\n for idx, num in enumerate(sorted(nums)):\n # setdefault() will set the key num in dict indices to idx if the key does not already have a value\n indices.setdefault(num, idx)\n # return a list -- for each num in list nums, add to the returned list the value of indices[num]\n return [indices[num] for num in nums]","sub_path":"how-many-numbers-are-smaller-than-the-current-number-48ms-14.1mb.py","file_name":"how-many-numbers-are-smaller-than-the-current-number-48ms-14.1mb.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619555354","text":"from xgboost import XGBClassifier, callback\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport xgboost as xgb\nfrom xgboost.compat import XGBLabelEncoder\nimport pickle\nimport warnings\n\nimport pandas as pd\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_classif\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import precision_recall_fscore_support as score\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\n\ndef select_best(x, y, k):\n selector = SelectKBest(f_classif, k=k)\n selector.fit(x, y)\n mask = selector.get_support() # list of booleans\n new_features = [] # The list of your K best features\n\n for contained, feature in zip(mask, x.columns.values):\n if contained:\n new_features.append(feature)\n else:\n print(\"Dropped {}\".format(feature))\n\n return pd.DataFrame(selector.transform(x), columns=new_features)\n\n\ndef cv(data, target, bucket_target, nthread, n_buckets):\n # data = data.loc[data['AvgMethodsLines'] > .5]\n classes = np.unique(data[target])\n n_classes = len(classes)\n le = XGBLabelEncoder().fit(data[target])\n\n data_y = data[target]\n data_x = data.drop([target, bucket_target, 'Path', 'NominalTabsLeadLines', 'NominalPunctuationBeforeBrace'], axis=1)\n\n folds = []\n for b in range(n_buckets):\n indices = data[bucket_target] == b\n folds.append((np.argwhere(np.invert(indices)), np.argwhere(indices)))\n\n param = {\n 'objective': \"multi:softprob\",\n 'eval_metric': ['mlogloss', 'merror'],\n 'seed': 239,\n 'eta': 0.2,\n 'max_depth': 3,\n 'tree_method': 'hist',\n 'silent': 1,\n 'num_class': n_classes,\n 'nthread': nthread\n }\n\n num_round = 20\n\n xg_train = xgb.DMatrix(data_x, label=le.transform(data_y))\n xgb.cv(param, xg_train, num_round, folds=folds,\n metrics={'merror', 'mlogloss'}, seed=239,\n callbacks=[xgb.callback.print_evaluation(show_stdv=True)])\n\n\ndef run_train(data, target, bucket_target, nthread, n_buckets):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n counts = data[target].value_counts()\n n_classes = len(counts)\n le = XGBLabelEncoder().fit(data[target])\n\n big = []\n small = []\n for i in range(n_classes):\n if counts[le.inverse_transform(i)] < 100:\n small.append(le.inverse_transform(i))\n else:\n big.append(le.inverse_transform(i))\n data = data.loc[data[target].isin(small)]\n\n data_y = data[target]\n data_x = data.drop([target, bucket_target, 'Path', 'NominalTabsLeadLines', 'NominalPunctuationBeforeBrace'], axis=1)\n\n # print(min(data_x['AvgMethodsLines']), max(data_x['AvgMethodsLines']))\n # data_x = select_best(data_x, data_y, data_x.shape[1] // 2)\n # pca = 
PCA(n_components=data_x.shape[1] // 2)\n # data_x = StandardScaler().fit_transform(data_x)\n # data_x = pd.DataFrame(pca.fit_transform(data_x))\n # print(data_x.shape)\n\n train_indices = data[bucket_target].isin(range(4, n_buckets))\n test_indices = data[bucket_target].isin(range(0, 4))\n\n replication_coeff = 6\n\n rep_indices = data[target].isin(small) & train_indices\n\n replicate_x = data_x.loc[rep_indices]\n # x_train = data_x.loc[train_indices]\n x_train = data_x.loc[train_indices].append([replicate_x] * replication_coeff, ignore_index=True)\n\n replicate_y = data_y.loc[rep_indices]\n # y_train = le.transform(data_y.loc[train_indices])\n y_train = le.transform(data_y.loc[train_indices].append([replicate_y] * replication_coeff, ignore_index=True))\n\n x_test = data_x.loc[test_indices]\n y_test = le.transform(data_y.loc[test_indices])\n\n weights_train = np.ones(len(y_train))\n max_cnt = max(counts)\n for i, cls in enumerate(y_train):\n if le.inverse_transform(cls) in small:\n weights_train[i] = max_cnt / (counts[le.inverse_transform(cls)] * replication_coeff)\n # print(weights_train)\n\n xg_train = xgb.DMatrix(x_train, label=y_train, weight=weights_train)\n xg_test = xgb.DMatrix(x_test, label=y_test)\n\n param = {\n 'objective': \"multi:softprob\",\n 'eval_metric': ['mlogloss', 'merror'],\n 'seed': 239,\n 'eta': 0.2,\n 'max_depth': 3,\n 'tree_method': 'hist',\n 'silent': 1,\n 'num_class': n_classes,\n 'nthread': nthread\n }\n\n watchlist = [(xg_train, 'train'), (xg_test, 'test')]\n num_round = 150\n boost = xgb.train(param, xg_train, num_round, watchlist)\n pred_prob = boost.predict(xg_test).reshape(len(y_test), n_classes)\n pred_label = np.argmax(pred_prob, axis=1)\n error_rate = np.sum(pred_label != y_test) / len(y_test)\n print('Test error using softprob = {}'.format(error_rate))\n print('Baseline = {}'.format(0.25326370757180156))\n\n precision, recall, fscore, support = score(y_test, pred_label)\n np.set_printoptions(precision=4, linewidth=150)\n print('precision: {}'.format(precision))\n print('recall: {}'.format(recall))\n print('fscore: {}'.format(fscore))\n print('support: {}'.format(support))\n\n old_matrix = np.array([[82, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0],\n [7, 4, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 7, 0, 3, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 10, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 9, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 1, 0, 0, 2, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 2, 1, 0, 0, 1, 0, 1, 7, 2, 0, 0, 0, 0, 0, 0, 0],\n [9, 0, 0, 0, 0, 1, 0, 0, 1, 0, 72, 0, 0, 0, 1, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 10, 0, 1, 0, 0, 0, 0],\n [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 9, 0, 0, 0, 0],\n [5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 5, 0, 2, 0],\n [7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 4, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 2, 0, 3, 0, 0, 6, 0],\n [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 1, 0, 3]]\n )\n print(confusion_matrix(y_test, pred_label))\n print()\n print(confusion_matrix(y_test, pred_label) - old_matrix)\n\n cnt_s_b = 0\n cnt_b_s = 0\n for pred, corr in zip(pred_label, y_test):\n if counts[le.inverse_transform(corr)] < 100 and counts[le.inverse_transform(pred)] > 100:\n cnt_s_b += 1\n\n if 
counts[le.inverse_transform(corr)] > 100 and counts[le.inverse_transform(pred)] < 100:\n cnt_b_s += 1\n\n print('Number of small->big mistakes: {}'.format(cnt_s_b))\n print('Number of big->small mistakes: {}'.format(cnt_b_s))\n\n\ndef xgb_train_buckets(data, target, bucket_target='Bucket', nthread=4, n_buckets=10):\n print(\"Start training with {} buckets...\".format(n_buckets))\n # cv(data, target, bucket_target, nthread, n_buckets)\n run_train(data, target, bucket_target, nthread, n_buckets)\n\n\ndef train_xgb_clf(x_train, y_train, x_test, y_test, target, nthread=4):\n print(\"Start training...\")\n\n y_train = y_train[target]\n classes = np.unique(y_train)\n n_classes = len(classes)\n\n le = XGBLabelEncoder().fit(y_train)\n training_labels = le.transform(y_train)\n\n param = {\n 'max_depth': 3,\n 'eta': 0.2,\n 'min_child_weight': 1,\n \"n_estimators\": 200,\n 'tree_method': 'hist',\n 'silent': 1,\n 'verbose_eval': False,\n 'objective': \"multi:softprob\",\n 'num_class': n_classes,\n 'nthread': nthread\n }\n\n num_round = 200\n\n dtrain = xgb.DMatrix(x_train, label=training_labels)\n tic = time.time()\n model = xgb.train(param, dtrain, num_round)\n print('passed time with xgb (hist, cpu): %.3fs' % (time.time() - tic))\n\n classes = set(y_test)\n dtest = xgb.DMatrix(x_test)\n # print(model.get_fscore())\n # print(model.get_score())\n predictions = model.predict(dtest)\n print(predictions)\n column_indexes = np.argmax(predictions, axis=1)\n predictions = le.inverse_transform(column_indexes)\n for i, prediction in enumerate(predictions):\n best = -100\n prediction = round(prediction)\n for answer in classes:\n if abs(answer - prediction) < abs(best - prediction):\n best = answer\n predictions[i] = best\n print(predictions)\n # print(y_test)\n print(accuracy_score(y_test, predictions))\n\n return accuracy_score(y_test, predictions)\n\n\ndef train_sklearn_xgb_classifier(x_train, y_train, x_test, y_test, target, path_to_classifier=None, nthread=4):\n print(\"Start training...\")\n\n params = {\n \"n_estimators\": 200,\n 'tree_method': 'hist',\n 'max_depth': 3,\n 'learning_rate': 0.2,\n 'n_jobs': nthread,\n 'eval_metric': ['mlogloss', 'merror']\n }\n\n model = XGBClassifier(**params)\n tic = time.time()\n model.fit(x_train, y_train[target], eval_set=[(x_train, y_train[target]), (x_test, y_test[target])], verbose=True,\n early_stopping_rounds=10)\n print('passed time with XGBClassifier (hist, cpu): %.3fs' % (time.time() - tic))\n\n if path_to_classifier:\n pickle.dump(model, open(path_to_classifier, \"wb\"))\n\n # feature_importances = sorted(zip(x_train.columns.values, model.feature_importances_), key=lambda x: x[1])\n # print(list(map(lambda p: p[0], feature_importances[-10:])))\n\n # classes = model.classes_\n # predictions = model.predict_proba(x_test)\n # n_classes = model.n_classes_\n # positions = np.zeros(n_classes)\n # pos_misses = []\n # correctness = []\n # for prediction, (index, row) in zip(predictions, y_test.iterrows()):\n # answer = row[target]\n # proba = prediction.copy()\n # prediction = np.argsort(prediction)[::-1]\n # if classes[prediction[0]] != answer:\n # # print(\"Missed on: {} ({}), decided: {}\".format(row['Path'], answer, classes[prediction[0]]))\n # correctness.append(0)\n # else:\n # correctness.append(1)\n # for i in range(n_classes):\n # if classes[prediction[i]] == answer:\n # positions[i] += 1\n # if i != 0:\n # pos_misses.append(proba)\n # break\n #\n # pos_misses = np.array(pos_misses)\n # for i in range(1, n_classes):\n # positions[i] += positions[i - 1]\n # 
print(positions)\n # accuracy = positions / float(len(y_test))\n # plt.plot(np.arange(1, n_classes + 1, 1), accuracy, 'b-')\n # plt.show()\n # return correctness\n","sub_path":"workflow/models/xgb.py","file_name":"xgb.py","file_ext":"py","file_size_in_byte":11159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321804800","text":"import appconfig\nimport toolsradarcas\nfrom combinGPR import GPRTrace\nfrom connexions.gpsconnexion import GPSConnexion\nimport pynmea2\nimport numpy as np\n\nfrom value import respath\n\n\ndef test_set_radar():\n pass\n\n\ndef test_load_radar_file():\n a = toolsradarcas.loadFile(\"radarMocks512.pkl\")\n print(len(a))\n print(type(a))\n print(type(a[0]))\n print(len(a[0]))\n print(a)\n\n\ndef test_save_gps_file():\n a = GPSConnexion(appconfig.basic_gps_config())\n a.connect()\n data = a.recv(recLineNum=9 * 10)\n a.disconnect()\n fname = toolsradarcas.save_data_pickle(data, instType='gps')\n return fname\n\n\ndef test_clean_gps_data():\n test = GPSConnexion(appconfig.basic_gps_config())\n if test.connect() == 0:\n data = test.recv(9 * 10)\n clean = GPSConnexion.cut_unknown_bytes(data)\n print(clean)\n f = open(respath.DEFAULT_DATA_NAME + \"testGPS.txt\", \"w\")\n f.write(clean)\n f.close()\n else:\n print(\"Connect failure.\")\n\n\ndef test_load_gps_file(filename=''):\n if filename == '':\n filename = '2020-12-31-12-47-45-gps.pkl'\n gpsData = toolsradarcas.loadFile(filename)\n return gpsData\n\n\n# 36 here means ==> '$'\n# And [index:-2] means slice chain from $ to checksum, \\r is at the last position\ndef test_parse_gps(gpsData):\n gpgga = []\n for i in gpsData:\n try:\n str(i, encoding=\"utf-8\")\n except Exception:\n for index, j in enumerate(i):\n if j == 36:\n gga = str(i[index:-2], encoding='utf8')\n gpgga.append(gga)\n continue\n return gpgga\n\n\ndef test_combin_GPR():\n radar = test_load_gps_file(\"2020-12-31-15-29-39-radar.pkl\") # 68 lines, 1024 bytes, 512points\n combin = GPRTrace()\n # Generating a bunch of gps data\n gpsPoint = np.random.rand(len(radar), 3)\n data = combin.pack_GRP_data(gpsPoint, radar)\n return data\n\n\ndef test_compare_feats():\n feats = toolsradarcas.loadFile(\"2021-01-07-18-33-37-feats1.pkl\")\n print(type(feats))\n print(len(feats))\n\n# f = test_save_gps_file()\n# gps = test_load_gps_file(\"2021-01-05-10-58-40-gps.pkl\")\n# ggas = GPSConnexion.catch_GGA_data(gps)\n# print(ggas)\n# print(len(ggas))\n# for i in ggas:\n# record = pynmea2.parse(i)\n# print(record.lat)\n# print(record.lon)\n# print(record.altitude)\n# count = 0\n# for i in gps:\n# res = GPSConnexion.check_GGA_data(i)\n# if len(res) != 0:\n# print(res)\n# count += 1\n# print(count)\n\n# print(GPSConnexion.cut_unknown_bytes(gps))\n# GPSConnexion.cut_unknown_bytes(gps)\n# radar = test_combin_GPR()\n# radar = tools.loadFile(\"2020-12-31-09-46-40.pkl\")\n# print(len(radar))\n# print(len(radar[0]))\n# print(tools.byte2signedInt(radar[0]))\n# tools.saveDataGPR(radar)\n# print(1065+(90+1024)*68)\n\n# test_load_radar_file()\ntest_compare_feats()","sub_path":"test/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"175070216","text":"\"\"\"MxOnline URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url,include\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nimport xadmin\n\nfrom users.views import *\n\nurlpatterns = [\n    url(r'^xadmin/', xadmin.site.urls),\n    url('^$',TemplateView.as_view(template_name=\"index.html\"),name=\"index\"),\n    # url('^login/$',TemplateView.as_view(template_name=\"login.html\"),name=\"login\"),\n    url('^login/$',LoginView.as_view(),name=\"login\"),\n    url('^register/$', RegisterView.as_view(), name=\"register\"),\n    url(r'^captcha/', include('captcha.urls')),\n    url(r'^active/(?P<active_code>.*)/$', ActiveUserView.as_view(), name=\"user_active\"),\n    url('^forgetpwd/$', ForgetView.as_view(), name=\"forgetpwd\"),\n    url('^resetpwd/(?P<active_code>.*)/$', ResetPwdView.as_view(), name=\"resetpwd\"),\n    url('^modify_pwd/$', ModifyPwdView.as_view(), name=\"modify_pwd\"),\n    # url('^updatepwd/$', ResetPwdView.as_view(), name=\"updatepwd\"),\n    url('^logout/$', LogoutView.as_view(), name=\"logout\"),\n]\n","sub_path":"MxOnline/MxOnline/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"454825982","text":"#!/usr/bin/env python3\n\nfrom flask import Flask, render_template, url_for\nimport requests\nimport json\nimport os\nimport re\nimport sys\n\nsensu_api = os.getenv('SENSU_API', \"\")\ndebug = os.getenv('DEBUG', False)\nsensu_apis = sensu_api.split(\",\")\nrefresh_interval = os.getenv('REFRESH_INTERVAL', 15)\n\napp = Flask(__name__)\n\nif not sensu_api:\n    print(\"SENSU_API unset. 
Aborting\")\n sys.exit()\n\n@app.route('/', methods=['GET'])\ndef index():\n\n i = 0\n events = {}\n for api in sensu_apis:\n api_failed = False\n try:\n response = requests.get(api)\n if response.ok:\n data = response.json()\n for event in data:\n events[i] = {}\n events[i]['client_name'] = event['client']['name']\n events[i]['check_name'] = event['check']['name']\n if event['check']['status'] == 2:\n events[i]['status'] = \"critical\"\n elif event['check']['status'] == 1:\n events[i]['status'] = \"warning\"\n elif event['check']['status'] > 2:\n events[i]['status'] = \"unknown\"\n else:\n events[i]['status'] = \"ok\"\n events[i]['output'] = event['check']['output']\n if event['silenced']:\n events[i]['silenced'] = \"Yes\"\n else:\n events[i]['silenced'] = \"No\"\n events[i]['silenced_by'] = event['silenced_by']\n i = i + 1\n else:\n api_failed = True\n except requests.exceptions.RequestException:\n api_failed = True\n if api_failed:\n events[i] = {}\n events[i]['client_name'] = api.split('/')[2]\n events[i]['check_name'] = 'Sensu API'\n events[i]['status'] = \"critical\"\n events[i]['output'] = \"Unable to connect to Sensu API: %s\" % api\n events[i]['silenced'] = 'N/A'\n events[i]['silenced_by'] = 'N/A'\n i = i + 1\n unsilenced_critical_found = False\n unsilenced_warning_found = False\n unsilenced_unknown_found = False\n for event in events:\n if events[event]['status'] == \"critical\" and events[event]['silenced'] != \"Yes\":\n unsilenced_critical_found = True\n elif events[event]['status'] == \"warning\" and events[event]['silenced'] != \"Yes\":\n unsilenced_warning_found = True\n elif events[event]['status'] == \"unknown\" and events[event]['silenced'] != \"Yes\":\n unsilenced_unknown_found = True\n if unsilenced_critical_found:\n background_color_class = \"background_critical\"\n elif unsilenced_warning_found:\n background_color_class = \"background_warning\"\n elif unsilenced_unknown_found:\n background_color_class = \"background_unknown\"\n else:\n background_color_class = \"background_ok\"\n\n return render_template('index.html',\n events=events,\n refresh_interval=refresh_interval,\n sensu_apis=sensu_apis,\n background_color_class=background_color_class)\n\n\nif __name__ == '__main__':\n app.debug = debug\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"133947583","text":"import ast\r\nimport os\r\nimport collections\r\n\r\nfrom nltk import pos_tag\r\n\r\nTOP_COMMON = 200\r\n\r\n\r\ndef flat(_list):\r\n \"\"\" [(1,2), (3,4)] -> [1, 2, 3, 4]\"\"\"\r\n return sum([list(item) for item in _list], [])\r\n\r\n\r\ndef is_verb(word):\r\n if not word:\r\n return False\r\n pos_info = pos_tag([word])\r\n return pos_info[0][1] == 'VB'\r\n\r\n\r\ndef get_trees(_path, language, with_filenames=False, with_file_content=False):\r\n file_names = []\r\n trees = []\r\n for dirname, dirs, files in os.walk(_path, topdown=True):\r\n for file in files:\r\n if file.endswith(language):\r\n file_names.append(os.path.join(dirname, file))\r\n\r\n print('total %s files' % len(file_names))\r\n for file_name in file_names:\r\n with open(file_name, 'r', encoding='utf-8') as attempt_handler:\r\n main_file_content = attempt_handler.read()\r\n\r\n try:\r\n tree = ast.parse(main_file_content)\r\n except SyntaxError as e:\r\n print(e)\r\n tree = None\r\n\r\n if with_filenames:\r\n if with_file_content:\r\n trees.append((file_name, 
main_file_content, tree))\r\n            else:\r\n                trees.append((file_name, tree))\r\n        else:\r\n            trees.append(tree)\r\n\r\n    # print('trees generated')\r\n    return trees\r\n\r\n\r\ndef get_all_names(tree):\r\n    return [node.id for node in ast.walk(tree) if isinstance(node, ast.Name)]\r\n\r\n\r\ndef get_verbs_from_function_name(function_name):\r\n    return [word for word in function_name.split('_') if is_verb(word)]\r\n\r\n\r\ndef split_snake_case_name_to_words(name):\r\n    return [n for n in name.split('_') if n]\r\n\r\n\r\ndef get_all_words_in_path(path, language):\r\n    trees = [t for t in get_trees(path, language) if t]\r\n    function_names = [f for f in flat([get_all_names(tree) for tree in trees]) \\\r\n                      if not (f.startswith('__') and f.endswith('__'))]\r\n    return flat([split_snake_case_name_to_words(function_name) for function_name in function_names])\r\n\r\n\r\ndef get_top_verbs_in_path(path, language):\r\n    trees = [t for t in get_trees(path, language) if t]\r\n    extracted_functions = [f for f in flat([[node.name.lower() \\\r\n        for node in ast.walk(tree) if isinstance(node, ast.FunctionDef)] \\\r\n        for tree in trees]) if not (f.startswith('__') and f.endswith('__'))]\r\n\r\n    # print('functions extracted')\r\n    verbs = flat([get_verbs_from_function_name(function_name) for function_name in extracted_functions])\r\n    return collections.Counter(verbs).most_common(TOP_COMMON)\r\n\r\n\r\ndef get_top_functions_names_in_path(path, language):\r\n    trees = get_trees(path, language)\r\n    functions_names = [f for f in flat([[node.name.lower() for node in ast.walk(tree) \\\r\n        if isinstance(node, ast.FunctionDef)] for tree in trees]) \\\r\n        if not (f.startswith('__') and f.endswith('__'))]\r\n    return collections.Counter(functions_names).most_common(TOP_COMMON)\r\n","sub_path":"analyse_file_names.py","file_name":"analyse_file_names.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"252426913","text":"from itertools import permutations, combinations, product, combinations_with_replacement\nfrom collections import Counter\nfrom math import gcd, factorial, pi\n\ndata = [\"a\", \"b\", \"c\"]\n\nresult = list(permutations(data, 2))\nresult2 = list(combinations(data, 3))\n\nprint(f\"1{result} \\n\\n2{result2}\\n\\n\")\n\nresult3 = list(product(data, repeat=2))\nresult4 = list(combinations_with_replacement(data, 2))\n\nprint(f\"3{result3} \\n\\n4{result4}\\n\\n\")\n\n\n# Count how many of each element there are\n# A Counter can be converted to a dictionary\ncounter = Counter(result3)\nprint(counter[\"a\", \"a\"])\nprint(dict(Counter(result4)), \"\\n\\n\")\n\n# Find the GCD and LCM (a * b // gcd(a, b) gives the LCM)\nprint((lambda a, b: a * b // gcd(a, b))(12, 35))\n","sub_path":"coding_test/L_use_of_libraries.py","file_name":"L_use_of_libraries.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"23543099","text":"import numpy as np\nfrom anytree import Node, LevelGroupOrderIter, RenderTree\n\n\nclass CartNode(Node):\n    def __init__(self, name, parent, feature, division, val, is_left):\n        super().__init__(name, parent)\n        self.feature = feature\n        self.division = division\n        self.val = val\n        self.is_left = is_left\n\n\nclass RegressionTree(object):\n    def __init__(self, x, y):\n        self.x = np.array(x)\n        self.y = np.array(y)\n        self.feature_num = len(x[0])\n        self.div_list = np.zeros([self.feature_num])\n        self.feature_list = [i for i in range(self.feature_num)]\n        self.num_threshold = 0\n        self.val_threshold = 0\n        self.root = None\n        
self.build_node(self.x, self.y, None, self.feature_list, -1, is_left=False, is_root=True)\n\n @staticmethod\n def get_divide(x, feature):\n try:\n source = np.unique(x[:, feature])\n except IndexError:\n source = np.unique(x[feature])\n if len(source) == 1:\n return source\n interval = source[-1] - source[0]\n return np.array([i for i in np.arange(source[0], source[-1], np.divide(interval, 5))])\n\n @staticmethod\n def get_most_frequent(x):\n return np.argmax(np.bincount(x))\n\n def build_node(self, x, y, parent, feature_list, val, is_left, is_root=False):\n if len(x) <= self.num_threshold:\n return\n if len(feature_list) == 1:\n self.build_leaf(\"leaf val:%f\" % val, parent, val, is_left)\n return\n min_val = 9999999\n best_j = best_s = best_j_ind = 0\n div_val = (0, 0)\n ind1, ind2 = (0, 0)\n for j in range(len(feature_list)):\n feature = feature_list[j]\n divide = self.get_divide(x, feature)\n for i in range(len(divide)):\n s = divide[i]\n res, c_res, ind_res = self.region_loss(x[:, feature], y, s)\n if res < min_val:\n best_j = feature\n best_j_ind = j\n best_s = s\n min_val = res\n div_val = c_res\n ind1, ind2 = ind_res\n node = CartNode('feature %d, div %f' % (best_j, best_s), parent, best_j, best_s, -1, is_left=is_left)\n if is_root:\n self.root = node\n part_feature_list = np.delete(feature_list, best_j_ind)\n self.build_node(x[ind1], y[ind1], node, part_feature_list, div_val[0], is_left=True)\n self.build_node(x[ind2], y[ind2], node, part_feature_list, div_val[1], is_left=False)\n\n @staticmethod\n def build_leaf(name, parent, val, is_left):\n node = CartNode(name, parent, -1, -1, val, is_left=is_left)\n return node\n\n @staticmethod\n def region_loss(part_x, part_y, div):\n def loss(y1, y2):\n return np.sum(np.square(np.subtract(y1, y2)))\n ind1 = part_x <= div\n ind2 = part_x > div\n c1 = np.average(part_y[ind1]) if len(part_y[ind1]) > 0 else 0\n c2 = np.average(part_y[ind2]) if len(part_y[ind2]) > 0 else 0\n return loss(part_y[ind1], c1) + loss(part_y[ind2], c2), (c1, c2), (ind1, ind2)\n\n def inference(self, x):\n nodes = np.array([[node for node in children] for children in LevelGroupOrderIter(self.root)])\n parent = \"\"\n is_left = False\n for level in nodes:\n for node in level:\n if node.parent is None or node.parent.name == parent:\n if is_left == node.is_left:\n if node.is_leaf:\n return node.val\n val = x[node.feature]\n parent = node.name\n is_left = True if val <= node.division else False\n\nif __name__ == '__main__':\n # examples = np.array([\n # [0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 0],\n # [1, 0, 0, 0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 1, 2], [1, 0, 1, 2],\n # [2, 0, 1, 2], [2, 0, 1, 1], [2, 1, 0, 1], [2, 1, 0, 2], [2, 0, 0, 0]\n # ])\n # labels = np.array([\n # 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0\n # ])\n examples = np.array(\n [[3.2, 4.5, 2.1, 1.6, 5.3], [2.6, 3.2, 5.5, 6.1, 4.1], [5.2, 1.2, 3.6, 4.2, 2.6]]\n )\n labels = np.array([2.1, 3.2, 4.6])\n tree = RegressionTree(examples, labels)\n for pre, fill, n in RenderTree(tree.root):\n print('%s%s val:%f' % (pre, n.name, n.val))\n print(tree.inference(examples[2]))\n\n\n\n","sub_path":"cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166508628","text":"from __future__ import print_function, absolute_import, division\n\n\ndef get_item_from_module(module_name, item_name):\n \"\"\"Load classes/modules/functions/... 
from given config\"\"\"\n    try:\n        module = __import__(module_name, fromlist=[item_name])\n        item = getattr(module, item_name)\n    except ImportError as error:\n        message = 'Module \"{modulename}\" could not be loaded: {e}'\n        raise Exception(message.format(\n            modulename=module_name, e=error))\n    except AttributeError as error:\n        message = 'No item \"{itemname}\" in module \"{modulename}\": {e}'\n        raise Exception(message.format(\n            modulename=module_name,\n            itemname=item_name,\n            e=error))\n    return item\n","sub_path":"src/main/python/pils/pils.py","file_name":"pils.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"229306161","text":"# Glitch Bot\n# Incorrect/Incomplete Solution\n\ndest = input().split(\" \")\ninstructions = int(input())\nxdest = int(dest[0])\nydest = int(dest[1])\nsteps = []\nfor i in range(instructions):\n    print(i + 1, \"\", end=\"\")\n    steps.append(input())\nprint(steps)\n","sub_path":"kattis/GlitchBot.py","file_name":"GlitchBot.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"2570387","text":"\"\"\"\n28. Search a 2D Matrix\nWrite an efficient algorithm that searches for a value in an m × n matrix.\n\nThis matrix has the following properties:\n\nIntegers in each row are sorted from left to right.\nThe first integer of each row is greater than the last integer of the previous row.\nExample\nConsider the following matrix:\n\n[\n  [1, 3, 5, 7],\n  [10, 11, 16, 20],\n  [23, 30, 34, 50]\n]\nGiven target = 3, return true\n\nChallenge\nO(log(n) + log(m)) time complexity\n\"\"\"\n\n\nclass Solution:\n    \"\"\"\n    @param matrix: matrix, a list of lists of integers\n    @param target: An integer\n    @return: a boolean, indicate whether matrix contains target\n    \"\"\"\n    \"\"\"\n    # Method 1: shorter code\n    # time: 1256 ms\n    def searchMatrix(self, matrix, target):\n        # write your code here\n        from itertools import chain\n        if target in list(chain(*matrix)):\n            return True\n        else:\n            return False\n    \"\"\"\n\n    # Building on the above, optimized with binary search:\n    # time: 1172 ms\n    def searchMatrix(self, matrix, target):\n        # write your code here\n        from itertools import chain\n        matrix = list(chain(*matrix))\n        left, right = 0, len(matrix) - 1\n        while left <= right:\n            mid = (left + right) // 2\n            if target > matrix[mid]:\n                left = mid + 1\n            elif target < matrix[mid]:\n                right = mid - 1\n            else:\n                return True\n        return False\n\n\ns = Solution()\nprint(s.searchMatrix([\n    [1, 3, 5, 7],\n    [10, 11, 16, 20],\n    [23, 30, 34, 50]\n], 10))\n","sub_path":"算法 - 其他/二分法/28.搜索二维矩阵.py","file_name":"28.搜索二维矩阵.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"37665715","text":"import httpx\nimport json\nimport csv\nimport srsly\nfrom pathlib import Path\nfrom typing import List, Optional\n\nfrom fastapi import Request, Form, File, UploadFile, APIRouter, Depends, Query, HTTPException\nfrom fastapi.templating import Jinja2Templates\nfrom app.util.login import get_current_username\n\ntemplates = Jinja2Templates(directory=\"app/templates\")\n\nrouter = APIRouter(dependencies=[Depends(get_current_username)])\n\n\n@router.get(\"/lookups\")\nasync def read_items(request: Request):\n    new_lang = Path.cwd() / \"new_lang\"\n    if len(list(new_lang.iterdir())) > 0:\n        return templates.TemplateResponse(\"lookups.html\", {\"request\": request})\n\n    else:\n        return templates.TemplateResponse(\n            \"error_please_create.html\", {\"request\": request}\n        )\n\n@router.post(\"/upload_lookups\")\nasync def update_lookups(file: UploadFile = File(...), lookup_type: str = Form(...)):\n    contents = file.file.read()\n    contents = 
contents.decode(\"utf-8\")\n\n #load lookups file \n new_lang = Path.cwd() / \"new_lang\"\n if len(list(new_lang.iterdir())) > 0:\n path = list(new_lang.iterdir())[0] / \"lookups\"\n if lookup_type == \"pos\":\n json_file = list(path.glob(\"*upos*\"))[0]\n if lookup_type == \"lemma\":\n json_file = list(path.glob(\"*lemma*\"))[0]\n if lookup_type == \"features\":\n json_file = list(path.glob(\"*features*\"))[0]\n if json_file.exists():\n lookup = srsly.read_json(json_file)\n\n # load CSV file \n if file.content_type == 'text/csv':\n reader = csv.reader(contents.splitlines())\n for row in reader:\n if row[0] == 'key' and row[1] == 'value':\n pass\n else:\n lookup[row[0]] = row[1] \n srsly.write_json(json_file, lookup)\n \n \n if file.content_type == 'application/json':\n data = srsly.json_loads(contents)\n join_dicts = {**lookup, **data}\n srsly.write_json(json_file, join_dicts)\n \n\n\n@router.get(\"/edit_lookup\")\nasync def edit_pos(request: Request, type: str):\n context = {}\n context[\"request\"] = request\n new_lang = Path.cwd() / \"new_lang\"\n if len(list(new_lang.iterdir())) > 0:\n path = list(new_lang.iterdir())[0] / \"lookups\"\n if type == \"pos\":\n json_file = list(path.glob(\"*upos*\"))[0]\n if type == \"lemma\":\n json_file = list(path.glob(\"*lemma*\"))[0]\n if type == \"features\":\n json_file = list(path.glob(\"*features*\"))[0]\n if json_file.exists():\n context[\"code\"] = json_file.read_text()\n\n else:\n raise HTTPException(status_code=404, detail=\"File not found\")\n return templates.TemplateResponse(\"edit_json.html\", context)\n\n#when code is not valid json, saves to file, but does not load \n\n@router.post(\"/edit_lookup\")\nasync def update_code(request: Request,):\n\n data = await request.json()\n type = data[\"type\"]\n code = data[\"code\"]\n \n # need something here to validate the json, return error and \n # help if not, but never save to disk if not valid (causes so much yuck)\n new_lang = Path.cwd() / \"new_lang\"\n \n if len(list(new_lang.iterdir())) > 0:\n path = list(new_lang.iterdir())[0] / \"lookups\"\n if type == \"pos\":\n json_file = list(path.glob(\"*upos*\"))[0]\n if type == \"lemma\":\n json_file = list(path.glob(\"*lemma*\"))[0]\n if type == \"features\":\n json_file = list(path.glob(\"*features*\"))[0]\n\n if json_file.exists():\n try: # assert that code is valid json \n assert json.loads(code)\n json_file.write_text(code) \n return {'message':'200'}\n except Exception as e:\n return {'message': str(e)}\n \n else:\n raise HTTPException(status_code=404, detail=\"File not found\")\n\n \n return templates.TemplateResponse(\n \"edit_json.html\", {\"request\": request, \"code\": code}\n )\n\n","sub_path":"app/routers/lookups.py","file_name":"lookups.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465726623","text":"from functools import reduce\nimport operator\n\nfrom django.core.urlresolvers import reverse_lazy, reverse\n\nfrom django.views.generic import TemplateView, ListView, CreateView, DetailView, FormView, UpdateView, DeleteView\n\nfrom BMS_app.forms import PersonalInformationForm, RelativeInformationForm, UpdateForm\nfrom BMS_app.models import PersonalInformation, FamilyMemberInformation\n\n\nclass Index(TemplateView):\n template_name = 'homepage.html'\n\n\nclass ListAll(ListView):\n template_name = 'listall.html'\n model = PersonalInformation\n\n\nclass About(TemplateView):\n template_name = 'about.html'\n\n\nclass 
CreatePerson(CreateView):\n model = PersonalInformation\n template_name = 'create.html'\n form_class = PersonalInformationForm\n success_url = reverse_lazy('BMS_app:ListAll')\n\n\nclass UpdatePerson(UpdateView):\n model = PersonalInformation\n form_class = UpdateForm\n template_name = 'update.html'\n success_url = reverse_lazy('BMS_app:ListAll')\n context_object_name = 'person'\n\n def get_context_data(self, **kwargs):\n context = super(UpdatePerson, self).get_context_data(**kwargs)\n context['pk_person_id'] = PersonalInformation.objects.get(id=self.kwargs['pk'])\n return context\n\n\nclass DeletePerson(DeleteView):\n model = PersonalInformation\n success_url = reverse_lazy('BMS_app:ListAll')\n template_name = 'delete.html'\n\n\nclass PersonDetails(DetailView):\n template_name = 'details.html'\n context_object_name = 'relatives'\n\n def get_object(self, queryset=None):\n person = PersonalInformation.objects.get(id=self.kwargs['pk'])\n relatives = FamilyMemberInformation.objects.filter(person_id=person.id)\n return relatives\n\n def get_context_data(self, **kwargs):\n context = super(PersonDetails, self).get_context_data(**kwargs)\n context['pk_id'] = PersonalInformation.objects.get(id=self.kwargs['pk'])\n return context\n\n\nclass RelativeCreate(FormView):\n model = FamilyMemberInformation\n template_name = 'create_relative.html'\n form_class = RelativeInformationForm\n\n def get_success_url(self):\n return reverse('BMS_app:PersonDetails', kwargs={'pk': self.kwargs['pk']})\n\n def form_valid(self, form):\n relative = form.save(commit=False)\n relative.person_id = self.kwargs['pk']\n relative.save()\n\n return super(RelativeCreate, self).form_valid(form)\n\n\nclass UpdateRelative(UpdateView):\n model = FamilyMemberInformation\n form_class = UpdateForm\n template_name = 'updaterelative.html'\n\n def get_context_data(self, **kwargs):\n context = super(UpdateRelative, self).get_context_data(**kwargs)\n context['pk_person_id'] = FamilyMemberInformation.objects.get(id=self.kwargs['pk'])\n return context\n\n def get_success_url(self):\n return reverse('BMS_app:PersonDetails')\n\n\nclass DeleteRelative(DeleteView):\n model = FamilyMemberInformation\n success_url = reverse_lazy('BMS_app:ListAll')\n template_name = 'delete.html'\n\n\nclass BlogSearchListView(ListView):\n paginate_by = 10\n template_name = 'results.html'\n model = PersonalInformation\n\n def get_queryset(self):\n result = super(BlogSearchListView, self).get_queryset()\n query = self.request.GET.get('q')\n if query:\n query_list = query.split()\n from django.db.models import Q\n result = result.filter(\n reduce(operator.add,\n (Q(first_name__icontains=q) for q in query_list)) |\n reduce(operator.add,\n (Q(email__icontains=q) for q in query_list))\n )\n\n return result\n\n","sub_path":"BMS_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"484661537","text":"#coding=utf-8\nfrom PySide import QtGui\nfrom main_widget import MainWidget\nimport win32gui, win32con\n\nimport ctypes\nfrom ctypes import wintypes\nimport win32con\nbyref = ctypes.byref\nuser32 = ctypes.windll.user32\n\n# from capture_widget import CaptureWidget\n\ndef registerHotkey(HOTKEYS):\n for id, values in HOTKEYS.items ():\n vk, modifiers = values[0], reduce (lambda x, y: x | y, values[1:])\n if not user32.RegisterHotKey (None, id, modifiers, vk):\n print (\"Unable to register id\", id)\n\n\ndef main():\n import sys\n\n app = 
QtGui.QApplication(sys.argv)\n    main_widget = MainWidget()\n    main_widget.show()\n    # sys.exit(app.exec_())\n\n    # user32.RegisterHotKey(None, 1, win32con.MOD_ALT, 75)\n    # user32.RegisterHotKey(None, 2, win32con.MOD_ALT, 76)\n    registerHotkey(main_widget.HOTKEYS)\n\n    try:\n        msg = wintypes.MSG()\n        while user32.GetMessageA(byref(msg), None, 0, 0) != 0:\n            if msg.message == 1050:\n                break\n            if msg.message == win32con.WM_HOTKEY:\n                main_widget.OnHotkeyEvent(msg)\n\n            user32.TranslateMessage(byref(msg))\n            user32.DispatchMessageA(byref(msg))\n\n    finally:\n        user32.UnregisterHotKey(None, 1)\n        user32.UnregisterHotKey(None, 2)\n\n    # sys.exit()\n    # sys.exit(app.exec_())\n\nif __name__ == '__main__':\n    main()","sub_path":"ep_screenshot.py","file_name":"ep_screenshot.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"247177793","text":"__author__ = 'milosz'\n\nimport ReactorServer\nfrom ServerSocketWrapper import AcceptorSocketWrapper\nimport sys\n\nif '__main__' == __name__:\n    assert 4 == len(sys.argv), 'usage: python main.py workers port tasks_limit'\n    workers = int(sys.argv[1])\n    port = int(sys.argv[2])\n    tasks_limit = int(sys.argv[3])\n    reactor = ReactorServer.Reactor(workers, sys.stdout, tasks_limit)\n    reactor.register_handler(\n        ReactorServer.Reactor.AcceptorSocketEventHandler(\n            AcceptorSocketWrapper(port, False), reactor\n        ))\n    reactor.handle_events()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"486873170","text":"#!/usr/bin/env python3\n# coding=utf-8\n# Parse data from the log files and store it straight to Excel with pandas\nimport pandas as pd\nimport re\n\n\nclass PandasWriteExcel(object):\n    def __init__(self, res_log, index, cols):\n        self.res_log = res_log\n        self.index = index\n        self.cols = cols\n        self.data = self.compose_data()\n\n    @staticmethod\n    def get_throughput(log):\n        # frame_data = []\n        tcp_data = []\n        udp_data = []\n        # pat = re.compile(r'iperf3.*-l\\s(\\d+)')\n        with open(log, 'r') as f:\n            for line in f:\n                # m = pat.search(line)\n                # if m is not None:\n                #     frame_data.append(m.group(1))\n                if re.search(r'receiver', line):\n                    tcp_data.append(line.split()[-3])\n                if re.search(r'\\d+%', line):\n                    udp_data.append(line.split()[-6])\n            else:\n                final_data = tcp_data + udp_data\n                return list(map(float, final_data)) # convert the list of strings to a list of numbers # this builds the data for the dataframe\n\n    def compose_data(self):\n        data = []\n        for i in range(len(self.res_log)):\n            res = self.get_throughput(self.res_log[i])\n            data.append(res) # each list element is the full data of one release, i.e. one row\n        # print(f'performance result:{data}') # [[247, 1082, 940, 6241], [247, 1082, 940, 6241]]\n        print(dict(zip(self.cols, data))) # {'SR0610': [247.0, 1082.0, 940.0, 6241.0], 'SR0620': [247.0, 1082.0, 940.0, 6241.0], 'SR0630': [247.0, 1082.0, 940.0, 6241.0]}\n        return dict(zip(self.cols, data)) # return a dict in the dataframe's data format\n\n    def write_to_excel(self): # write the data to excel\n        write_file = \"test_cols.xlsx\"\n        df = pd.DataFrame(self.data, self.index) # two arguments: data and index\n        writer = pd.ExcelWriter(write_file)\n        df.to_excel(writer, sheet_name='release', startrow=2, startcol=2)\n        writer.save()\n\n    def update_excel_column(self): # add or update one column, suited for adding or modifying a single column of data\n        write_file = \"test_cols.xlsx\"\n        df = pd.DataFrame(self.data, self.index) # two arguments: data and index\n        # xiaozhan trial, this can write new data into the new column, which can be used for API test\n        df['SR0640'] = [100, 300, 300, 400] # add or update data in this new column, get 
data from other function\n        # xiaozhan trial\n        writer = pd.ExcelWriter(write_file)\n        df.to_excel(writer, sheet_name='release', startrow=2, startcol=2)\n        writer.save()\n\n\nif __name__ == '__main__':\n    # The only difference from the previous example is that the index and column names differ\n    index = ['tcp_64', 'tcp_1024', 'tcp_65536', 'udp_1400']\n    cols = ['SR0610', 'SR0620', 'SR0630']\n    res_log = ['SR0610.txt', 'SR0620.txt', 'SR0630.txt'] # each column is the full data of one release\n    # instantiate the class\n    ts = PandasWriteExcel(res_log, index, cols)\n    ts.write_to_excel()\n\n\n","sub_path":"numpy_pandas_matplotlib/21-pandas-write-cols-to-excel.py","file_name":"21-pandas-write-cols-to-excel.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"569566222","text":"\n\nfrom machine import Pin,I2C\nimport time\n\n\n\nSMPLRT_DIV\t\t= const(0x19) # gyro sample rate, typical value: 0x07 (125Hz)\nCONFIG\t\t\t = const(0x1A) #// low-pass filter frequency, typical value: 0x06 (5Hz)\nGYRO_CONFIG\t\t= const(0x1B) #// gyro self-test and measurement range, typical value: 0x18 (no self-test, 2000deg/s)\nACCEL_CONFIG\t= const(0x1C) #// accelerometer self-test, measurement range and high-pass filter frequency, typical value: 0x01 (no self-test, 2G, 5Hz)\nACCEL_XOUT_H\t= const(0x3B) #\nACCEL_XOUT_L\t= const(0x3C) #\nACCEL_YOUT_H\t= const(0x3D) #\nACCEL_YOUT_L\t= const(0x3E) #\nACCEL_ZOUT_H\t= const(0x3F) #\nACCEL_ZOUT_L\t= const(0x40) #\nTEMP_OUT_H\t\t= const(0x41) #\nTEMP_OUT_L\t\t= const(0x42) #\nGYRO_XOUT_H\t\t= const(0x43) #\nGYRO_XOUT_L\t\t= const(0x44) #\t\nGYRO_YOUT_H\t\t= const(0x45) #\nGYRO_YOUT_L\t\t= const(0x46) #\nGYRO_ZOUT_H\t\t= const(0x47) #\nGYRO_ZOUT_L\t\t= const(0x48) #\n\nPWR_MGMT_1\t\t= const(0x6B) #\t// power management, typical value: 0x00 (normal operation)\n\n\nWHO_AM_I\t\t\t= const(0x75) #\t// IIC address register (default 0x68, read-only)\n\n\nSlaveAddress\t= const(0xD0) #\t// IIC address byte for writing; +1 for reading\n\nclass mpu6050(object):\n    def __init__(self):\n        self.i2c = I2C(-1,scl=Pin(17), sda=Pin(16),freq=400000)\n        self.i2c_address = self.i2c.scan()[0]\n        print(\"mpu6050_add = \",self.i2c_address)\n        self.mpu6050_write_reg(PWR_MGMT_1,0x00)\n        self.mpu6050_write_reg(SMPLRT_DIV,0x07)\n        self.mpu6050_write_reg(CONFIG,0x06)\n        self.mpu6050_write_reg(GYRO_CONFIG,0x18)\n        self.mpu6050_write_reg(ACCEL_CONFIG,0x01)\n    \n    def mpu6050_write_reg(self,uch_addr,uch_data):\n        # buf = bytearray(1)\n        #buf[0] = uch_data\n        self.i2c.writeto_mem(self.i2c_address, uch_addr , chr(uch_data))\n        #self.i2c.writeto(uch_addr, buf)\n    \n    def mpu6050_read_reg(self,uch_addr):\n        buf = self.i2c.readfrom_mem(self.i2c_address, uch_addr , 1)\n        return buf\n    \n    def get_data(self,uch_addr):\n        # read the high and low register bytes and combine them into one 16-bit value\n        h_data = self.mpu6050_read_reg(uch_addr)\n        l_data = self.mpu6050_read_reg(uch_addr + 1)\n        return (ord(h_data) << 8) + ord(l_data)\n\n    \n    def mpu6050_data(self):\n        t = []\n        t.append(self.get_data(ACCEL_XOUT_H))\n        t.append(self.get_data(ACCEL_YOUT_H))\n        t.append(self.get_data(ACCEL_ZOUT_H))\n        t.append(self.get_data(GYRO_XOUT_H))\n        t.append(self.get_data(GYRO_YOUT_H))\n        t.append(self.get_data(GYRO_ZOUT_H))\n        return t\n\n","sub_path":"ESP32_python_pro/esp32_watch/mpu6050.py","file_name":"mpu6050.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"123507597","text":"class HíresNő:\r\n    def __init__(self, név, foglalkozás, nemzetiség):\r\n        self.név = név\r\n        self.foglalkozás = foglalkozás\r\n        self.nemzetiség = nemzetiség\r\n    def előtag(self):\r\n        if self.nemzetiség == 'a':return 'Ms.'\r\n        else:return 'Frau'\r\n\r\nhíres_nők = []\r\nfor _ in range(3):\r\n    név = input('Enter the name of a famous woman! ')\r\n    foglalkozás = input('Enter her occupation! 
')\r\n nemzetiség = input('Add meg a nemzetiségét (a/n)! ')\r\n híres_nő = HíresNő(név, foglalkozás, nemzetiség)\r\n híres_nők.append(híres_nő)\r\nfor híres_nő in híres_nők:\r\n print(híres_nő.előtag(), híres_nő.név, 'egy híres', híres_nő.foglalkozás)","sub_path":"Takács Péter/Vizsga feladatok/Vizsga3.1.py","file_name":"Vizsga3.1.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53063548","text":"#coding=utf-8\nimport json\nfrom collections import OrderedDict\nfrom PIL import Image\nfrom pathlib import Path\nimport numpy as np\nimport tqdm\nimport os\n\ndef cut_img(img):\n #画像をトリミングする\n w,h = img.size\n if(w>=h):\n img = img.crop((0,0,h,h))\n else:\n img = img.crop((0,0,w,w)) \n return img\n \ndef load_img(path):\n #画像を読み込む\n img = Image.open(path)\n img = cut_img(img)\n #HSVへ変換\n img.convert('HSV')\n return np.asarray(img)[:, :, :3]\n\ndef load_data(id,size=50):\n \n for i in Path(id +'/data').glob('**/*.png'):\n print(type(i))\n rgb_im = Image.open(str(i)).convert('RGB')\n root,_ = os.path.splitext(str(i))\n name = root + '.jpg'\n rgb_im.save(name)\n os.remove(str(i))\n \n img_paths = list( Path('data/').glob('**/*.jpg') or Path('data/').glob('**/*.JPG'))\n img_paths = [str(path) for path in img_paths]\n\n img_list = [ load_img(img) for img in img_paths ]\n img_list = [ np.asarray(Image.fromarray(img).resize((size, size))) for img in img_list ]\n\n return (img_paths, img_list)\n\n# 画像の特徴量を計算\ndef feature(img,feature_div):\n chunk_sz = img.shape[0]/feature_div\n n_chunk_pixels = chunk_sz*chunk_sz\n\n f = np.zeros((feature_div, feature_div, img.shape[2]))\n for i in range(feature_div):\n for j in range(feature_div):\n tly = int(chunk_sz*i)\n tlx = int(chunk_sz*j)\n bry = int(chunk_sz*(i+1))\n brx = int(chunk_sz*(j+1))\n f[i,j] = np.sum(img[tly:bry, tlx:brx, :], axis=(0,1))\n\n return f / n_chunk_pixels\n\ndef main(feature_div,id):\n img_paths, img_list = load_data(id=id) # 素材画像の読み込み\n #print(img_list)\n img_paths.sort()\n assert(len(set((img.shape for img in img_list))) == 1) # 素材画像がすべて同じ大きさかチェック\n assert(img_list[0].shape[0] == img_list[0].shape[1]) # 素材画像が正方形かチェック\n block_size = img_list[0].shape[0] # 素材画像の一辺の長さをblock_sizeとする\n\n # 全画像の特徴量を計算\n features = [feature(img,feature_div).tolist() for img in img_list]\n\n for i in range(len(img_paths)):\n print(str(i)+' : '+str(img_paths[i]))\n \n # jsonに書き込む\n with open(id + '/features.json', 'w') as f:\n json.dump( OrderedDict([\n ('block_size', block_size),\n ('data',(('name', img_paths),('feature',features)))\n ]), f, indent=4 )\n\nif __name__ == '__main__':\n main(1,\"./\")\n","sub_path":"assets/python_sample/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596081917","text":"\"\"\"\n\n Helper functions for the pretrained model to be used within our API.\n\n Author: Explore Data Science Academy.\n\n Note:\n ---------------------------------------------------------------------\n Plase follow the instructions provided within the README.md file\n located within this directory for guidance on how to use this script\n correctly.\n\n Importantly, you will need to modify this file by adding\n your own data preprocessing steps within the `_preprocess_data()`\n function.\n ----------------------------------------------------------------------\n\n Description: This file contains several functions used to abstract 
aspects\n of model interaction within the API. This includes loading a model from\n file, data preprocessing, and model prediction. \n\n\"\"\"\n\n# Helper Dependencies\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom catboost import CatBoostRegressor,Pool, cv\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom math import sqrt\nimport catboost\nimport math\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport json\n\ndef _preprocess_data(data):\n \"\"\"Funtion just does preprocessing on the test data but will be used on the test dataset before converting to json string\n\n\n Parameters\n ----------\n data : str\n The data payload received within POST requests sent to our API.\n\n Returns\n -------\n Pandas DataFrame : \n The preprocessed data, ready to be used our model for prediction.\n\n \"\"\"\n combined_data = []\n if (type(data) != pd.DataFrame):\n # Convert the json string to a python dictionary object\n feature_vector_dict = json.loads(data)\n\n # Load the dictionary as a Pandas DataFrame.\n feature_vector_df = pd.DataFrame.from_dict([feature_vector_dict])\n\n combined_data = feature_vector_df.copy()\n return combined_data\n else:\n combined_data = data\n\n # ---------------------------------------------------------------\n # NOTE: You will need to swap the lines below for your own data\n # preprocessing methods.\n #\n # The code below is for demonstration purposes only. You will not\n # receive marks for submitting this code in an unchanged state.\n # ---------------------------------------------------------------\n\n # ----------- Replace this code with your own preprocessing steps --------\n\n combined_data['Placement_Datetime'] = pd.to_datetime(combined_data['Placement - Time'])\n combined_data.loc[:, 'Placement_Date'] = combined_data['Placement_Datetime'].dt.date\n combined_data['Confirmation_datetime'] = pd.to_datetime(combined_data['Confirmation - Time'])\n combined_data['Trip_Duration'] = (combined_data['Confirmation_datetime'] - combined_data['Placement_Datetime']).map(\n lambda x: x.total_seconds())\n\n combined_data.drop([\"Confirmation_datetime\", \"Placement_Date\", \"Placement_Datetime\"], axis=1, inplace=True)\n # combined_data.drop([\"Arrival at Destination - Day of Month\", \"Arrival at Destination - Weekday (Mo = 1)\",\n # \"Arrival at Destination - Time\"],\n # axis=1, inplace=True)\n combined_data.drop('Trip_Duration', axis=1, inplace=True)\n combined_data['Temperature'] = combined_data['Temperature'].fillna((combined_data['Temperature'].mean()))\n combined_data['Precipitation in millimeters'] = combined_data['Precipitation in millimeters'].fillna(0)\n combined_data['Arrival at Pickup - Time'] = pd.to_datetime(combined_data['Arrival at Pickup - Time'])\n combined_data['A_hour'] = combined_data['Arrival at Pickup - Time'].dt.hour\n combined_data['A_seconds'] = combined_data['Arrival at Pickup - Time'].dt.second\n combined_data['A_minutes'] = combined_data['Arrival at Pickup - Time'].dt.minute\n combined_data['am_or_pm_confirm'] = combined_data['Confirmation - Time'].astype('str').apply(\n lambda x: x.split(' ')[-1])\n combined_data['Confirmation - Time'] = pd.to_datetime(combined_data['Confirmation - 
Time'])\n combined_data['C_hour'] = combined_data['Confirmation - Time'].dt.hour\n combined_data['C_min'] = combined_data['Confirmation - Time'].dt.minute\n combined_data['C_sec'] = combined_data['Confirmation - Time'].dt.second\n combined_data.drop(['Arrival at Pickup - Time', 'Confirmation - Time'], axis=1, inplace=True)\n combined_data.drop('Order No', axis=1, inplace=True)\n combined_data.drop(['Pickup - Time', 'Placement - Time', 'Rider Id'], axis=1, inplace=True)\n transport = {\"Vehicle Type\": {\"Bike\": 1, \"Other\": 2},\n \"Personal or Business\": {\"Personal\": 1, \"Business\": 2, }}\n combined_data.replace(transport, inplace=True)\n combined_data = pd.get_dummies(combined_data)\n return combined_data\n\ndef load_model(path_to_model:str):\n \"\"\"Adapter function to load our pretrained model into memory.\n\n Parameters\n ----------\n path_to_model : str\n The relative path to the model weights/schema to load.\n Note that unless another file format is used, this needs to be a\n .pkl file.\n\n Returns\n -------\n \n The pretrained model loaded into memory.\n\n \"\"\"\n return pickle.load(open(path_to_model, 'rb'))\n\ndef make_prediction(data, model):\n \"\"\"Prepare request data for model prediciton.\n\n Parameters\n ----------\n data : str\n The data payload received within POST requests sent to our API.\n model : \n An sklearn model object.\n\n Returns\n -------\n list\n A 1-D python list containing the model prediction.\n\n \"\"\"\n # Data preprocessing.\n prep_data = _preprocess_data(data)\n # Perform prediction with model and preprocessed data.\n prediction = model.predict(prep_data)\n # Format as list for output standerdisation.\n return [round(i) for i in prediction.tolist()]\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67306012","text":"#!/usr/bin/python3\n\"\"\"\nModule to register blueprints and run the flask server\nin preparation for api calls\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import Flask, jsonify\nfrom models import engine\nfrom models import storage\nfrom os import getenv\n\napp = Flask(__name__)\napp.register_blueprint(app_views)\n\n\n@app.teardown_appcontext\ndef teardown_flask(exception):\n \"\"\"\n Remove the database, exit and save file\n \"\"\"\n storage.close()\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return (jsonify(error=\"Not found\"), 404)\n\nif __name__ == \"__main__\":\n host = getenv(\"HBNB_API_HOST\", \"0.0.0.0\")\n port = getenv(\"HBNB_API_PORT\", \"5000\")\n app.run(host=host, port=port)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"290064150","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, tools, SUPERUSER_ID, _\nfrom datetime import datetime\nfrom odoo.exceptions import UserError, AccessError\nfrom odoo.tools import float_compare, pycompat\nfrom odoo.addons import decimal_precision as dp\n\n\nclass Etat_des_lieux(models.Model):\n _name = 'lb.etat_des_lieux'\n _rec_name = 'bien_loue'\n\n # kanban\n color = fields.Integer()\n\n etat_des_lieux_type = fields.Selection(\n [('entree', 'Etat des lieux d\\'entrée'), ('pendant', 'Etat des lieux durant la location'),\n ('sortie', 'Etat des lieux de sortie')], string=\"Type\", required=True)\n date_etat_des_lieux = fields.Date(string=\"Date etat 
lieu\")\n\n    location = fields.Many2one('lb.location', ondelete='cascade', string=\"Contrat associé\", required=True,\n                               domain=\"[('state','=','confirm')]\")\n\n    locataires = fields.Many2one(related='location.locataires', string=\"Locataire\")\n    mobile = fields.Char(string=\"N° Tel Locataire\", related='location.mobile')\n\n    bien_loue = fields.Many2one(related='location.bien_loue', string=\"Bien Loué\")\n    categ_id = fields.Many2one(related='location.categ_id', string=\"Type Bien\")\n    adresse = fields.Many2one(related='location.adresse', string=\"Quartier de visite : \")\n\n    notes = fields.Text(string=\"Notes\")\n    user_id = fields.Many2one('res.users', string='Agent-Guide', track_visibility='onchange',\n                              default=lambda self: self.env.user)\n\n    enregistrement_etat_des_lieux = fields.One2many('lb.enregistrement_etat_des_lieux', 'etat_des_lieux_id',\n                                                    string=\"Etat des lieux\")\n    Etat = fields.Selection(\n        [('non vérifié.', 'Non vérifié'), ('neuf', 'Neuf'), ('bon etat', 'Bon état'), ('etat moyen', 'Etat moyen'),\n         ('mauvais etat', 'Mauvais état')], string=\"Etat lieu\", related='enregistrement_etat_des_lieux.Etat')\n\n    etat_des_lieux_entree_associe = fields.Many2one('lb.etat_des_lieux', ondelete='cascade', string=\"Location associée\")\n\n    # etat_des_lieux_entree_associe = fields.Many2one('lb.etat_des_lieux', string=\"Etat des lieux d'entrée associé\",\n    #                                                 domain=[('etat_des_lieux_type', '=', 'entree')])\n    doc_count = fields.Integer(compute='_compute_attached_docs_count', string=\"Documents\")\n\n    # 2 fonctions pour l'image attaché\n\n    def _compute_attached_docs_count(self):\n        Attachment = self.env['ir.attachment']\n        for etat in self:\n            etat.doc_count = Attachment.search_count(\n                [('res_model', '=', 'lb.etat_des_lieux'), ('res_id', '=', etat.id)])\n\n    @api.multi\n    def attachment_tree_view(self):\n        self.ensure_one()\n        domain = [('res_model', '=', 'lb.etat_des_lieux'), ('res_id', 'in', self.ids)]\n        return {\n            'name': _('Attachments'),\n            'domain': domain,\n            'res_model': 'ir.attachment',\n            'type': 'ir.actions.act_window',\n            'view_id': False,\n            'view_mode': 'kanban,tree,form',\n            'view_type': 'form',\n            'help': _('''\n Cliquez sur créer (et non importer) pour ajouter les images associées à vos biens.\n
'''),\n 'limit': 80,\n 'context': \"{'default_res_model': '%s','default_res_id': %d}\" % (self._name, self.id)\n }\n\n pendant_etat_des_lieux = fields.One2many('lb.pendant_etat_des_lieux', 'etat_pendant_id',\n string=\"Etat pandant location\", required=True)\n\n Etat_pendant = fields.Selection(\n [('bon etat', 'Bon état'), ('etat moyen', 'Etat moyen'),\n ('mauvais etat', 'Mauvais état')], string=\"Etat Visite\", related='pendant_etat_des_lieux.Etat_pendant')\n\n date_pendant = fields.Date(string='Date Visite', default=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n related='pendant_etat_des_lieux.date_pendant')\n\n\nclass Enregistrement_Etat_des_lieux(models.Model):\n _name = 'lb.enregistrement_etat_des_lieux'\n\n etat_des_lieux_id = fields.Many2one('lb.etat_des_lieux', ondelete='cascade', string=\"Etat des lieux\")\n nom_piece = fields.Char(string=\"Nom de la pièce\", required=True)\n Etat = fields.Selection(\n [('non vérifié.', 'Non vérifié'), ('neuf', 'Neuf'), ('bon etat', 'Bon état'), ('etat moyen', 'Etat moyen'),\n ('mauvais etat', 'Mauvais état')], string=\"Etat lieu\")\n commentaires = fields.Text(string=\"Commentaire\")\n\n photos = fields.Binary(string=\"photos\", attachment=True)\n fichier = fields.Binary(string=\"fichier\", attachment=True)\n\n\nclass Enregistrement_Etat_des_lieux_pendant(models.Model):\n _name = 'lb.pendant_etat_des_lieux'\n\n etat_pendant_id = fields.Many2one('lb.etat_des_lieux', ondelete='cascade', string=\"Etat des lieux Pandant\")\n nom_piece_pendant = fields.Char(string=\"Résumé\", required=True)\n Etat_pendant = fields.Selection(\n [('bon etat', 'Bon état'), ('etat moyen', 'Etat moyen'),\n ('mauvais etat', 'Mauvais état')], string=\"Etat Visite\")\n commentaires_pendant = fields.Text(string=\"Commentaire\")\n\n date_pendant = fields.Date(string='Date Visite', default=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\n fichier = fields.Binary(string=\"fichier\", attachment=True)\n\n photos = fields.Many2many(\"lb.photos_pendant\", string=\"Photos\")\n\n\nclass photos(models.Model):\n _name = 'lb.photos_pendant'\n _rec_name = 'description'\n\n photos = fields.Binary(string=\"Photos\", attachment=True)\n description = fields.Char('Nom de la pièce')\n\n\n\n\n\n\n","sub_path":"gestion_immobiliere/models/etat_des_lieux.py","file_name":"etat_des_lieux.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"9752480","text":"#!/usr/bin/env python\n\nclass Utility(object):\n \"\"\"\n \"\"\"\n \n def __init__(self, ):\n \"\"\"\n \"\"\"\n\n @staticmethod\n def get_instance(kls):\n parts = kls.split('.')\n module = \".\".join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m\n \n","sub_path":"python/ml/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"425065528","text":"import os\nimport json\nimport luigi\nfrom elf.io import open_file\n\nfrom paintera_tools import serialize_from_commit\nfrom paintera_tools.util import compute_graph_and_weights\nfrom cluster_tools.node_labels import NodeLabelWorkflow\nfrom cluster_tools.morphology import MorphologyWorkflow\nfrom cluster_tools.downscaling import DownscalingWorkflow\n\n\ndef graph_and_costs(path, ws_key, aff_key, out_path):\n tmp_folder = './tmp_preprocess'\n compute_graph_and_weights(path, aff_key,\n path, ws_key, out_path,\n tmp_folder, target='slurm', 
max_jobs=250,\n offsets=[[-1, 0, 0], [0, -1, 0], [0, 0, -1]],\n with_costs=True)\n\n\ndef accumulate_node_labels(ws_path, ws_key, seg_path, seg_key,\n out_path, out_key, prefix):\n task = NodeLabelWorkflow\n\n tmp_folder = './tmp_preprocess'\n config_dir = os.path.join(tmp_folder, 'configs')\n\n t = task(tmp_folder=tmp_folder, config_dir=config_dir,\n target='slurm', max_jobs=250,\n ws_path=ws_path, ws_key=ws_key,\n input_path=seg_path, input_key=seg_key,\n output_path=out_path, output_key=out_key,\n prefix=prefix)\n ret = luigi.build([t], local_scheduler=True)\n assert ret\n\n\ndef compute_bounding_boxes(path, key):\n task = MorphologyWorkflow\n tmp_folder = './tmp_preprocess'\n config_dir = os.path.join(tmp_folder, 'configs')\n\n out_key = 'morphology'\n t = task(tmp_folder=tmp_folder, config_dir=config_dir,\n target='slurm', max_jobs=250,\n input_path=path, input_key=key,\n output_path=path, output_key=out_key)\n ret = luigi.build([t], local_scheduler=True)\n assert ret\n\n\ndef downscale_segmentation(path, key):\n task = DownscalingWorkflow\n\n tmp_folder = './tmp_preprocess'\n config_dir = os.path.join(tmp_folder, 'configs')\n\n configs = task.get_config()\n conf = configs['downscaling']\n conf.update({'library_kwargs': {'order': 0}})\n with open(os.path.join(config_dir, 'downscaling.config'), 'w') as f:\n json.dump(conf, f)\n\n in_key = os.path.join(key, 's0')\n n_scales = 5\n scales = n_scales * [[2, 2, 2]]\n halos = n_scales * [[0, 0, 0]]\n\n t = task(tmp_folder=tmp_folder, config_dir=config_dir,\n # target='slurm', max_jobs=250,\n target='local', max_jobs=64,\n input_path=path, input_key=in_key,\n scale_factors=scales, halos=halos,\n output_path=path, output_key_prefix=key)\n ret = luigi.build([t], local_scheduler=True)\n assert ret\n\n\ndef copy_tissue_labels(in_path, out_path, out_key):\n with open_file(in_path, 'r') as f:\n names = f['semantic_names'][:]\n mapping = f['semantic_mapping'][:]\n\n semantics = {name: ids.tolist() for name, ids in zip(names, mapping)}\n with open_file(out_path) as f:\n ds = f[out_key]\n ds.attrs['semantics'] = semantics\n\n\n# preprocess:\n# - export current paintera segmentation\n# - build graph and compute weights for current superpixels\n# - get current node labeling\n# - compute bounding boxes for current segments\n# - downscale the segmentation\ndef preprocess(path, key, aff_key,\n tissue_path, tissue_key,\n out_path, out_key):\n tmp_folder = './tmp_preprocess'\n out_key0 = os.path.join(out_key, 's0')\n\n serialize_from_commit(path, key, out_path, out_key0, tmp_folder,\n max_jobs=250, target='slurm', relabel_output=True)\n\n ws_key = os.path.join(key, 'data', 's0')\n graph_and_costs(path, ws_key, aff_key, out_path)\n\n accumulate_node_labels(path, ws_key, out_path, out_key0,\n out_path, 'node_labels', prefix='node_labels')\n\n accumulate_node_labels(out_path, out_key0, tissue_path, tissue_key,\n out_path, 'tissue_labels', prefix='tissue')\n copy_tissue_labels(tissue_path, out_path, 'tissue_labels')\n\n compute_bounding_boxes(out_path, out_key0)\n downscale_segmentation(out_path, out_key)\n","sub_path":"mmpb/segmentation/correction/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"543803251","text":"'''\nCreated on 05/07/2015\n\n@author: mangeli\n'''\nfrom GameQualityAssessment.code_pac.model import GenericGame, ItemTuple\nimport GameQualityAssessment.code_pac.italiano.model.game as ITmodel\n\nclass 
LegaNazionaleGame(GenericGame):\n def __init__(self, game):\n if not isinstance(game, ITmodel.Game):\n raise TypeError('Arg must to be a brasileiro.model.Game instance')\n self._winner = None\n super(LegaNazionaleGame, self).__init__(game)\n \n def _setGameStruct(self):\n self._players = []\n self._gameData = []\n lastScore = {}\n \n gameRounds = self._game.gameRounds\n order = 1\n for gameRound in gameRounds:\n \n scores = []\n for score in gameRound:\n if not score[0] in self._players:\n self._players.append(score[0])\n lastScore[score[0]] = 0\n scores.append(ItemTuple(playerCode=score[0], roundScore= int(score[1]) - lastScore[score[0]], totalScore=score[1]))\n lastScore[score[0]] = score[1]\n self._gameData.append((order, scores))\n order +=1\n \nif __name__ == \"__main__\":\n from GameQualityAssessment.code_pac.measures import DramaByPaths,DramaByPositionUp2First,DramaByPointsUp2First\n from GameQualityAssessment.code_pac.gamePlots import GamePlots\n import matplotlib.pyplot as plt\n import numpy as np\n \n the_list = ITmodel.Game.retrieveList()\n values=[]\n for game in the_list:\n g = LegaNazionaleGame(game)\n values.append([DramaByPaths(game=g, ignored=0).getMeasureValue(), \n DramaByPointsUp2First(game=g, ignored=0).getMeasureValue(),\n DramaByPositionUp2First(game=g, ignored=0).getMeasureValue()])\n print (values[:])\n print (np.transpose(values))\n #p = GamePlots(game)\n #p.byPosition(ignored=0)\n plt.figure()\n plt.hist(np.transpose(values).reshape((len(values),3)),\n normed=False, \n bins=5,\n range=[0,1],\n color=['red', 'green', 'blue'],\n label=['Path', 'Points', 'Position'])\n plt.legend()\n plt.show()\n","sub_path":"code_pac/model/italianoGame.py","file_name":"italianoGame.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641717809","text":"import json\nimport logging\nimport types\nimport datetime\nimport collections\nimport tornado.template\nimport tornado.gen\nimport tornado.web\nimport tornado.websocket\nimport datetime\nimport re\n\n\ndef md(s):\n if s is None: s = ''\n return markdown.markdown(s, extensions=['markdown.extensions.nl2br'])\n\nclass DatetimeEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.datetime):\n return obj.strftime('%Y-%m-%d %H:%M:%S')\n elif isinstance(obj, datetime.date):\n return obj.strftime('%Y-%m-%d')\n else:\n return json.JSONEncoder.default(self, obj)\n\nclass Service:\n pass\n\nclass RequestHandler(tornado.web.RequestHandler):\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n try:\n self.get_argument('json')\n self.res_json = True\n\n except tornado.web.HTTPError:\n self.res_json = False\n\n def log(self, msg):\n pass\n\n def get_args(self, name):\n meta = {}\n for n in name:\n try:\n if n[-2:] == \"[]\":\n meta[n[:-2]] = self.get_arguments(n)\n elif n[-6:] == \"[file]\":\n n = n[:-6]\n meta[n] = self.request.files[n][0]\n else:\n meta[n] = self.get_argument(n)\n except:\n meta[n] = None\n return meta\n\n @tornado.gen.coroutine\n def prepare(self):\n self.title = \"Hackathon\"\n try: \n self.token = self.get_secure_cookie('token').decode()\n self.id = self.get_secure_cookie('id').decode()\n except: \n self.token = None\n self.id = 0\n self.clear_cookie('token')\n self.clear_cookie('id')\n #super().prepare()\n\n\n\n\nclass ApiRequestHandler(RequestHandler):\n def render(self, code=200, msg=\"\"):\n self.finish(json.dumps({'status': code,\n 'msg': msg}, cls=DatetimeEncoder))\n 
return\n    @tornado.gen.coroutine\n    def prepare(self):\n        super().prepare()\n        self.acct = {}\n        if self.token:\n            err, self.acct = yield from Service.User.get_user_info(self.token, self.id)\n\nclass WebRequestHandler(RequestHandler):\n    def set_secure_cookie(self, name, value, expires_days=30, version=None, **kwargs):\n        kwargs['httponly'] = True\n        super().set_secure_cookie(name, value, expires_days, version, **kwargs)\n\n    def write_error(self, status_code, err=None, **kwargs):\n        kwargs[\"err\"] = err\n        self.render('error/%s.html'%(status_code), **kwargs)\n\n    def render(self, templ, **kwargs):\n        kwargs['title'] = self.title\n        kwargs['acct'] = self.acct\n        if(self.acct):\n            kwargs['acct']['id'] = self.id\n        kwargs['token'] = self.token\n        super().render('./web/template/'+templ, **kwargs)\n        pass\n\n    @tornado.gen.coroutine\n    def prepare(self):\n        super().prepare()\n        self.acct = {}\n        if self.token:\n            err, self.acct = yield from Service.User.get_user_info(self.token, self.id)\n        if self.token is None and self.request.uri != \"/users/signin/\":\n            self.redirect(\"/users/signin/\")\n        \n\nclass WebSocketHandler(tornado.websocket.WebSocketHandler):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n","sub_path":"backend/req.py","file_name":"req.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"267586202","text":"import sys\n\nfrom django.contrib.auth.models import User\nfrom django.utils.safestring import mark_safe\nfrom django import forms\nfrom django.contrib.auth.forms import UserChangeForm\n\n\nclass EditProfileForm(UserChangeForm):\n    def __init__(self, *args, **kwargs):\n        super(EditProfileForm, self).__init__(*args, **kwargs)\n        self.label_suffix = \"\"\n\n    first_name = forms.CharField(\n        max_length=32,\n        min_length=1,\n        strip=True,\n        widget=forms.TextInput(\n            attrs={\n                'class': 'input',\n            }\n        ),\n        label=mark_safe(\"Firstname\"))\n\n    last_name = forms.CharField(\n        max_length=32,\n        min_length=1,\n        strip=True,\n        widget=forms.TextInput(\n            attrs={\n                'class': 'input',\n            }\n        ),\n        label=mark_safe(\"Lastname\"))\n\n    email = forms.EmailField(\n        max_length=32,\n        widget=forms.TextInput(\n            attrs={\n                'class': 'input',\n            }\n        ),\n        label=mark_safe(\"Email
\"))\n\n class Meta:\n model = User\n fields = (\n 'first_name',\n 'last_name',\n 'email',\n )\n\n\nsys.modules[__name__] = EditProfileForm\n","sub_path":"arbitrator/forms/EditProfileForm.py","file_name":"EditProfileForm.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"499760436","text":"# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport os\n\nimport llnl.util.tty as tty\n\n\nclass Umpire(CMakePackage, CudaPackage, ROCmPackage):\n \"\"\"An application-focused API for memory management on NUMA & GPU\n architectures\"\"\"\n\n homepage = 'https://github.com/LLNL/Umpire'\n git = 'https://github.com/LLNL/Umpire.git'\n\n maintainers = ['davidbeckingsale']\n\n version('develop', branch='develop', submodules=True)\n version('main', branch='main', submodules=True)\n version('6.0.0', tag='v6.0.0', submodules=True)\n version('5.0.1', tag='v5.0.1', submodules=True)\n version('5.0.0', tag='v5.0.0', submodules=True)\n version('4.1.2', tag='v4.1.2', submodules=True)\n version('4.1.1', tag='v4.1.1', submodules=True)\n version('4.1.0', tag='v4.1.0', submodules=True)\n version('4.0.1', tag='v4.0.1', submodules=True)\n version('4.0.0', tag='v4.0.0', submodules=True)\n version('3.0.0', tag='v3.0.0', submodules=True)\n version('2.1.0', tag='v2.1.0', submodules=True)\n version('2.0.0', tag='v2.0.0', submodules=True)\n version('1.1.0', tag='v1.1.0', submodules=True)\n version('1.0.1', tag='v1.0.1', submodules=True)\n version('1.0.0', tag='v1.0.0', submodules=True)\n version('0.3.5', tag='v0.3.5', submodules=True)\n version('0.3.4', tag='v0.3.4', submodules=True)\n version('0.3.3', tag='v0.3.3', submodules=True)\n version('0.3.2', tag='v0.3.2', submodules=True)\n version('0.3.1', tag='v0.3.1', submodules=True)\n version('0.3.0', tag='v0.3.0', submodules=True)\n version('0.2.4', tag='v0.2.4', submodules=True)\n version('0.2.3', tag='v0.2.3', submodules=True)\n version('0.2.2', tag='v0.2.2', submodules=True)\n version('0.2.1', tag='v0.2.1', submodules=True)\n version('0.2.0', tag='v0.2.0', submodules=True)\n version('0.1.4', tag='v0.1.4', submodules=True)\n version('0.1.3', tag='v0.1.3', submodules=True)\n\n patch('camp_target_umpire_3.0.0.patch', when='@3.0.0')\n patch('cmake_version_check.patch', when='@4.1')\n patch('missing_header_for_numeric_limits.patch', when='@4.1:5.0.1')\n\n variant('fortran', default=False, description='Build C/Fortran API')\n variant('c', default=True, description='Build C API')\n variant('numa', default=False, description='Enable NUMA support')\n variant('shared', default=True, description='Enable Shared libs')\n variant('openmp', default=False, description='Build with OpenMP support')\n variant('deviceconst', default=False,\n description='Enables support for constant device memory')\n variant('examples', default=True, description='Build Umpire Examples')\n variant('tests', default='none', values=('none', 'basic', 'benchmarks'),\n multi=False, description='Tests to run')\n\n depends_on('cmake@3.8:', type='build')\n depends_on('cmake@3.9:', when='+cuda', type='build')\n\n depends_on('blt@0.4.1:', type='build', when='@6.0.0:')\n depends_on('blt@0.4.0:', type='build', when='@4.1.3:5.0.1')\n depends_on('blt@:0.3.6', type='build', when='@:4.1.2')\n\n # variants +rocm and amdgpu_targets are not automatically passed to\n # dependencies, 
so do it manually.\n depends_on('camp+rocm', when='+rocm')\n for val in ROCmPackage.amdgpu_targets:\n depends_on('camp amdgpu_target=%s' % val, when='amdgpu_target=%s' % val)\n\n depends_on('camp')\n depends_on('camp@0.2.2', when='@6.0.0:')\n depends_on('camp+cuda', when='+cuda')\n for sm_ in CudaPackage.cuda_arch_values:\n depends_on('camp cuda_arch={0}'.format(sm_),\n when='cuda_arch={0}'.format(sm_))\n\n conflicts('+numa', when='@:0.3.2')\n conflicts('~c', when='+fortran', msg='Fortran API requires C API')\n\n # device allocator exports device code, which requires static libs\n # currently only available for cuda.\n conflicts('+shared', when='+cuda')\n\n def cmake_args(self):\n spec = self.spec\n\n options = []\n options.append(\"-DBLT_SOURCE_DIR={0}\".format(spec['blt'].prefix))\n options.append(\"-Dcamp_DIR={0}\".format(spec['camp'].prefix))\n\n if '+cuda' in spec:\n options.extend([\n '-DENABLE_CUDA=On',\n '-DCUDA_TOOLKIT_ROOT_DIR=%s' % (spec['cuda'].prefix)])\n\n if not spec.satisfies('cuda_arch=none'):\n cuda_arch = spec.variants['cuda_arch'].value\n options.append('-DCUDA_ARCH=sm_{0}'.format(cuda_arch[0]))\n options.append('-DCMAKE_CUDA_ARCHITECTURES={0}'.format(cuda_arch[0]))\n flag = '-arch sm_{0}'.format(cuda_arch[0])\n options.append('-DCMAKE_CUDA_FLAGS:STRING={0}'.format(flag))\n\n if '+deviceconst' in spec:\n options.append('-DENABLE_DEVICE_CONST=On')\n else:\n options.append('-DENABLE_CUDA=Off')\n\n if '+rocm' in spec:\n options.extend([\n '-DENABLE_HIP=ON',\n '-DHIP_ROOT_DIR={0}'.format(spec['hip'].prefix)\n ])\n archs = self.spec.variants['amdgpu_target'].value\n if archs != 'none':\n arch_str = \",\".join(archs)\n options.append(\n '-DHIP_HIPCC_FLAGS=--amdgpu-target={0}'.format(arch_str)\n )\n else:\n options.append('-DENABLE_HIP=OFF')\n\n options.append('-DENABLE_C={0}'.format(\n 'On' if '+c' in spec else 'Off'))\n\n options.append('-DENABLE_FORTRAN={0}'.format(\n 'On' if '+fortran' in spec else 'Off'))\n\n options.append('-DENABLE_NUMA={0}'.format(\n 'On' if '+numa' in spec else 'Off'))\n\n options.append('-DENABLE_OPENMP={0}'.format(\n 'On' if '+openmp' in spec else 'Off'))\n\n options.append('-DBUILD_SHARED_LIBS={0}'.format(\n 'On' if '+shared' in spec else 'Off'))\n\n options.append('-DENABLE_BENCHMARKS={0}'.format(\n 'On' if 'tests=benchmarks' in spec else 'Off'))\n\n options.append('-DENABLE_EXAMPLES={0}'.format(\n 'On' if '+examples' in spec else 'Off'))\n\n options.append('-DENABLE_TESTS={0}'.format(\n 'Off' if 'tests=none' in spec else 'On'))\n\n return options\n\n def test(self):\n \"\"\"Perform stand-alone checks on the installed package.\"\"\"\n if self.spec.satisfies('@:1') or \\\n not os.path.isdir(self.prefix.bin):\n tty.info('Skipping: checks not installed in bin for v{0}'.\n format(self.version))\n return\n\n # Run a subset of examples PROVIDED installed\n # tutorials with readily checkable outputs.\n checks = {\n 'malloc': ['99 should be 99'],\n 'recipe_dynamic_pool_heuristic': ['in the pool', 'releas'],\n 'recipe_no_introspection': ['has allocated', 'used'],\n 'strategy_example': ['Available allocators', 'HOST'],\n 'tut_copy': ['Copied source data'],\n 'tut_introspection':\n ['Allocator used is HOST', 'size of the allocation'],\n 'tut_memset': ['Set data from HOST'],\n 'tut_move': ['Moved source data', 'HOST'],\n 'tut_reallocate': ['Reallocated data'],\n 'vector_allocator': [''],\n }\n\n for exe in checks:\n expected = checks[exe]\n reason = 'test: checking output from {0}'.format(exe)\n self.run_test(exe, [], expected, 0, installed=False,\n 
purpose=reason, skip_missing=True,\n work_dir=self.prefix.bin)\n","sub_path":"scripts/uberenv_configs/packages/_old_pkgs/umpire/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":7656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339709418","text":"import torch.nn as nn\nimport torch\nfrom BiLSTM_CRF import BiLSTM_CRF_NER\n\n\nclass BertLSTMCRF(nn.Module):\n def __init__(self, bert, vocab_size, hidden_size, num_tags, device):\n super().__init__()\n self.bert = bert\n embedding_dim = bert.config.to_dict()['hidden_size']\n self.lstmcrf = BiLSTM_CRF_NER(\n vocab_size=vocab_size,\n embedding_dim=embedding_dim,\n hidden_size=hidden_size,\n num_tags=num_tags,\n device=device\n )\n\n def forward(self, x, y):\n with torch.no_grad():\n embedded = self.bert(x)[0]\n\n loss = self.lstmcrf(embedded)\n return loss\n","sub_path":"model/Bert_LSTM_CRF.py","file_name":"Bert_LSTM_CRF.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315666384","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS\n\nimport os, sys\nfrom os import environ\n\nimport requests\nfrom invokes import invoke_http\n\nimport amqp_setup\nfrom amqp_setup import *\n\nimport pika\nimport json\n\napp = Flask(__name__)\nCORS(app)\n\ncustomer_URL = environ.get('customer_URL') or \"http://localhost:5002/customers\"\norder_URL = environ.get('order_URL') or \"http://localhost:5004/order\" \n\n@app.route(\"/delete_customer\", methods=['POST'])\ndef delete_customer():\n # Simple check of input format and data of the request are JSON\n if request.is_json:\n try:\n c_account = request.get_json()\n print(\"\\nReceived a request to delete customer account in JSON:\", c_account)\n\n # do the actual work\n # 1. Send customer id\n result = processDeleteCustomer(c_account)\n print('\\n------------------------')\n print('\\nresult: ', result)\n return jsonify(result), result[\"code\"]\n\n except Exception as e:\n # Unexpected error in code\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n ex_str = str(e) + \" at \" + str(exc_type) + \": \" + fname + \": line \" + str(exc_tb.tb_lineno)\n print(ex_str)\n\n return jsonify({\n \"code\": 500,\n \"message\": \"account_management.py internal error: \" + ex_str\n }), 500\n\n # if reached here, not a JSON request.\n return jsonify({\n \"code\": 400,\n \"message\": \"Invalid JSON input: \" + str(request.get_data())\n }), 400\n\n\ndef processDeleteCustomer(c_account):\n # 2. Delete customer using customer microservice\n # Invoke the customer microservice\n\n print('\\n-----Invoking customer microservice-----')\n customer_result = invoke_http(customer_URL + \"/\" + str(c_account['customer_id']), method='DELETE')\n print('customer_result:', customer_result)\n\n # 3. Check the customer deletion result; if a failure, send it to the error microservice.\n code = customer_result[\"code\"]\n customer_result['type'] = \"delete\"\n customer_result['activity_name'] = \"customer_deletion\"\n message = json.dumps(customer_result)\n\n # check for amqp connection. 
If connection timeout, re-establish connection to amqp\n # The shared connection and channel created when the module is imported may be expired, \n # timed out, disconnected by the broker or a client;\n # - re-establish the connection/channel is they have been closed\n check_setup()\n\n\n if code not in range(200, 300):\n #print('\\n\\n-----Invoking error microservice as order creation fails-----')\n print('\\n\\n-----Publishing the (account error) message with routing_key=account.error-----')\n\n amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key=\"account.error\", \n body=message, properties=pika.BasicProperties(delivery_mode = 2))\n\n print(\"\\Customer deletion status ({:d}) published to the RabbitMQ Exchange:\".format(\n code), customer_result)\n\n # 4. Return error\n return {\n \"code\": 401,\n \"data\": {\n \"customer_result\": customer_result\n },\n \"message\": \"There is an error with customer deletion\"\n }\n else:\n # 5. Record customer deletion result\n # record the activity log anyway\n #print('\\n\\n-----Invoking activity_log microservice-----')\n print('\\n\\n-----Publishing the (account deletion info) message with routing_key=account.info-----') \n\n\n amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key=\"account.info\", \n body=message)\n \n print(\"\\nOrder published to RabbitMQ Exchange.\\n\")\n \n # 6. Remove corresponding orders for deleted customer \n # Invoke order microservice\n \n print('\\n-----Invoking order microservice-----')\n order_result = invoke_http(order_URL + \"/customer_delete/\" + str(c_account['customer_id']), method='DELETE') \n\n print('order_result:', order_result)\n \n # Check the order result;\n # if a failure, send it to the error microservice.\n code = order_result['code']\n order_result['type'] = \"order\"\n order_result['activity_name'] = \"order_deletion\"\n message = json.dumps(order_result)\n\n if code not in range(200, 300):\n #7. Inform the error microservice\n #print('\\n\\n-----Invoking error microservice as order deletion fails-----')\n print('\\n\\n-----Publishing the (order error) message with routing_key=account.error-----')\n\n amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key=\"account.error\", \n body=message, properties=pika.BasicProperties(delivery_mode = 2))\n\n print(\"\\nOrder deletion status ({:d}) published to the RabbitMQ Exchange:\".format(\n code), order_result)\n\n # 8. Return error\n return {\n \"code\": 402,\n \"data\": {\n \"order_result\": order_result\n },\n \"message\": \"Simulated order deletion error sent for error handling.\"\n }\n else:\n # 9. Record order deletions\n # Record the activity log anyway\n #print('\\n\\n-----Invoking activity_log microservice-----')\n print('\\n\\n-----Publishing the (order deletion info) message with routing_key=account.info-----') \n\n amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key=\"account.info\", \n body=message)\n \n print(\"\\nOrder deletion published to RabbitMQ Exchange.\\n\")\n # - reply from the invocation is not used;\n # continue even if this invocation fails\n\n # 10. 
Return created order\n return {\n \"code\": 201,\n \"data\": {\n \"order_result\": order_result,\n \"customer_result\": customer_result\n }\n }\n\n\n# Execute this program if it is run as a main script (not by 'import')\nif __name__ == \"__main__\":\n print(\"This is flask \" + os.path.basename(__file__) + \" for deleting customer account and corresponding existing orders...\")\n app.run(host=\"0.0.0.0\", port=5500, debug=True)\n # Notes for the parameters: \n # - debug=True will reload the program automatically if a change is detected;\n # -- it in fact starts two instances of the same flask program, and uses one of the instances to monitor the program changes;\n # - host=\"0.0.0.0\" allows the flask program to accept requests sent from any IP/host (in addition to localhost),\n # -- i.e., it gives permissions to hosts with any IP to access the flask program,\n # -- as long as the hosts can already reach the machine running the flask program along the network;\n # -- it doesn't mean to use http://0.0.0.0 to access the flask program.\n","sub_path":"account_management.py","file_name":"account_management.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"113213947","text":"import cv2\nimport numpy as np\nfrom gingerit.gingerit import GingerIt\nimport pytesseract\nfrom collections import Counter\nfrom nltk import everygrams\nfrom nltk.corpus import stopwords as sw\nimport en_core_web_sm\nfrom spacy_langdetect import LanguageDetector\nimport re\nimport json\nimport os\n\ndef automatic_brightness_and_contrast(image, clip_hist_percent=25):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Calculate grayscale histogram\n hist = cv2.calcHist([gray],[0],None,[256],[0,256])\n hist_size = len(hist)\n # Calculate cumulative distribution from the histogram\n accumulator = []\n accumulator.append(float(hist[0]))\n for index in range(1, hist_size):\n accumulator.append(accumulator[index -1] + float(hist[index]))\n # Locate points to clip\n maximum = accumulator[-1]\n clip_hist_percent *= (maximum/100.0)\n clip_hist_percent /= 2.0\n # Locate left cut\n minimum_gray = 0\n while accumulator[minimum_gray] < clip_hist_percent:\n minimum_gray += 1\n # Locate right cut\n maximum_gray = hist_size -1\n while accumulator[maximum_gray] >= (maximum - clip_hist_percent):\n maximum_gray -= 1\n # Calculate alpha and beta values\n alpha = 255 / (maximum_gray - minimum_gray)\n beta = -minimum_gray * alpha\n auto_result = cv2.convertScaleAbs(image, alpha=alpha, beta=beta)\n return auto_result\n\ndef resolution_correction(im):\n pxmin = np.min(im)\n pxmax = np.max(im)\n imgContrast = (im - pxmin) / (pxmax - pxmin) * 255\n kernel = np.ones((2, 2), np.uint8)\n imgMorph = cv2.erode(imgContrast, kernel, iterations = 2)\n imgMorph = imgMorph.astype(np.uint8)\n imgMorph = cv2.resize(imgMorph, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)\n return imgMorph\n\ndef fetch_text_from_image_base(im, alphanumeric, correction=True):\n # Applying Image based correction..\n if correction:\n im = resolution_correction(im)\n im = automatic_brightness_and_contrast(im)\n _, threshold = cv2.threshold(im, 127, 255,cv2.THRESH_BINARY)\n else:\n threshold = im\n text = pytesseract.image_to_string(threshold)\n if alphanumeric:\n text = re.sub('[^0-9a-zA-Z]+', ' ', text)\n return text\n\ndef make_text_corrections(txt, custom_stopwords, top_k_ngrams, upper_ngram, known_list_corrections):\n # Module Initialization\n parser = GingerIt()\n nlp = 
en_core_web_sm.load()\n nlp.add_pipe(LanguageDetector(), name='language_detector', last=True)\n\n if len(known_list_corrections) == 0:\n known_list_corrections = {'vou': 'you', 'ves':'yes', 'vea': 'yea'}\n\n corrected_txt = []\n for word in txt.split(' '):\n flag = False\n for key in known_list_corrections.keys():\n if key in word:\n word = word.replace(key, known_list_corrections.get(key))\n corrected_txt.append(word)\n flag = True\n if not flag and word != '':\n corrected_txt.append(word)\n coerced_word = ' '.join(corrected_txt)\n\n # Make grammatical_corrections\n all_changes = [(change.get('start'), len(change.get('text')), change.get('correct')) \n for change in parser.parse(coerced_word).get('corrections')]\n all_changes.sort(key=lambda item: item[0])\n grammatically_corrected_str = ''\n current_ptr = 0\n for start_index, length, suggested in all_changes:\n grammatically_corrected_str += coerced_word[current_ptr:start_index] + ' %s ' % suggested\n current_ptr = start_index + length\n \n # Stopword removal\n stopwords = list(set(sw.words('english') + custom_stopwords))\n filtered_final_str = [word for word in grammatically_corrected_str.split() \n if word.lower() not in stopwords and len(word) > 1]\n filtered_final_str = ' '.join(filtered_final_str)\n \n # Creating N-grams\n bi_trigrams = dict(Counter(list(everygrams(filtered_final_str.lower().split(), 2, upper_ngram))))\n bi_trigrams_tup = [(bi_trigrams.get(key), key) for key in bi_trigrams.keys()]\n bi_trigrams_tup.sort(key=lambda item: item[0], reverse=True)\n \n # Language detection\n import os\n lang_mapper_file = '../dataset/metadata/spacy_lang_mapper.json'\n if os.path.isfile(lang_mapper_file) is not True:\n spacy_lang_mapper = spacy_lang_mapper_dict()\n else:\n spacy_lang_mapper = json.loads(open(lang_mapper_file).read())\n detect_language = spacy_lang_mapper.get(nlp(filtered_final_str)._.language['language'])\n \n return (filtered_final_str, bi_trigrams_tup[:top_k_ngrams], detect_language)\n\ndef fetch_text_from_image(im, alphanumeric=True, perform_nlp=True, custom_stopwords=[], top_k_ngrams=10, upper_ngram=3, \n known_list_corrections=[], correction=True):\n if type(im) != np.ndarray:\n im = imread(im)\n original_text = fetch_text_from_image_base(im, alphanumeric, correction)\n if not perform_nlp:\n return original_text, None\n else:\n custom_stopwords = list(map(lambda x: x.lower(), custom_stopwords))\n corrected_text, top_ngrams, language = make_text_corrections(original_text, custom_stopwords, \n top_k_ngrams, upper_ngram, known_list_corrections)\n return original_text, (corrected_text, top_ngrams, language)\n \ndef spacy_lang_mapper_dict():\n return {\"de\": \"German\", \"el\": \"Greek\", \"en\": \"English\", \"es\": \"Spanish\", \"fr\": \"French\", \"it\": \"Italian\", \"lt\": \"Lithuanian\", \"nb\": \"Norwegian Bokm\\u00e5l\", \"nl\": \"Dutch\", \"pt\": \"Portuguese\", \"xx\": \"Multi-language\", \"af\": \"Afrikaans\", \"ar\": \"Arabic\", \"bg\": \"Bulgarian\", \"bn\": \"Bengali\", \"ca\": \"Catalan\", \"cs\": \"Czech\", \"da\": \"Danish\", \"et\": \"Estonian\", \"eu\": \"Basque\", \"fa\": \"Persian\", \"fi\": \"Finnish\", \"ga\": \"Irish\", \"he\": \"Hebrew\", \"hi\": \"Hindi\", \"hr\": \"Croatian\", \"hu\": \"Hungarian\", \"id\": \"Indonesian\", \"is\": \"Icelandic\", \"ja\": \"Japanese\", \"kn\": \"Kannada\", \"ko\": \"Korean\", \"lb\": \"Luxembourgish\", \"lv\": \"Latvian\", \"mr\": \"Marathi\", \"pl\": \"Polish\", \"ro\": \"Romanian\", \"ru\": \"Russian\", \"si\": \"Sinhala\", \"sk\": \"Slovak\", \"sl\": 
\"Slovenian\", \"sq\": \"Albanian\", \"sr\": \"Serbian\", \"sv\": \"Swedish\", \"ta\": \"Tamil\", \"te\": \"Telugu\", \"th\": \"Thai\", \"tl\": \"Tagalog\", \"tr\": \"Turkish\", \"tt\": \"Tatar\", \"uk\": \"Ukrainian\", \"ur\": \"Urdu\", \"vi\": \"Vietnamese\", \"yo\": \"Yoruba\", \"zh\": \"Chinese\"}","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288176371","text":"from rl_glue import BaseEnvironment\n\n\nclass Environment1D(BaseEnvironment):\n \"\"\"\n Example 1-Dimensional environment\n \"\"\"\n\n def __init__(self):\n \"\"\"Declare environment variables.\"\"\"\n\n # number of valid states\n self.numStates = None\n\n # state we always start in\n self.startState = None\n\n # state we are in currently\n self.currentState = None\n\n # possible actions\n self.actions = [-1, 1]\n\n def env_init(self):\n \"\"\"\n Initialize environment variables.\n \"\"\"\n\n self.numStates = 10\n self.startState = 5\n\n def env_start(self):\n \"\"\"\n The first method called when the experiment starts, called before the\n agent starts.\n\n Returns:\n The first state observation from the environment.\n \"\"\"\n self.currentState = self.startState\n return self.currentState\n\n def env_step(self, action):\n \"\"\"\n A step taken by the environment.\n\n Args:\n action: The action taken by the agent\n\n Returns:\n (float, state, Boolean): a tuple of the reward, state observation,\n and boolean indicating if it's terminal.\n \"\"\"\n\n # action = -1 for left; +1 for right\n self.currentState += self.actions[action]\n\n # This environment will give a +1 reward if the agent terminates on\n # the right, otherwise 0 reward\n if self.currentState == self.numStates:\n terminal = True\n reward = 1.0\n elif self.currentState == 1:\n terminal = True\n reward = 0.0\n else:\n terminal = False\n reward = 0.0\n\n return reward, self.currentState, terminal\n\n def env_message(self, message):\n pass\n","sub_path":"rlglue_example/environment1d.py","file_name":"environment1d.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"313417317","text":"n = int(input())\na = list(map(float, input().split()))\ns = 0\ncount = 0\nfor i in range(n):\n if a[i]<0:\n count += 1\n s += a[i]\n\nprint(count, s)\n","sub_path":"921.py","file_name":"921.py","file_ext":"py","file_size_in_byte":159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"34521381","text":"class Polygon:\r\n\tdef __init__(self,banyak_sisi):\r\n\t\tself.n = banyak_sisi\r\n\t\tself.sisi = [0 for i in range(banyak_sisi)]\r\n\r\n\tdef inputSisi(self):\r\n\t\tprint(\"Masukkan panjang sisi(dalam cm):\")\r\n\t\tself.sisi=[float(input(\"Sisi\"+str(i+1)+\":\"))for i in range(self.n)]\r\n\t\tprint()\r\n\r\n\tdef dispSisi(self):\r\n\t\tfor i in range(self.n):\r\n\t\t\tprint(\"Panjang sisi\",i+1,\"adalah\",self.sisi[i],\"cm\")\r\n\t\t\tprint()\r\n\r\nclass segiEmpat(Polygon):\r\n\tdef __init__(self):\r\n\t\tPolygon.__init__(self,4)\r\n\t\tprint(\"Segiempat\")\r\n\r\n\tdef hitungKeliling(self):\r\n\t\ta,b,c,d=self.sisi\r\n\t\tk=a+b+c+d\r\n\t\tprint(\"Keliling Segiempat adalah\",k,\"cm.\")\r\n\r\npoligon=Polygon(int(input(\"Masukan banyak sisi 
:\")))\r\npoligon.inputSisi()\r\npoligon.dispSisi()\r\n\r\ns4=segiEmpat()\r\ns4.inputSisi()\r\ns4.dispSisi()\r\ns4.hitungKeliling()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"P05-Class polygon.py","file_name":"P05-Class polygon.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"552905569","text":"# SMS SPAM DETECTION USING BAG OF WORDS MODEL\r\n# BY - Omkar Sabnis - 26/06/2018\r\n\r\n# IMPORTING ALL THE MODULES\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport re\r\nimport seaborn as sns\r\nfrom functools import reduce\r\nfrom collections import Counter\r\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer \r\nfrom sklearn.pipeline import Pipeline \r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.model_selection import train_test_split, cross_val_score\r\n\r\ndata = pd.read_csv('Data/SMSSpamCollection.txt',sep='\\t',header=None)\r\ndata.columns = ['label','message']\r\nprint('Entries in the dataset:')\r\nprint(data.head())\r\n\r\n# NUMBER OF HAM AND SPAM MESSAGES\r\nprint(data['label'].value_counts())\r\n\r\n# PREPROCESSING\r\ndef preprocess(text):\r\n output = re.findall('[A-Za-z]+', text.lower())\r\n return output\r\ndata['preprocessed'] = data.message.apply(lambda text:' '.join(preprocess(text)))\r\nprint('Processed Dataset:')\r\nprint(data.head())\r\n\r\n# VISUALIZATIONS\r\nspam_words = reduce(lambda x,y : x+\" \"+y, data[data.label == 'spam'].preprocessed)\r\nham_words = reduce(lambda x,y : x+\" \"+y, data[data.label == 'ham'].preprocessed)\r\nspam_freq = Counter(spam_words.split())\r\nham_freq = Counter(ham_words.split())\r\nsfdf = pd.DataFrame(spam_freq.most_common(), columns = ['word', 'frequency'])\r\nprint('Word Frequencies:')\r\nprint(sfdf.head())\r\n\r\n# PLOTTING THE WORDS THAT OCCUR MOSTLY IN SPAM MESSAGES\r\nfig, ax = plt.subplots(figsize = (30, 15))\r\nsfdf[:20].plot(x = 'word', y = 'frequency', kind = 'bar', width = 0.8, ax = ax, fontsize = 25)\r\nax.set_xlabel('Words')\r\nax.set_ylabel('Frequency')\r\nfor p in ax.patches:\r\n ax.annotate(format(p.get_height()),(p.get_x(),p.get_height()+1.0), fontsize = 25)\r\n#plt.show()\r\n\r\n#PLOTTING THE WORDS THAT OCCUR MOSTLY IN REAL MESSAGES\r\nhfdf = pd.DataFrame(ham_freq.most_common(), columns = ['word', 'frequency'])\r\nprint('Word Frequencies:')\r\nprint(hfdf.head())\r\nfig, ax = plt.subplots(figsize = (30, 15))\r\nhfdf[:20].plot(x = 'word', y = 'frequency', kind = 'bar', ax = ax, fontsize = 25, color = 'b')\r\nax.set_xlabel('Words')\r\nax.set_ylabel('Frequency')\r\n\r\nfor p in ax.patches:\r\n ax.annotate(format(p.get_height()), (p.get_x()-0.1, p.get_height()+1.0), fontsize = 25)\r\n\r\n#plt.show()\r\n\r\n# VISUALIZING MESSAGE LENGTHS\r\ndata['length'] = data.preprocessed.apply(len)\r\nprint('Message Lengths:')\r\nprint(data.head())\r\ndf1 = data[data['label'] == 'ham'].length\r\ndata.hist(column = 'length', by ='label', bins = 50, figsize = (11, 5))\r\n#plt.show()\r\n\r\n# BAG OF WORDS MODEL\r\nx_train, x_test, y_train, y_test = train_test_split(data.preprocessed, data.label, test_size = 0.1, random_state = 2018)\r\nprint(x_train.shape, y_test.shape)\r\nclf = Pipeline([('vect', CountVectorizer()),\r\n ('tfidf', TfidfTransformer()),\r\n ('clf', MultinomialNB())])\r\n\r\nclf.fit(x_train, y_train)\r\ncvs = cross_val_score(clf, x_train, y_train, cv = 10, verbose = 0, n_jobs = 4)\r\n\r\nprint(\"Accuracy : {} +-{}\".format(round(cvs.mean(), 2), 
round(cvs.std(), 2)))\r\n","sub_path":"SpamDetection.py","file_name":"SpamDetection.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338606374","text":"import pickle\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sb\n\n\ndef tau_plots():\n results = pickle._load(open( \"tau_variations.pkl\", \"rb\" ))\n fig_tau, axs_tau = plt.subplots(4, sharex=True, sharey=True)\n\n fig_tau.set_size_inches(10, 20, forward=True)\n fig_tau.suptitle(\"Exploring the tau-parameter\", fontsize=20)\n\n for i,key in enumerate(sorted(results)):\n axs_tau[i].plot(results[key])\n axs_tau[i].set_title(key)\n axs_tau[i].set_ylabel(\"Steps\")\n\n axs_tau[-1].set_xlabel(\"Episodes\")\n fig_tau.savefig(\"tau_variations.png\")\n\ndef lambda_plots():\n results = pickle._load(open(\"lambda_variations.pkl\", \"rb\"))\n\n fig_lamb, axs_lamb = plt.subplots(3, sharex=True, sharey=True)\n\n fig_lamb.set_size_inches(10, 20, forward=True)\n fig_lamb.suptitle(\"Exploring the lambda-parameter\", fontsize=20)\n\n for i, key in enumerate(sorted(results)):\n axs_lamb[i].plot(np.mean(results[key], axis=0))\n axs_lamb[i].set_title(key)\n axs_lamb[i].set_ylabel(\"Steps\")\n\n axs_lamb[-1].set_xlabel(\"Episodes\")\n fig_lamb.savefig(\"lambda_variations.png\")\n\ndef weights_plots():\n results = pickle._load(open(\"weights_variations.pkl\", \"rb\"))\n\n fig_w, axs_w = plt.subplots(3, sharex=True, sharey=True)\n\n fig_w.set_size_inches(10, 20, forward=True)\n fig_w.suptitle(\"Exploring the weights initialization\", fontsize=20)\n\n for i, key in enumerate(results):\n axs_w[i].plot(results[key])\n axs_w[i].set_title(key)\n axs_w[i].set_ylabel(\"Steps\")\n\n axs_w[-1].set_xlabel(\"Episodes\")\n fig_w.savefig(\"weights_variations.png\")\n\ndef vector_field_plots():\n results = pickle._load(open(\"vector_fields2.pkl\", \"rb\"))\n\n fig, axs = plt.subplots(3, sharex=True, sharey=True)\n\n fig.set_size_inches(10, 20, forward=True)\n fig.suptitle(\"Exploring the vector fields\", fontsize=20)\n\n x = np.linspace(-150,30,20)\n dx = np.linspace(-15,15,20)\n u,v = np.meshgrid(x,dx)\n\n for i, key in enumerate(results):\n dummy = np.zeros((results[key][0].shape[0],results[key][0].shape[1]))\n axs[0].quiver(u, v, results[key][0], dummy)\n axs[0].set_title(\"Trial no 1\")\n\n axs[1].quiver(u, v, results[key][20], dummy)\n axs[1].set_title(\"Trial no 20\")\n\n axs[2].quiver(u, v, results[key][99], dummy)\n axs[2].set_title(\"Trial no 100\")\n\n fig.savefig(\"vector_fields.png\")\n\nif __name__ == \"__main__\":\n # lambda_plots()\n # tau_plots()\n # lambda_plots()\n # vector_field_plots()\n weights_plots()\n","sub_path":"Project 2/miniproject2/Aurlien&Paluchowski_MiniProject_2/code/curve.py","file_name":"curve.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"86306862","text":"#!/usr/bin/env python3\nimport random\n\nnotes = [random.randint(0,100) for i in range(35)]\nnotesModif = [0]*35\n\ncounter = 0\nfor note in notes:\n notesModif[counter] = calcNote(note)\n counter += 1\n\ndef calcNote(note):\n diff = note%5\n if diff>=3 :\n return note+1 if diff==4 else note+2\n else :\n return note\n","sub_path":"jour01/job31/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"360676378","text":"\"\"\"For .cowsay module, 
PepeBot wrapper for cow which says things.\n **⚠️🔞 NSFW WARNING**\n +++++CREDIT+++++++\n Code : @NeoMatrix90\n **Ported by @NeoMatrix90 (Ultra Legend)**\n Syntax : .cowsay {text} (cow will say dirty things. BE AWARE)\n \t\t .milksay {text} (A milk guy who hates cow)\n \t\t .tuxsay {text} (Find Out yourself)\n \t\t and many more, find out yourself, I iz Nub.\n\n ***#Curse WHOEVER Change this, is a Gay and will be a Motherfucker, and cannot able to produce children. And he/she is a Bhosdiwala***\n ***🔴 DON'T COPY WITHOUT CREDIT***\n \"\"\"\n\nfrom cowpy import cow\n\nfrom uniborg.util import admin_cmd, edit_or_reply\n\n\n@borg.on(admin_cmd(pattern=r\"^.(\\w+)say (.*)\", allow_sudo=True))\nasync def univsaye(event):\n \"\"\"For .cowsay module, uniborg wrapper for cow which says things.\"\"\"\n if event.text[0].isalpha() or event.text[0] in (\"/\", \"#\", \"@\", \"!\"):\n return\n\n arg = event.pattern_match.group(1).lower()\n text = event.pattern_match.group(2)\n\n if arg == \"cow\":\n arg = \"default\"\n if arg not in cow.COWACTERS:\n return await edit_or_reply(event, \"`This Character is not Supported`\")\n cheese = cow.get_cow(arg)\n cheese = cheese()\n\n await edit_or_reply(event, f\"`{cheese.milk(text).replace('`', '´')}`\")\n","sub_path":"stdplugins/cowsay.py","file_name":"cowsay.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598292747","text":"#14.Find length of list\nlist1 = [\"abc\", \"pqr\", 1, 2, 3, 4, 5]\n#a\nprint(len(list1))\n\n#b\nlength = 0\nfor i in list1:\n length += 1\nprint(length)\n\n#c\ni = 1\nl = 0\nwhile i <= len(list1):\n l += 1\n i += 1\nprint(l)\n\n\n#15.Check if element exists in list\nlist2=[\"virendra\", \"sagar\", 12, 13, 14, [\"a\", \"b\", \"c\"]]\n#a\ndef element_check(a):\n if a in list2:\n return True\n else:\n return False\nprint(element_check(\"sagar\"))\n\n#b\nfor i in list2:\n if i==14:\n print(\"Present\")\n\n\n#16.Ways to clear a list\n#a\nlist3=[1, 2, 3, 4]\nlist3.clear()\nprint(list3)\n\n#b\nlist4=[1, 3, 5, 7, 9]\ndel list4[:]\nprint(list4)\n\n#c\nlist5=[5, 6, 7, 8, 9]\nfor i in range(len(list5)):\n list5.pop()\nprint(list5)\n\n#d\ndef abc(list):\n for i in range(len(list)):\n list.pop()\n\nlist6=[1,2,3,4]\na=filter(abc(list6),list6)\nprint(list(a))\n\n#e\nlist7=[\"pqr\", \"lmn\"]\nb=list(filter(lambda x: x==0, list6))\nprint(b)\n\n\n#17.Reversing a list\n#a\nl=[1, 2, 3, 4, 'a', 'b']\nl.reverse()\nprint(l)\n\n#b\nl1=l[::-1]\nprint(l1)\n\n#c\nl2=[]\nfor i in range((len(l1)-1),-1,-1):\n l2.append(l1[i])\nprint(l2)\n\n\n#18.Cloning or copying a list\n#a\nl=[\"abc\", \"pqr\", \"rst\", 12, 14]\nl2=l.copy()\nprint(l2)\n\n#b\nl3=[]\nfor i in range(len(l2)):\n l3.append(l2[i])\nprint(l3)\n\n#c\nl4=list(l3)\nprint(l4)\n\n\n#19.Count occurrences of an element in list\nl=[1, 2, 1, 1, 2, 3, 4, 5]\n#a\nn=int(input(\"Enter any number from list:\"))\nprint(f\"{n} occurs\",l.count(n),\"times in list.\")\n\n#b\nd={}\nfor i in l:\n if i in d:\n d[i]+=1\n else:\n d[i]=1\nprint(d)\n\n\n#20.Find the sum of elements in list\nl=[3, 4, 5, 5, 7, 8, 9]\n#a\nprint(sum(l))\n\n#b\nsum=0\nfor i in l:\n sum+=i\nprint(sum)","sub_path":"assign_14-20.py","file_name":"assign_14-20.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470503517","text":"from Character.Throws.temp_saving_throw import *\n\nclass TestTempSavingThrow:\n\n def testBaseValues(self):\n tst = TempSavingThrow()\n 
assert tst.fortitude == 0\n assert tst.reflex == 0\n assert tst.will == 0\n\n def testSetValue(self):\n tst = TempSavingThrow()\n tst.will = 12\n assert tst.will == 12\n tst.fortitude = 12\n assert tst.fortitude == 12\n tst.reflex = 12\n assert tst.reflex == 12\n\n\n","sub_path":"Character/Throws/Tests/test_temp_saving_throw.py","file_name":"test_temp_saving_throw.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456785913","text":"\"\"\"\n\nGiven a digit string, return all possible letter combinations that the number could represent.\n\nA mapping of digit to letters (just like on the telephone buttons) is given.\n\n\"\"\"\n\nclass Solution(object):\n def letterCombinations(self, digits):\n dic={\"0\":\" \",\"1\":\"*\",\"2\":\"abc\",\"3\":\"def\",\"4\":\"ghi\",\"5\":\"jkl\",\"6\":\"mno\",\"7\":\"pqrs\",\"8\":\"tuv\",\"9\":\"wxyz\"}\n if digits==\"\": return [] \n x=list(dic[digits[0]])\n for i in range(1,len(digits)):\n x=[a+b for a in x for b in dic[digits[i]]]\n return x\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\" \n\n","sub_path":"017_Letter_Combinations_of_a_Phone_Number.py","file_name":"017_Letter_Combinations_of_a_Phone_Number.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412749000","text":"import asyncio\nimport click\nimport json\nimport yaml as _yaml\nfrom typing import Any, cast\n\nfrom . import __version__\nfrom .client import Kong, KongError\nfrom .consumers import Consumer\nfrom .utils import local_ip\n\n\n@click.command()\n@click.option(\"--version\", is_flag=True, default=False, help=\"Display version and exit\")\n@click.option(\"--ip\", is_flag=True, default=False, help=\"Show local IP address\")\n@click.option(\n \"--key-auth\", help=\"Create or display an authentication key for a consumer\"\n)\n@click.option(\"--yaml\", type=click.File(\"r\"), help=\"Yaml configuration to upload\")\n@click.option(\n \"--clear\", default=False, is_flag=True, help=\"Clear objects not in configuration\"\n)\n@click.pass_context\ndef kong(\n ctx: click.Context,\n version: bool,\n ip: bool,\n key_auth: str,\n yaml: click.File | None,\n clear: bool,\n) -> None:\n if version:\n click.echo(__version__)\n elif ip:\n click.echo(local_ip())\n elif key_auth:\n _run(_auth_key(key_auth))\n elif yaml:\n _run(_yml(yaml, clear))\n else:\n click.echo(ctx.get_help())\n\n\ndef _run(coro: Any) -> None:\n asyncio.get_event_loop().run_until_complete(coro)\n\n\nasync def _yml(yaml: Any, clear: bool) -> None:\n async with Kong() as cli:\n try:\n result = await cli.apply_json(_yaml.safe_load(yaml), clear=clear)\n click.echo(json.dumps(result, indent=4))\n except KongError as exc:\n raise click.ClickException(str(exc))\n\n\nasync def _auth_key(consumer: str) -> None:\n async with Kong() as cli:\n try:\n c = cast(Consumer, await cli.consumers.get(consumer))\n keys = await c.keyauths.get_list()\n if keys:\n key = keys[0]\n else:\n key = await c.keyauths.create()\n click.echo(json.dumps(key.data, indent=4))\n except KongError as exc:\n raise click.ClickException(str(exc))\n","sub_path":"kong/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"625863641","text":"import re\n\n\ndef common_parser(format, message):\n r = re.search(format, message)\n result = r.groupdict() if r else {}\n 
return result\n\n\ndef parse_message(format, message):\n    return common_parser(format, message)\n\n\ndef to_number(number):\n    if number:\n        try:\n            t_number = number.replace(' ', '')\n        except AttributeError:\n            t_number = number\n        try:\n            return int(t_number)\n        except ValueError:\n            try:\n                return float(t_number)\n            except ValueError:\n                return number\n    return number\n","sub_path":"application/sites/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"162415797","text":"import settings\nimport pandas as pd\n\nfrom dskc._util import df_to_list_w_column_idx\nfrom src.data_curation.datasets import rewards_clean\nfrom src.data_curation.pre_processing.settings import REWARD_SLOTS\nfrom src.data_curation.datasets.rewards.clean import REWARDS_N_TOPICS\n\ndef rewards_merge(df,rewards_path):\n    df_rewards = pd.read_csv(rewards_path,index_col=None)\n    df_rewards = rewards_clean(df_rewards)\n\n\n    # initialize reward slots\n    df[\"N_REWARDS\"] = 0\n    for i in range(REWARD_SLOTS):\n        df[\"REWARD_SLOT_{}_AMOUNT\".format(i + 1)] = 0\n        #df[\"REWARD_SLOT_{}_N_PLEDGES\".format(i + 1)] = 0\n\n        #df[\"REWARD_SLOT_{}_DOMINANT_TOPIC\".format(i + 1)] = 0\n        #df[\"REWARD_SLOT_{}_PROB_DOMINANT_TOPIC\".format(i + 1)] = 0\n\n        #for j in range(REWARDS_N_TOPICS):\n        #    df[\"REWARD_SLOT_{}_TOPIC_{}\".format(i+1,j + 1)] = 0\n\n    # for each row\n    for i in range(df.shape[0]):\n\n        # get projects rewards sorted by amount\n        project_rewards = df_rewards[df_rewards[\"PID\"] == df[\"PID\"][i]]\n        project_rewards, c_idx, columns = df_to_list_w_column_idx(project_rewards)\n        project_rewards.sort(key=lambda x: x[c_idx[\"AMOUNT\"]])\n\n        # for each reward\n        for j, row_reward in enumerate(project_rewards):\n            if j + 1 > REWARD_SLOTS:\n                break\n\n            df[\"REWARD_SLOT_{}_AMOUNT\".format(j + 1)][i] = row_reward[c_idx[\"AMOUNT\"]]\n\n            # add topics\n            #df[\"REWARD_SLOT_{}_DOMINANT_TOPIC\".format(j + 1)][i] = row_reward[c_idx[\"title_description_dominant_topic\"]]\n            #df[\"REWARD_SLOT_{}_PROB_DOMINANT_TOPIC\".format(j + 1)][i] = row_reward[c_idx[\"title_description_prob_dominant_topic\"]]\n\n            #for k in range(1,REWARDS_N_TOPICS+1):\n            #    df[\"REWARD_SLOT_{}_TOPIC_{}\".format(j+1,k)] = row_reward[c_idx[\"title_description_topic_{}\".format(k)]]\n\n        df[\"N_REWARDS\"][i] = min(len(project_rewards), REWARD_SLOTS)\n","sub_path":"src/data_curation/pre_processing/rewards.py","file_name":"rewards.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"385329355","text":"'''\r\nUsing the datetime module \r\n - creating date objects\r\nGenerating time-series data\r\n'''\r\n\r\nimport pandas as pd\r\nfrom datetime import datetime\r\nimport numpy as np\r\n\r\n'''\r\nMain Datetime format codes\r\n - year : %Y, %y\r\n - month, day : %m, %d\r\n - hour, minute, second : %H, %M, %S\r\n - abbreviated month, full month : %b, %B\r\n - abbreviated weekday, full weekday : %a, %A\r\n'''\r\n\r\n# 1. Handling multilingual date formats \r\nfdate = ['Jul 11, 2016', 'Aug 11, 2016']\r\ncdate = [ datetime.strptime(x,'%b %d, %Y') for x in fdate ] # list + for\r\nfor c in cdate :\r\n    print(c)\r\n'''\r\n2016-07-11 00:00:00\r\n2016-08-11 00:00:00\r\n'''\r\n\r\n# 2. Generating time-series data\r\ndatas = [datetime(2016,8,1),datetime(2016,8,2),\r\n         datetime(2016,8,3),datetime(2016,8,4)]\r\nts = pd.Series(np.random.randn(4), index=datas)\r\nprint(ts)\r\n\r\n\r\n# For a large amount of data \r\nts = pd.Series(np.random.random(1000),\r\n               index=pd.date_range('2016,01,01', periods=1000))\r\nprint(ts[:5])\r\n\r\nprint(ts['2016-02-01':'2016-05-01'])\r\n\r\nprint(ts[:'2016-05-30'])\r\n\r\n\r\n# How to create an index \r\n\r\n# (1) form : pd.date_range('start date', 'end date', freq='unit')\r\n\r\n# Hour and minute units \r\nindex1 = pd.date_range('2016-01-01', '2016-12-30', freq = '5h30min' )\r\nprint(index1)\r\n\r\n# Month and week units \r\nindex2 = pd.date_range('2016-01-01', '2016-12-30', freq = 'WOM-2WED' )\r\nprint(index2)\r\n\r\n# (2) form : pd.date_range('start date', periods=length, freq='unit')\r\nindex3 = pd.date_range('2016-01-01', periods=12, freq = 'M' )\r\nprint(index3)\r\n\r\nindex4 = pd.date_range('2016-01-01', periods=24, freq = 'Q' )\r\nprint(index4)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"chap18_TimeSeries/ex/step01_tsData.py","file_name":"step01_tsData.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"500964490","text":"#!/usr/bin/python\n# Copyright 2016 Sam Yaple\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software.  If not, see .\n\ndef _debug(name, obj):\n    with open(\"/tmp/debug-%s\" % name, \"w\") as f:\n        f.write(str(obj) + \"\\n\")\n\ntry:\n    import shade\n    HAS_SHADE = True\nexcept ImportError:\n    HAS_SHADE = False\n\nfrom distutils.version import StrictVersion\n\nDOCUMENTATION = '''\n---\nmodule: os_keystone_service\nshort_description: Manage OpenStack Identity services\nextends_documentation_fragment: openstack\nauthor: \"Sam Yaple (@SamYaple)\"\nversion_added: \"2.2\"\ndescription:\n    - Create, update, or delete OpenStack Identity service. 
If a service\n with the supplied name already exists, it will be updated with the\n new description and enabled attributes.\noptions:\n name:\n description:\n - Name of the service\n required: true\n description:\n description:\n - Description of the service\n required: false\n default: None\n enabled:\n description:\n - Is the service enabled\n required: false\n default: True\n service_type:\n description:\n - The type of service\n required: true\n state:\n description:\n - Should the resource be present or absent.\n choices: [present, absent]\n default: present\nrequirements:\n - \"python >= 2.6\"\n - \"shade\"\n'''\n\nEXAMPLES = '''\n# Create a service for glance\n- os_keystone_service:\n cloud: mycloud\n state: present\n name: glance\n service_type: image\n description: OpenStack Image Service\n# Delete a service\n- os_keystone_service:\n cloud: mycloud\n state: absent\n name: glance\n service_type: image\n'''\n\nRETURN = '''\nservice:\n description: Dictionary describing the service.\n returned: On success when I(state) is 'present'\n type: dictionary\n contains:\n id:\n description: Service ID.\n type: string\n sample: \"3292f020780b4d5baf27ff7e1d224c44\"\n name:\n description: Service name.\n type: string\n sample: \"glance\"\n service_type:\n description: Service type.\n type: string\n sample: \"image\"\n description:\n description: Service description.\n type: string\n sample: \"OpenStack Image Service\"\n enabled:\n description: Service status.\n type: boolean\n sample: True\nid:\n description: The service ID.\n returned: On success when I(state) is 'present'\n type: string\n sample: \"3292f020780b4d5baf27ff7e1d224c44\"\n'''\n\n\ndef _needs_update(module, service):\n if service.enabled != module.params['enabled']:\n return True\n if service.description is not None and \\\n service.description != module.params['description']:\n return True\n return False\n\n\ndef _system_state_change(module, service):\n state = module.params['state']\n if state == 'absent' and service:\n return True\n\n if state == 'present':\n if service is None:\n return True\n return _needs_update(module, service)\n\n return False\n\ndef _is_v2(cloud):\n return cloud.cloud_config.get_api_version('identity').startswith('2')\n\ndef _mk_service_tuples(module):\n result = {}\n region = module.params['region']\n enabled = module.params['enabled']\n\n public_endpoint = module.params['public_endpoint']\n admin_endpoint = module.params['admin_endpoint']\n internal_endpoint = module.params['internal_endpoint']\n\n if public_endpoint:\n result[(region, 'public')] = (public_endpoint, enabled)\n if admin_endpoint:\n result[(region, 'admin')] = (admin_endpoint, enabled)\n if internal_endpoint:\n result[(region, 'internal')] = (internal_endpoint, enabled)\n\n return result\n\ndef _service_to_tuple(service):\n return((service['region'], service['interface']))\n\ndef _get_service_ids(endpoints, ep):\n return [e['id'] for e in endpoints if ep['region'] == e['region'] and\n ep['interface'] == e['interface']]\n\ndef _conv_v2_ep_to_v3(endpoints):\n result = list()\n for ep in endpoints:\n for iface in ('public', 'internal', 'admin'):\n entry = dict()\n try:\n url = ep[\"%surl\" % iface]\n except KeyError:\n url = None\n if url:\n entry['id'] = ep['id']\n entry['region'] = ep['region']\n entry['url'] = url\n entry['enabled'] = ep['enabled']\n entry['interface'] = iface\n entry['service_id'] = ep['service_id']\n result.append(entry)\n return result\n\ndef _get_endpoints(cloud, service):\n all_ep = cloud.list_endpoints()\n endpoints 
= [e for e in all_ep if e['service_id'] == service['id']]\n if _is_v2(cloud):\n endpoints = _conv_v2_ep_to_v3(endpoints)\n current = {_service_to_tuple(e):\n (e['url'], e['enabled'], _get_service_ids(endpoints, e))\n for e in endpoints}\n return (endpoints, current)\n\ndef _update_endpoints(cloud, module, service):\n (endpoints, current) = _get_endpoints(cloud, service)\n requested = _mk_service_tuples(module)\n\n add_ep = set(requested.keys()) - set(current.keys())\n del_ep = set(current.keys()) - set(requested.keys())\n mod_ep = set(current.keys()).intersection(set(requested.keys()))\n mod_ep = [ep for ep in mod_ep if current[ep][0] != requested[ep][0] or\n current[ep][1] != requested[ep][1]]\n\n _debug(\"add_ep\", add_ep)\n _debug(\"del_ep\", del_ep)\n _debug(\"mod_ep\", mod_ep)\n _debug(\"requested\", requested)\n\n changed = _del_duplicate_endpoints(cloud, current)\n _debug(\"1\", changed)\n changed = _del_endpoints(cloud, del_ep, current) | changed\n _debug(\"2\", changed)\n changed = _del_endpoints(cloud, mod_ep, current) | changed\n _debug(\"3\", changed)\n changed = _add_endpoints(cloud, add_ep, requested, service, module) | changed\n _debug(\"4\", changed)\n changed = _add_endpoints(cloud, mod_ep, requested, service, module) | changed\n _debug(\"5\", changed)\n (endpoints, current) = _get_endpoints(cloud, service)\n changed = _del_duplicate_endpoints(cloud, current) | changed\n _debug(\"6\", changed)\n\n return changed\n\ndef _del_duplicate_endpoints(cloud, endpoints):\n changed = False\n for ep in endpoints:\n for dupe in endpoints[ep][2][1:]:\n cloud.delete_endpoint(dupe)\n changed = True\n return changed\n\ndef _del_endpoints(cloud, del_ep, current):\n changed = False\n for ep in del_ep:\n service_id = current[ep][2][0]\n cloud.delete_endpoint(service_id)\n changed = True\n return changed\n\ndef _extract_endpoint(requested, add_ep, ep_type):\n key = filter(lambda x: x[1] == ep_type, add_ep)\n if key:\n try:\n endpoint_tuple = requested[key[0]]\n except KeyError:\n return None\n if endpoint_tuple[1] == None:\n return None\n else:\n return endpoint_tuple[0]\n else:\n return None\n\ndef _add_endpoints(cloud, add_ep, requested, service, module):\n region = module.params['region']\n public_endpoint = _extract_endpoint(requested, add_ep, 'public')\n internal_endpoint = _extract_endpoint(requested, add_ep, 'internal')\n admin_endpoint = _extract_endpoint(requested, add_ep, 'admin')\n cloud.create_endpoint(service, public_url=public_endpoint,\n internal_url=internal_endpoint,\n admin_url=admin_endpoint, region=region)\n\n return (bool(public_endpoint) |\n bool(internal_endpoint) |\n bool(admin_endpoint))\n\ndef main():\n argument_spec = openstack_full_argument_spec(\n description=dict(default=None),\n enabled=dict(default=True, type='bool'),\n name=dict(required=True),\n service_type=dict(required=True),\n public_endpoint=dict(default=None),\n admin_endpoint=dict(default=None),\n internal_endpoint=dict(default=None),\n region=dict(default=None),\n state=dict(default='present', choices=['absent', 'present']),\n )\n\n module_kwargs = openstack_module_kwargs()\n module = AnsibleModule(argument_spec,\n supports_check_mode=True,\n **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):\n module.fail_json(msg=\"To utilize this module, the installed version of\"\n \"the shade library MUST be >=1.6.0\")\n\n description = module.params['description']\n enabled = module.params['enabled']\n 
name = module.params['name']\n state = module.params['state']\n service_type = module.params['service_type']\n public_endpoint = module.params['public_endpoint']\n admin_endpoint = module.params['admin_endpoint']\n internal_endpoint = module.params['internal_endpoint']\n region = module.params['region']\n\n try:\n cloud = shade.operator_cloud(**module.params)\n\n services = cloud.search_services(name_or_id=name,\n filters=dict(type=service_type))\n\n if len(services) > 1:\n module.fail_json(msg='Service name %s and type %s are not unique' %\n (name, service_type))\n elif len(services) == 1:\n service = services[0]\n else:\n service = None\n\n if module.check_mode:\n module.exit_json(changed=_system_state_change(module, service))\n\n if state == 'present':\n if service is None:\n service = cloud.create_service(name=name,\n description=description, type=service_type, enabled=True)\n changed = True\n else:\n if _needs_update(module, service):\n service = cloud.update_service(\n service.id, name=name, type=service_type, enabled=enabled,\n description=description)\n changed = True\n else:\n changed = False\n changed = _update_endpoints(cloud, module, service) | changed\n module.exit_json(changed=changed, service=service, id=service.id)\n\n elif state == 'absent':\n if service is None:\n changed=False\n else:\n cloud.delete_service(service.id)\n changed=True\n module.exit_json(changed=changed)\n\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e))\n\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\nif __name__ == '__main__':\n main()\n","sub_path":"provisioning/library/os_keystone_service.py","file_name":"os_keystone_service.py","file_ext":"py","file_size_in_byte":11381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42398199","text":"import sqlite3 \nconn = sqlite3.connect('student.db')\n\nc = conn.cursor()\nsql = \"\"\"\n CREATE TABLE students (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n intro TEXT,\n avatar TEXT\n )\n\"\"\"\nc.execute(sql)\n\nconn.commit()\nconn.close()","sub_path":"etc/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436174793","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ApplicationGatewayRewriteRule(Model):\n \"\"\"Rewrite rule of an application gateway.\n\n :param name: Name of the rewrite rule that is unique within an Application\n Gateway.\n :type name: str\n :param rule_sequence: Rule Sequence of the rewrite rule that determines\n the order of execution of a particular rule in a RewriteRuleSet.\n :type rule_sequence: int\n :param conditions: Conditions based on which the action set execution will\n be evaluated.\n :type conditions:\n list[~azure.mgmt.network.v2019_02_01.models.ApplicationGatewayRewriteRuleCondition]\n :param action_set: Set of actions to be done as part of the rewrite Rule.\n :type action_set:\n ~azure.mgmt.network.v2019_02_01.models.ApplicationGatewayRewriteRuleActionSet\n \"\"\"\n\n _attribute_map = {\n 'name': {'key': 'name', 'type': 'str'},\n 'rule_sequence': {'key': 'ruleSequence', 'type': 'int'},\n 'conditions': {'key': 'conditions', 'type': '[ApplicationGatewayRewriteRuleCondition]'},\n 'action_set': {'key': 'actionSet', 'type': 'ApplicationGatewayRewriteRuleActionSet'},\n }\n\n def __init__(self, *, name: str=None, rule_sequence: int=None, conditions=None, action_set=None, **kwargs) -> None:\n super(ApplicationGatewayRewriteRule, self).__init__(**kwargs)\n self.name = name\n self.rule_sequence = rule_sequence\n self.conditions = conditions\n self.action_set = action_set\n","sub_path":"azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/application_gateway_rewrite_rule_py3.py","file_name":"application_gateway_rewrite_rule_py3.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25174939","text":"from Scripts.LogParser import *\nfrom Scripts.UserLogIO import *\nfrom Scripts.LogAnalyser import *\n\n\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.ticker import NullFormatter # useful for `logit` scale\nfrom matplotlib.legend_handler import HandlerLine2D\n\nimport pandas as pd\nimport numpy as np\n\n\n\n#required class objects\nlogIO = UserLogIO()\nlogParser = LogParser()\n\n\n#load log file\nrawLog = logIO.loadLog('../Datasets/Pilot_User_Study/user2_study1.log')\n\n\n#log analyser class object\nlogAnalyser = LogAnalyser(rawLog)\n\n\n\n\n\nchatCount = logAnalyser.get_P2P_communication_count()\nprint('Chat Counts => ' + str(chatCount))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nplt.figure(1)\n\n\nm_turn = [1 ,6 ,2 ,7 ,2 ,8 ,9 ]\nm_attr = [1 ,8 ,2 ,6 ,3 ,8 ,9 ]\n\n\np_turn = [3 ,2 ,3 ,10 ,5 ,9 ,9 ]\np_attr = [2 ,10 ,9 ,2 ,4 ,3 , 2 ]\n\n\nt_turn = [1 ,4 ,2 ,10 ,4 ,7 ,3]\nt_attr = [1 ,4 ,2 ,2 ,1 ,1 ,2 ]\n\n\npr_turn = [5 ,6 ,3 ,8 ,8 ,8 ,8 ,8]\npr_attr = [4 ,5 ,1 ,7 ,3 ,7 ,2 ,2]\n\n\ne_turn = [3 ,7 ,6 ,10 ,6 ,8 ,4 ,8]\ne_attr = [5 ,10 ,9 ,10 ,3 ,9 ,9 ,7]\n\n\nf_turn = [2 ,4 ,3 ,8 ,7 ,7 ,2 ,3]\nf_attr = [3 ,3 ,2 ,1 ,3 ,3 ,2 ,8]\n\n\n\n\n\nbp = plt.boxplot((m_turn,m_attr, p_turn,p_attr, t_turn,t_attr, pr_turn,pr_attr, e_turn,e_attr, f_turn,f_attr), labels=['Mental (Turn)','Mental (Attr.)', 'Physical (Turn)','Physical (Attr.)', 'Temporal (Turn)','Temporal (Attr.)', 'Performance (Turn)','Performance (Attr.)', 'Effort (Turn)','Effort (Attr.)', 'Frustration (Turn)','Frustration 
(Attr.)'], patch_artist=True)\n\nbp['boxes'][0].set( facecolor = '#777777')\nbp['boxes'][1].set( facecolor = '#CCCCCC')\n\nbp['boxes'][2].set( facecolor = '#777777')\nbp['boxes'][3].set( facecolor = '#CCCCCC')\n\nbp['boxes'][4].set( facecolor = '#777777')\nbp['boxes'][5].set( facecolor = '#CCCCCC')\n\nbp['boxes'][6].set( facecolor = '#777777')\nbp['boxes'][7].set( facecolor = '#CCCCCC')\n\nbp['boxes'][8].set( facecolor = '#777777')\nbp['boxes'][9].set( facecolor = '#CCCCCC')\n\nbp['boxes'][10].set( facecolor = '#777777')\nbp['boxes'][11].set( facecolor = '#CCCCCC')\n\n\n\n\n#bp['medians'][0].set( color = '#FFFFFF')\n\nplt.xlabel('Assessment Dimensions', fontsize=18)\nplt.ylabel('NASA-TLX Scale', fontsize=18)\n\nplt.xticks(rotation=25)\n\n#plt.xticks(x, ['mental'])\n\n\n\n\n# plt.plot(x, [20,26,27, 40, 50], marker='.', label='W. Update')\n# plt.plot([1,2,3,4,5], [19,4,30, 31, 12], marker='.', label='Comms.')\n#\n# plt.grid(True)\nplt.title('NASA-TLX Workload', fontsize=18)\n# plt.xlabel('Collaborator', fontsize=18)\n# plt.ylabel('Frequency', fontsize=18)\n# plt.legend(loc=\"upper left\", fontsize=14)\n\n\n\n\n\n\n\n\nplt.show()\n","sub_path":"Scripts/GraphPlot_NASA_TLX_workflow_composition_pattern.py","file_name":"GraphPlot_NASA_TLX_workflow_composition_pattern.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"554115890","text":"from unit6_to_9 import *\nimport io\n\ndef test_find_primes():\n    limit = 10\n    expected = [2, 3, 5, 7]\n    result = find_primes(limit)\n    assert expected == result\n\n\ndef test_letter_count_alice():\n    filename = \"alice.txt\"\n    expected = [9083, 1621, 2817, 5228, 15085, 2248, 2751, 7581, 7803, 222, 1202, 5053, 2245, 7871, 9244, 1796, 135, 6400, 6981, 11631, 3867, 911, 2696, 170, 2442, 79]\n    actual = letter_count(filename)\n    assert actual == expected\n\n\ndef test_letter_count_not_file():\n    try:\n        letter_count(\"fakefile.txt\")\n        assert True\n    except FileNotFoundError:\n        print(\"Exception not caught\")\n        assert False\n\n\ndef test_reverse_string():\n    string = 'hello'\n    expected = 'olleh'\n    actual = reverse_string_recursive(string)\n    assert expected == actual\n\n\ndef test_reverse_string_empty():\n    string = ''\n    expected = ''\n    actual = reverse_string_recursive(string)\n    assert expected == actual\n\n\ndef test_add(capsys, monkeypatch):\n    pass\n","sub_path":"unit6_to_9_test.py","file_name":"unit6_to_9_test.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"234169680","text":"#Work or sleep in?\n#Prompt the user for a day of the week just like the previous problem. \n#But this time, print \"Go to work\" if it's \n#a work day and \"Sleep in\" if it's a weekend day. \n\nday = int(input('Day (0-6)? '))\nif day == 0 or day == 6:\n    print(\"Sleep in!\")\nelse:\n    print(\"Go to work!\")","sub_path":"Py1_exercises_small/s5_work_or_sleep_in.py","file_name":"s5_work_or_sleep_in.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"84966541","text":"\"\"\"\nVFS Demo App\nCopyright (C) 2014 Kibble Games Inc. In cooperation with Vancouver Film School All Rights Reserved. 
\n\n\"\"\"\nimport os\nimport sys\nimport logging\nimport webapp2\n\nimport json\n\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.ext.webapp import template\n\n\n\"\"\"\nPageController manages JSON translation and message dispatch command handling\n\nAll pages with require AJAX handling should subclass this Class\n\n\"\"\"\nclass PageController( webapp2.RequestHandler ):\n \n CORSAccessAllowed = False\n\n \"\"\"\n @function post\n \n This is the core of responding to AJAX requests.\n The request is expecting a 'cmd' parameter identifying a command to process (other parameters are considered \n application specific\n \n If the command is valid (i.e the child class had a defined function 'do_' then a function pointer\n is generated pointing to the 'do_' and its executed.\n \n The executed command can call either \n \n \"\"\"\n def post(self):\n \"\"\" \n Process the passed instruction and respond\n\n \"\"\" \n # Look for the command argument\n if self.request.params['cmd'] == '':\n # missing argument(s)\n logging.warning('PageController.post() missing command argument.')\n self.send_json_response( {'returnCode': 10} )\n return\n \n cmd = self.request.params['cmd']\n logging.debug('PageController: command['+cmd+'] called.')\n \n # process the command\n command_handler_name = 'do_' + cmd\n if hasattr(self, command_handler_name):\n command_handler = getattr(self, command_handler_name)\n command_handler( self.request.params )\n else:\n self.error(cmd, 11)\n \n return\n\n \"\"\"\n Helper methods to render templates to either strings or directly back to the calling client\n \n \"\"\"\n def render_template(self, htmlTemplate, tValues):\n \n path = os.path.join( os.path.dirname(__file__), htmlTemplate )\n markup = template.render( path, tValues )\n return markup\n\n \n def render_json(self, data):\n \n jsonMarkup = json.dumps( data )\n return jsonMarkup\n \n\n def send_template(self, htmlTemplate, tValues ):\n \"\"\" \n if (self.CORSAccessAllowed):\n self.response.headers.add_header(\"Access-Control-Allow-Origin\", \"*\")\n \"\"\"\n self.response.write( self.render_template( htmlTemplate, tValues ) )\n return\n \n\n def send_json( self, data ):\n \"\"\" \n if (self.CORSAccessAllowed):\n self.response.headers.add_header(\"Access-Control-Allow-Origin\", \"*\")\n \"\"\"\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write( self.render_json( data ) )\n return\n \n \n def send( self, data, asJSON ):\n if (self.CORSAccessAllowed):\n self.response.headers.add_header(\"Access-Control-Allow-Origin\", \"*\")\n \n # assumes data is a rendered template \n responseData = data \n \n # if its a dictionary, reformat as JSON data\n if (asJSON == True):\n self.response.headers['Content-Type'] = 'application/json'\n responseData = self.render_json( data )\n \n self.response.write( responseData ) \n return\n \n \"\"\"\n def options(self):\n \n if (self.CORSAccessAllowed):\n self.response.headers['Access-Control-Allow-Origin'] = '*'\n self.response.headers['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept'\n self.response.headers['Access-Control-Allow-Methods'] = 'POST, GET, PUT, DELETE'\n \"\"\"\n\n\n\n\n\n\n\n\n\n","sub_path":"HTML5/WebApps/Risk_FeSer/app/views/page_controller.py","file_name":"page_controller.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36317299","text":"## heap_queue.py\r\n## Implementation of heap classes: MinHeap 
and MaxHeap.\r\n## The class creates a copy of input list, and heapify it.\r\n## Original input list is not modified (for read-only).\r\n\r\n## Input: list ( int's, float's, char's)\r\n## 2 methods to add data:\r\n## 1. Passing a list in object declaration. Or,\r\n## 2. Adding each element into the heap by using heappush( val).\r\n\r\n\r\n\r\n\r\nclass MinHeap:\r\n\r\n def __init__(self, list_input = []):\r\n ## Constructor of MinHeap. Takes list as input, then heapify it.\r\n ## Retuns None. \r\n\r\n self.data = list_input;\r\n self.heapify();\r\n\r\n return;\r\n \r\n def heapify(self):\r\n # Using heappush() to populate the heap list.\r\n # Returns None.\r\n\r\n temp = self.data.copy();\r\n self.data = [];\r\n\r\n for n in temp:\r\n self.heappush(n);\r\n \r\n #print(\"HEAP: \", self.data);\r\n return; \r\n\r\n def sorted(self):\r\n ## Returns a copy of sorted heap list.\r\n ## This function doesn't modify the heap list/structure.\r\n\r\n ans = [];\r\n temp = self.data.copy(); # Create duplicate copy of heaplist, for restoration later.\r\n\r\n while self.data:\r\n ans.append( self.heappop());\r\n \r\n self.data = temp; # Restores back the heaplist.\r\n\r\n return ans;\r\n\r\n def parent(self, i):\r\n # Returns the parent of index i.\r\n return (i-1)//2;\r\n\r\n def children(self, i):\r\n # Returns the children indexes of index i.\r\n return (i<<1) + 1, (i<<1) + 2 ;\r\n \r\n def move_up(self, i_start):\r\n # Moves up last element to the upper heap chain.\r\n # This modifies the heap list data.\r\n # The function uses recursive calls to move the element.\r\n # Returns None.\r\n\r\n i = i_start;\r\n i_parent = self.parent(i);\r\n\r\n if i <= 0: return;\r\n\r\n if self.data[i] < self.data[i_parent]:\r\n self.data[i], self.data[i_parent] = self.data[i_parent], self.data[i];\r\n self.move_up(i_parent);\r\n\r\n return; \r\n\r\n def move_down(self, i_start = 0):\r\n # Moves down top element to the lower heap chain.\r\n # This modifies the heap list data.\r\n # The function uses recursive calls to move the element.\r\n # Returns None.\r\n\r\n i = i_start;\r\n max_size = len(self.data);\r\n\r\n if i >= max_size: return;\r\n\r\n l, r = self.children(i);\r\n\r\n if r < max_size:\r\n\r\n if self.data[l] < self.data[r]:\r\n\r\n if self.data[l] < self.data[i]:\r\n self.data[i], self.data[l] = self.data[l], self.data[i];\r\n self.move_down(l);\r\n\r\n else:\r\n\r\n if self.data[r] < self.data[i]:\r\n self.data[i], self.data[r] = self.data[r], self.data[i];\r\n self.move_down(r);\r\n \r\n elif l < max_size:\r\n\r\n if self.data[l] < self.data[i]:\r\n self.data[i], self.data[l] = self.data[l], self.data[i];\r\n self.move_down(l);\r\n \r\n return;\r\n \r\n def heappop(self):\r\n # Returns the top element of the heap, then heapify the list.\r\n # If heap list is empty, returns None.\r\n\r\n if not self.data: return None;\r\n elif len(self.data) == 1: return self.data.pop();\r\n\r\n ans = self.data[0];\r\n self.data[0] = self.data.pop();\r\n self.move_down();\r\n\r\n return ans;\r\n\r\n def heappush(self, x):\r\n # Adds a new element at bottom of heap list, then heapify the list.\r\n # Returns None.\r\n \r\n self.data.append(x);\r\n self.move_up(len(self.data)-1);\r\n\r\n\r\n## MaxHeap class inherited from MinHeap class.\r\n## The move_up and move_down methods are re-defined for MaxHeap.\r\n \r\nclass MaxHeap(MinHeap):\r\n\r\n def __init__(self, list_input = []):\r\n ## Constructor of MaxHeap. Inherit from MinHeap class.\r\n ## Takes list as input, then heapify it.\r\n ## Retuns None. 
\r\n\r\n MinHeap.__init__(self, list_input); \r\n return;\r\n \r\n def move_up(self, i_start):\r\n # Moves up last element to the upper heap chain.\r\n # This modifies the heap list data.\r\n # The function uses recursive calls to move the element.\r\n # Returns None.\r\n\r\n i = i_start;\r\n i_parent = self.parent(i);\r\n\r\n if i <= 0: return;\r\n\r\n if self.data[i] > self.data[i_parent]:\r\n self.data[i], self.data[i_parent] = self.data[i_parent], self.data[i];\r\n self.move_up(i_parent);\r\n\r\n return; \r\n\r\n def move_down(self, i_start = 0):\r\n # Moves down top element to the lower heap chain.\r\n # This modifies the heap list data.\r\n # The function uses recursive calls to move the element.\r\n # Returns None.\r\n \r\n i = i_start;\r\n max_size = len(self.data);\r\n\r\n if i >= max_size: return;\r\n\r\n l, r = self.children(i);\r\n\r\n if r < max_size:\r\n\r\n if self.data[l] > self.data[r]:\r\n\r\n if self.data[l] > self.data[i]:\r\n self.data[i], self.data[l] = self.data[l], self.data[i];\r\n self.move_down(l);\r\n\r\n else:\r\n\r\n if self.data[r] > self.data[i]:\r\n self.data[i], self.data[r] = self.data[r], self.data[i];\r\n self.move_down(r);\r\n \r\n elif l < max_size:\r\n\r\n if self.data[l] > self.data[i]:\r\n self.data[i], self.data[l] = self.data[l], self.data[i];\r\n self.move_down(l);\r\n \r\n return;\r\n","sub_path":"heap_queue.py","file_name":"heap_queue.py","file_ext":"py","file_size_in_byte":5720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441425404","text":"import sys\nimport numpy as np\nimport copy\nif __name__ == \"__main__\":\n\tfilename = sys.argv[1]\n\tif len(sys.argv) == 3:\n\t\tp = float(sys.argv[2])\n\telse:\n\t\tp = 1.0\n\twith open(filename) as f:\n\t\ta = np.array(f.readlines())\n\t\tarray = np.array([b.split(' ') for b in a]).astype(int)\n\t# print(array)\n\tcounterarray = np.zeros(array.shape, dtype = int)\n\tcounter = 0\n\tendstate = []\n\tfor i in range(array.shape[0]):\n\t\tfor j in range(array.shape[1]):\n\t\t\telement = array[i][j]\n\t\t\tif element == 0:\n\t\t\t\tcounterarray[i,j] = counter\n\t\t\t\tcounter+=1\n\t\t\telif element == 2:\n\t\t\t\tcounterarray[i,j] = startstate = counter\n\t\t\t\tcounter+=1\n\t\t\telif element == 3:\n\t\t\t\tcounterarray[i,j] = counter\t\n\t\t\t\tendstate.append(counter)\n\t\t\t\tcounter+=1\n\tnumstates = counter\n\tprint(\"numStates\" , numstates)\n\tprint(\"numActions\" , 4)\n\tprint(\"start\" , startstate)\n\tprint(\"end\" , )\n\tfor i in endstate:\n\t\tprint(i ,)\n\tprint()\n\t# print(counterarray)\n\t# print(startstate)\n\t# print(endstate)\n\tnumstates = counter\n\tdictionary = {}\n\tfor i in range(array.shape[0]):\n\t\tfor j in range(array.shape[1]):\n\t\t\tif array[i][j] == 2 or array[i][j] == 0:\n\t\t\t\tleft = max(j-1,0)\n\t\t\t\tright = min(j+1,array.shape[1] - 1)\n\t\t\t\tupper = max(i-1,0)\n\t\t\t\tlower = min(i+1, array.shape[0] - 1)\n\t\t\t\t# possible_moves = np.array([array[i,left], array[i,right] , array[upper,j] , array[lower, j]])\n\t\t\t\t# possible_moves = (possible_moves != 1)\n\t\t\t\t# denominator = sum(possible_moves)\n\t\t\t\t# actual_moves = [counterarray[i,j], counterarray[i,j], counterarray[i,j], counterarray[i,j]]\n\t\t\t\t# for k in range(4):\n\t\t\t\t# \tif possible_moves[k]:\n\t\t\t\t# \t\tif k == 0:\n\t\t\t\t# \t\t\tactual_moves[k] = counterarray[i,left]\n\t\t\t\t# \t\telif k == 1:\n\t\t\t\t# \t\t\tactual_moves[k] = counterarray[i, right]\n\t\t\t\t# \t\telif k == 2:\n\t\t\t\t# \t\t\tactual_moves[k] = 
counterarray[upper, j]\n\t\t\t\t# \t\telif k == 3:\n\t\t\t\t# \t\t\tactual_moves[k] = counterarray[lower,j]\n\t\t\t\t# for action1 in range(4):\n\t\t\t\t# \tif possible_moves[action1]:\n\t\t\t\t# \t\tdenominator = sum(possible_moves)\n\t\t\t\t# \t\tactual_prob = p + (1 - p)/denominator\n\t\t\t\t# \t\tresidual_prob = (1 - p)/denominator\n\t\t\t\t# \t\tfor action2 in range(4):\n\t\t\t\t# \t\t\tif possible_moves[action2]:\n\t\t\t\t# \t\t\t\tif (action2 == action1):\n\t\t\t\t# \t\t\t\t\tprint \"transition\" ,counterarray[i,j],action1, actual_moves[action2],-1,actual_prob\n\t\t\t\t# \t\t\t\telse:\n\t\t\t\t# \t\t\t\t\tprint \"transition\" ,counterarray[i,j],action1, actual_moves[action2],-1,residual_prob\n\t\t\t\t# \telse:\n\t\t\t\t# \t\tprint \"transition\" ,counterarray[i,j],action1, counterarray[i,j],-1000,1.0\n\n\t\t\t\tif array[i, left]!= 1:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],0, counterarray[i,left],-1,1)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],0, counterarray[i,j],-100000,1)\n\n\t\t\t\tif array[i, right]!= 1:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],1, counterarray[i,right],-1,1)\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],1, counterarray[i,j],-100000,1)\n\n\t\t\t\tif array[upper, j]!= 1:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],2, counterarray[upper,j],-1,1)\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],2, counterarray[i,j],-100000,1)\n\n\t\t\t\tif array[lower, j]!= 1:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],3, counterarray[lower,j],-1,1)\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tprint(\"transition\" ,counterarray[i,j],3, counterarray[i,j],-100000,1)\n\n\t# print(dictionary)\n\t\t\n\tprint(\"discount\" , \"\", 1.0)","sub_path":"MDP/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"510983019","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2005 onwards University of Deusto\n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution.\n#\n# This software consists of contributions made by many individuals,\n# listed below:\n#\n# Author: Pablo Orduña \n#\nimport sys\nimport voodoo.log as log\nfrom functools import wraps\n\nimport weblab.configuration_doc as configuration_doc\nimport weblab.data.dto.experiments as Experiment\nfrom weblab.data.experiments import ExperimentId\n\ntry:\n import ZSI\nexcept ImportError:\n ZSI_AVAILABLE = False\nelse:\n ZSI_AVAILABLE = True\n\nimport SimpleXMLRPCServer\nimport datetime\nimport traceback\n\nimport weblab.comm.context as RemoteFacadeContext\nimport weblab.comm.codes as RemoteFacadeManagerCodes\n\nUNEXPECTED_ERROR_MESSAGE_TEMPLATE = \"Unexpected error ocurred in WebLab-Deusto. Please contact the administrator at %s\"\nSERVER_ADMIN_EMAIL = 'server_admin'\nDEFAULT_SERVER_ADMIN_EMAIL = ''\n\n\ndef check_exceptions(exceptions_to_check):\n for i, (exc, _, _) in enumerate(exceptions_to_check):\n for exc2, _, _ in exceptions_to_check[i + 1:]:\n if issubclass(exc2, exc):\n raise AssertionError(\"In Facade Exceptions the order is important. There can't be any exception that is a subclass of a previous exception. 
In this case %s is before %s\" % (exc, exc2))\n\n def real_check_exceptions(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except Exception as e:\n for exc, code, propagate in exceptions_to_check:\n if issubclass(e.__class__, exc):\n if propagate or self._cfg_manager.get_doc_value(configuration_doc.DEBUG_MODE):\n log.log(\n self.__class__,\n log.level.Info,\n \"%s raised on %s: %s: %s\" % ( exc.__name__, func.__name__, e, e.args)\n )\n log.log_exc(self.__class__, log.level.Debug)\n return self._raise_exception(code, e.args[0])\n else:\n # WebLabInternalServerError\n log.log(\n self.__class__,\n log.level.Warning,\n \"Unexpected %s raised on %s: %s: %s\" % ( exc.__name__, func.__name__, e, e.args)\n )\n log.log_exc(self.__class__, log.level.Info)\n return self._raise_exception(RemoteFacadeManagerCodes.WEBLAB_GENERAL_EXCEPTION_CODE, UNEXPECTED_ERROR_MESSAGE_TEMPLATE % self._cfg_manager.get_value(SERVER_ADMIN_EMAIL, DEFAULT_SERVER_ADMIN_EMAIL) )\n\n return wrapper\n return real_check_exceptions\n\ndef check_nullable(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n response = func(self, *args, **kwargs)\n return self._check_nullable_response(response)\n return wrapper\n\nclass AbstractRemoteFacadeManager(object):\n def __init__(self, cfg_manager, server):\n super(AbstractRemoteFacadeManager, self).__init__()\n self._server = server\n self._cfg_manager = cfg_manager\n\n def _get_client_address(self):\n return RemoteFacadeContext.get_context().get_ip_address()\n\ndef _propagate_stack_trace(cfg_manager, msg):\n formatted_exc = traceback.format_exc()\n propagate = cfg_manager.get_doc_value(configuration_doc.PROPAGATE_STACK_TRACES_TO_CLIENT)\n if propagate:\n msg = str(msg) + \"; Traceback: \" + formatted_exc\n return msg\n\nclass AbstractZSI(object):\n def _raise_exception(self, code, msg):\n if ZSI_AVAILABLE:\n msg = _propagate_stack_trace(self._cfg_manager, msg)\n raise ZSI.Fault( 'ZSI:' + code, msg )\n else:\n msg = \"Optional library 'ZSI' is not available, so SOAP clients will not be supported. 
However, AbstractZSI is being used, so problems will arise\"\n            log.log( self, log.level.Error, msg )\n            print >> sys.stderr, msg\n\nclass JSONError(Exception):\n    pass\n\nclass AbstractJSON(object):\n    def _raise_exception(self, code, msg):\n        msg = _propagate_stack_trace(self._cfg_manager, msg)\n        raise JSONError({ 'is_exception' : True, 'code' : 'JSON:' + code, 'message' : msg })\n\nclass AbstractXMLRPC(object):\n    def _raise_exception(self, code, msg):\n        msg = _propagate_stack_trace(self._cfg_manager, msg)\n        raise SimpleXMLRPCServer.Fault('XMLRPC:' + code, msg)\n\n    def _parse_experiment_id(self, exp_id):\n        return ExperimentId(\n            exp_id['exp_name'],\n            exp_id['cat_name']\n        )\n\n    def _fix_dates_in_experiments(self, experiments_allowed):\n        for experiment_allowed in experiments_allowed:\n            experiment = experiment_allowed.experiment\n            experiment_allowed.experiment = Experiment.Experiment(experiment.name, experiment.category,\n                datetime.datetime( experiment.start_date.year, experiment.start_date.month, experiment.start_date.day ),\n                datetime.datetime( experiment.end_date.year, experiment.end_date.month, experiment.end_date.day )\n            )\n\n    def _check_nullable_response(self, response):\n        # In XML-RPC, None doesn't exist\n        return response or ''\n\n","sub_path":"server/src/weblab/comm/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"507748762","text":"# Copyright © 2015 Andrew Wilcox and Elizabeth Myers.\n# All rights reserved.\n# This file is part of the PyIRC 3 project. See LICENSE in the root directory\n# for licensing information.\n\n\n\"\"\"Some alternate nick handlers.\n\nThis contains an underscore-appending handler and a number-substituting\n(leetifying) handler.\n\n\"\"\"\n\n\nfrom logging import getLogger\n\nfrom taillight.signal import SignalStop\n\nfrom PyIRC.signal import event\nfrom PyIRC.numerics import Numerics\nfrom PyIRC.extensions import BaseExtension\n\n\n_logger = getLogger(__name__)  # pylint: disable=invalid-name\n\n\nclass UnderscoreAlt(BaseExtension):\n    \"\"\"This class attempts to append underscores to the nickname.\n\n    If :py:class:`~PyIRC.extensions.ISupport` is present, it will try until\n    the maximum nick length is reached; otherwise, it will try 5 times.\n\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.attempt_nick = self.nick  # from base\n        self.attempts = 0\n\n    @event(\"commands\", Numerics.ERR_NICKNAMEINUSE, priority=-1000)\n    @event(\"commands\", Numerics.ERR_ERRONEOUSNICKNAME, priority=-1000)\n    @event(\"commands\", Numerics.ERR_NONICKNAMEGIVEN, priority=-1000)\n    def change_nick(self, _, line):\n        \"\"\"Try to complete registration with a long _.\"\"\"\n        if self.registered:\n            # Don't care!\n            raise SignalStop()\n\n        isupport = self.get_extension(\"ISupport\")\n        if not isupport:\n            if self.attempts >= 5:\n                # Give up, but maybe something else can try...\n                return\n        elif len(self.attempt_nick) == isupport.get(\"NICKLEN\"):\n            # Nick is too long! 
This isn't gonna work.\n return\n\n self.attempt_nick += '_'\n self.attempts += 1\n self.send(\"NICK\", [self.attempt_nick])\n raise SignalStop()\n\n\nclass NumberSubstitueAlt(BaseExtension):\n \"\"\"This class attempts to substitute letters for numbers and vis versa.\n\n This extension will try until all opportunities for leetifying have been\n exhausted.\n\n \"\"\"\n\n leetmap = {\n 'A': '4',\n 'a': '4',\n 'B': '8',\n 'E': '3',\n 'e': '3',\n 'G': '6',\n 'g': '9',\n 'I': '1',\n 'i': '1',\n 'O': '0',\n 'o': '0',\n 'S': '5',\n 's': '5',\n 'T': '7',\n 't': '7',\n '`': '\\\\',\n }\n\n unleetmap = {v: k for k, v in leetmap.items()}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.attempt_nick = self.nick # from base\n self.index = 0 # The present nick index\n\n @event(\"commands\", Numerics.ERR_NICKNAMEINUSE, priority=-1000)\n @event(\"commands\", Numerics.ERR_ERRONEOUSNICKNAME, priority=-1000)\n @event(\"commands\", Numerics.ERR_NONICKNAMEGIVEN, priority=-1000)\n def change_nick(self, _, line):\n \"\"\"Try to complete registration by being a 1337 h4x0r.\"\"\"\n if self.registered:\n # Don't care!\n raise SignalStop()\n\n while self.index < len(self.attempt_nick):\n # Try to leetify a letter\n char = self.attempt_nick[self.index]\n if self.index > 0 and char in self.leetmap:\n # Nicks can't begin with any character in leetmap.\n char = self.leetmap[char]\n elif char in self.unleetmap:\n char = self.unleetmap[char]\n else:\n self.index += 1\n continue\n\n # Munge!\n self.attempt_nick = (self.attempt_nick[:self.index] + char +\n self.attempt_nick[self.index + 1:])\n self.send(\"NICK\", [self.attempt_nick])\n self.index += 1\n raise SignalStop()\n","sub_path":"PyIRC/extensions/altnick.py","file_name":"altnick.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"228274178","text":"\n\n#calss header\nclass _BROAD():\n\tdef __init__(self,): \n\t\tself.name = \"BROAD\"\n\t\tself.definitions = [u'very wide: ', u'If something is a particular distance broad, it measures this distance from side to side: ', u'including a wide range of things; general: ', u'If someone has a broad accent (= way of speaking), it is strong and noticeable, showing where they come from: ', u'a hint (= when you tell someone something without saying it directly) that is easy to understand']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_broad.py","file_name":"_broad.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66520756","text":"import pytest\nfrom src.search.find_positive_integer_solution_for_a_given_equation import functions\n\nmultiple_params = True\n\ndata = [\n ((lambda x, y: x + y, 5), [[1, 4], [2, 3], [3, 2], [4, 1]]),\n ((lambda x, y: x * y, 5), [[1, 5], [5, 1]])\n]\n\ntestdata = [i + (f,) for i in data for f in functions]\n\n\n@pytest.mark.parametrize(\"params, expected, f\", testdata)\ndef test_func(params, expected, f):\n if multiple_params:\n result = f(*params)\n else:\n result = f(params)\n assert result == 
expected\n","sub_path":"src/search/tests/test_find_positive_integer_solution_for_a_given_equation.py","file_name":"test_find_positive_integer_solution_for_a_given_equation.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421892440","text":"\"\"\"\nGiven a string S, find length of the longest substring with all distinct characters. For example, for input \"abca\", the output is 3 as \"abc\" is the longest substring with all distinct characters.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases.\nThe first line of each test case is String str.\n\nOutput:\nPrint length of smallest substring with maximum number of distinct characters.\nNote: The output substring should have all distinct characters.\n\nConstraints:\n1 ≤ T ≤ 100\n1 ≤ size of str ≤ 10000\n\nExample:\nInput:\n2\nabababcdefababcdab\ngeeksforgeeks\n\nOutput:\n6\n7\n\"\"\"\n\n\ndef longest_distinct(string):\n longest = 0\n beginning_index = 0\n\n for i in range(len(string)):\n\n char = string[i]\n\n if char in string[beginning_index:i]:\n longest = max(longest, len(string[beginning_index:i]))\n beginning_index = string.index(char, beginning_index) + 1\n\n return max(longest, len(string[beginning_index:]))\n\n\nif __name__ == '__main__':\n\n test_cases = int(input())\n\n for i in range(test_cases):\n test = input()\n print(longest_distinct(test))\n","sub_path":"geeks_for_geeks/string/longest_distinct_characters_in_string.py","file_name":"longest_distinct_characters_in_string.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474980026","text":"# %load q05_top_10_plotting/build.py\n# default imports\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom greyatomlib.olympics_project_new.q04_find_top_10.build import q04_find_top_10, q03_better_event, q02_country_operations, q01_rename_columns\nplt.switch_backend('agg')\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \nOlympicsDF=q02_country_operations(OlympicsDF)\nOlympicsDF=q03_better_event(OlympicsDF) \nTop10Summer,Top10Winter, Top10, Common =q04_find_top_10(OlympicsDF,'Total_Summer', 'Total_Winter','Total')\ndef q05_top_10_plotting(OlympicsDF,Top10Summer,Top10Winter,Top10):\n sl,wl,tl=[],[],[]\n for i in range(0,10):\n s=OlympicsDF.Total_Summer[OlympicsDF['Total_Summer'].notnull()&(OlympicsDF['Country_Name']==Top10Summer[i])]\n sl+=list(s)\n w=OlympicsDF.Total_Winter[OlympicsDF['Total_Winter'].notnull()&(OlympicsDF['Country_Name']==Top10Winter[i])]\n wl+=list(w)\n t=OlympicsDF.Total_Winter[OlympicsDF['Total'].notnull()&(OlympicsDF['Country_Name']==Top10[i])]\n tl+=list(t) \n plt.bar(Top10Summer,sl)\n plt.xlabel('country')\n plt.ylabel('medal count')\n plt.title('Top10Summer')\n plt.xticks(Top10Summer,rotation=90)\n plt.show()\n plt.bar(Top10Winter,wl)\n plt.xlabel('country')\n plt.ylabel('medal count')\n plt.title('Top10Winter')\n plt.xticks(Top10Winter,rotation=90)\n plt.show()\n plt.bar(Top10,tl)\n plt.xlabel('country')\n plt.ylabel('medal count')\n plt.title('Top10')\n plt.xticks(Top10,rotation=90)\n plt.show()\nq05_top_10_plotting(OlympicsDF,Top10Summer,Top10Winter,Top10) \n \n\n\n\n\n\n\n","sub_path":"q05_top_10_plotting/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"423174008","text":"import sys\r\nsys.path.append('/home/davis/Desktop/work stuff/pymol/scripts/modules')\r\nsys.path.append('/home/davis/Desktop/work stuff/calculator/modules')\r\n\r\nfrom os import chdir\r\nfrom useful import one_letter\r\nfrom useful import CIDict\r\nimport csv\r\nimport warnings\r\nimport numpy as np\r\nfrom Bio.PDB import PDBParser\r\nfrom biodata import file_dict\r\nimport ezb\r\n\r\nchdir('/home/davis/Desktop/work stuff/final paper/figures')\r\n#chdir(r'C:\\Users\\Nanda Lab\\Desktop\\Alex\\final paper\\figures')\r\n\r\nstructure_files = file_dict('structures with 1qd5', ['aligned_(.*).pdb'])\r\nparser = PDBParser()\r\n\r\nwith warnings.catch_warnings():\r\n warnings.simplefilter('ignore')\r\n structures = [(name, parser.get_structure(name, path)) \\\r\n for name, path in structure_files.items()]\r\n structures = CIDict(structures)\r\n\r\nwith open('cored 1 selections with 1qd5.csv', 'rb') as f:\r\n reader = csv.reader(f)\r\n inclusive_selections = ezb.selections_by_resi(reader)\r\n \r\nwith open('beta_selections.csv', 'rb') as f:\r\n reader = csv.reader(f)\r\n beta_selections = ezb.selections_by_resi(reader)\r\n\r\nexclusive_selections = CIDict()\r\n \r\nfor name, selection in beta_selections.items():\r\n exclusive_selections.update(((name, list(set(selection).intersection( \r\n set(inclusive_selections[name])))),))\r\n \r\n\r\nwith open('cored 1 centers with 1qd5.csv', 'rb') as f:\r\n reader = csv.reader(f)\r\n inc_centers = ezb.load_centers(reader)\r\n\r\nwith open('exc centers.csv', 'rb') as f:\r\n exc_centers = ezb.load_centers(csv.reader(f))\r\n \r\ndef moments(path, selections, centers):\r\n with open('published params.csv', 'rb') as f:\r\n reader = csv.reader(f)\r\n new_calc = ezb.Calculator(reader, normalize = True)\r\n\r\n moments = CIDict()\r\n for name in structures.keys():\r\n if name.upper() == '1QD5':\r\n continue\r\n\r\n moments.update({name:\r\n ezb.moment(structures[name], selections[name],\r\n centers[name], new_calc,\r\n paramless_option = '.5',\r\n old_style_gly = True) })\r\n\r\n with open(path, 'wb') as f:\r\n target = csv.writer(f)\r\n for name, moment in moments.items():\r\n row = [name.upper()] + [str(component) for component in moment]\r\n target.writerow(row)\r\n\r\nmoments('inclusive_moment.csv',inclusive_selections, inc_centers)\r\nmoments('exclusive_moment.csv',exclusive_selections, exc_centers)\r\n\r\n\r\n# Now the eisenberg stuff:\r\n\r\neisenberg_values = CIDict()\r\nwith open('eisenberg.csv', 'rb') as f:\r\n freader = csv.reader(f) \r\n for row in freader:\r\n eisenberg_values.update({row[0]: float(row[1])})\r\n \r\ndef eisenberg(residue, ref = eisenberg_values):\r\n return ref[residue.get_resname()]\r\n \r\ndef flexible_moments(path, selections, centers, function):\r\n moments = list()\r\n for name in structures.keys():\r\n \r\n if name.lower() == '1qd5':\r\n continue\r\n\r\n structure = structures[name]\r\n center = centers[name]\r\n selection = selections[name]\r\n \r\n sum_ = np.zeros(3)\r\n for residue in structure.get_residues():\r\n if residue.get_id()[1] not in selection:\r\n continue\r\n\r\n # My selection files just give residue numbers. If two residues\r\n # have the same number, if there're insertions, then the selection\r\n # files are ambiguous and I need to change how I do them.\r\n # For now, anything identified by more than just a residue number\r\n # is ignored. 
The reason this is okay is that, as far as I can tell,\r\n # in Dan's structures that just means it ignores water.\r\n # If the structures have complex residue ids, then I need different\r\n # selection files.\r\n if residue.get_id()[0] != ' ':\r\n if residue.get_id()[0] != 'W':\r\n print('Ignored residue with id {0} in structure {1}'\\\r\n .format(residue.get_id(), structure))\r\n continue\r\n\r\n resn = one_letter[residue.get_resname()]\r\n\r\n # Vector points from center to Ca\r\n coordinates = residue.child_dict['CA'].get_coord()\r\n vector = coordinates - center\r\n # Take the projection on the xy plane\r\n vector[2] = 0\r\n # Normalize the vector\r\n normalized = vector / np.linalg.norm(vector) \r\n # Give it a magnitude determined by the 'function' argument \r\n complete = normalized * function(residue)\r\n sum_ += complete \r\n\r\n moments.append((name, sum_))\r\n\r\n with open(path, 'wb') as f:\r\n target = csv.writer(f)\r\n for name, moment in moments:\r\n row = [name.upper()] + \\\r\n [str(component) for component in moment]\r\n target.writerow(row) \r\n \r\nflexible_moments('inc_eisenmoment.csv',inclusive_selections, inc_centers, eisenberg)\r\nflexible_moments('exc_eisenmoment.csv',exclusive_selections, exc_centers, eisenberg) \r\nprint('done')\r\n","sub_path":"fall 2011 final paper data/generate moments.py","file_name":"generate moments.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"453405980","text":"def merge_sort(my_list):\n # need a base case\n if len(my_list) == 1:\n return\n \n # split the list in half\n mid = len(my_list)//2\n left_half = my_list[:mid]\n right_half = my_list[mid:]\n \n # sort each half\n merge_sort(left_half)\n merge_sort(right_half)\n \n # at this point, left_half and right_half are sorted\n \n # merge sorted halves together\n # use indicies to keep track of tops of sorted halves and bottom of the \n # original list\n \n i = 0 # top of the left half\n j = 0 # top of the right half\n k = 0 # bottom of the sorted pile\n \n while i < len(left_half) and j < len(right_half):\n # compare the tops of each half\n if left_half[i] < right_half[j]:\n my_list[k] = left_half[i]\n i+=1\n else:\n my_list[k] = right_half[j]\n j+=1\n k+=1\n \n # merge anything left in the left half \n while i < len(left_half):\n my_list[k] = left_half[i]\n i+=1 \n k+=1\n \n while j < len(right_half):\n my_list[k] = right_half[j]\n j+=1\n k+=1\n\nif __name__ == \"__main__\":\n my_list = [2, 5, 1, 6, 4, 3]\n merge_sort(my_list)\n print(my_list)","sub_path":"2014 summer/week 4/LEC 2/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541992783","text":"from django.contrib import admin\n\n# Register your models here.\nfrom shop_list_app.models import User, Product\n\nclass UserAdmin(admin.ModelAdmin):\n list_display = ['email','active','id_tlf_and','created_at','modified_at']\n list_filter = ['active','created_at']\n\nclass ProductAdmin(admin.ModelAdmin):\n list_display = ['name','active','created_at','modified_at']\n list_filter = ['active','created_at']\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Product, ProductAdmin)\n","sub_path":"shop_list_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"605643332","text":"#!usr/bin/env python \n#-*- coding:utf-8 _*- \n\"\"\" \n@author:$USER\n@file: $NAME \n@time: $YEAR/$MONTH/$DAY\n\"\"\"\n\"\"\"\nav394281 中,充满威严的蕾米莉亚大小姐因为触犯某条禁忌,被隙间妖怪八云紫(紫m……èi)按住头在键盘上滚动。\n同样在弹幕里乱刷梗被紫姐姐做成罪袋的你被指派找到大小姐脸滚键盘打出的一行字中的第 `k` 个仅出现一次的字。\n(为简化问题,大小姐没有滚出 ascii 字符集以外的字)\n\n\n\n输入描述:\n每个输入都有若干行,每行的第一个数字为`k`,表示求第`k`个仅出现一次的字。然后间隔一个半角空格,\n之后直到行尾的所有字符表示大小姐滚出的字符串`S`。\n\n输出描述:\n输出的每一行对应输入的每一行的答案,如果无解,输出字符串`Myon~`\n\n(请不要输出多余的空行)\n\n为了方便评测,如果答案存在且为c,请输出[c]\n\n输入例子1:\n2 misakamikotodaisuki\n3 !bakabaka~ bakabaka~ 1~2~9!\n3 3.1415926535897932384626433832795028841971693993751o582097494459211451488946419191919l91919hmmhmmahhhhhhhhhh\n7 www.bilibili.com/av170001\n1 111\n\n输出例子1:\n[d]\n[9]\n[l]\n[7]\nMyon~\n\"\"\"\nfrom IPython import embed\n\nnan = 'Myon~'\n\ndef func(s, n):\n if s is None:\n return None\n _s = []\n d_s = [] # 重复\n for i in s:\n if i not in _s:\n _s.append(i)\n else:\n if i not in d_s:\n d_s.append(i)\n for k in d_s:\n if k in _s:\n _s.remove(k)\n if len(_s) < n:\n return None\n return _s[n-1]\n\n\nif __name__ == '__main__':\n ns = []\n ss = []\n while True:\n _input = input()\n idx = _input.find(' ')\n try:\n n = int(_input[:idx])\n s = _input[idx+1:]\n ns.append(n)\n ss.append(s)\n except Exception as e:\n break\n for i in range(len(ns)):\n n = ns[i]\n s = ss[i]\n r = func(s, n)\n if r is None:\n print(nan)\n else:\n print('[%s]' % r)\n pass","sub_path":"scripts/nowcode/bilibili_002.py","file_name":"bilibili_002.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139274602","text":"\"\"\"\nin this file we declare functions used in main files\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport numpy as np\nfrom sklearn.metrics import make_scorer\nfrom sklearn.model_selection import cross_val_score\n\n\ndef rmsle(actual, prediction):\n \"\"\"\n calculates Root Mean Squared Logarithmic Error\n\n :param actual: vector containing actual prices\n :param prediction: vector containg predicted prices\n \"\"\"\n return np.sqrt(np.mean((np.log(prediction + 1) - np.log(actual + 1)) ** 2))\n\n\nrmsle_scorer = make_scorer(rmsle, greater_is_better=False)\n\n\ndef rmse(actual, prediction):\n \"\"\"\n calculates Root Mean Squared Error\n\n :param actual: vector containing actual log(prices + 1)\n :param prediction: vector containg predicted log(prices + 1)\n \"\"\"\n return np.sqrt(np.mean((prediction - actual) ** 2))\n\n\nrmse_scorer = make_scorer(rmse, greater_is_better=False)\n\n\ndef crossvalidate(data, model, scorer=rmsle_scorer, n=3):\n \"\"\"\n calculates RMSLE and saves model info into a text file 'models/info.txt'\n\n :param data: full data set\n :param model: model object (eg. 
RandomForest())\n :param scorer: which scoring functions should be used\n :param n: how many times it crossvalidates\n :return e: mean of scores from every validation\n \"\"\"\n\n scores = cross_val_score(model,\n data.drop(['price', 'id'], axis=1).values,\n data.price.values,\n cv=n,\n scoring=scorer)\n e = abs(np.mean(scores))\n\n # save model data to a text file\n if \"models\" not in os.listdir(os.getcwd()):\n os.mkdir(\"models\")\n\n name = \"models\\\\\" + str(np.round(e, 6)) + '_' + datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + \".txt\"\n info = open(name, mode='w')\n info.write(\"mean RMSLE: \" + str(e) + '\\n')\n info.write(\"\\nfeatures:\\n\")\n for f in list(data.columns.values):\n info.write(f)\n info.write('\\n')\n info.write(\"\\nparameters:\\n\")\n params = model.get_params()\n for key in params:\n info.write(key + \": \" + str(params[key]) + \"\\n\")\n info.close()\n\n return e\n\n\ndef split_column(column):\n \"\"\"\n splits a column based on '/'\n\n :param column: column to be separated\n :return: a tuple of columns\n \"\"\"\n\n try:\n new_column1, new_column2, new_column3 = column.split('/')\n return new_column1, new_column2, new_column3\n except Exception:\n return np.nan, np.nan, np.nan\n\n\ndef handle_missing(dataset):\n \"\"\"\n changes nas to missing in columns category, brand_name and\n\n :param dataset:\n :return:\n \"\"\"\n dataset.category1.fillna(value=\"missing\", inplace=True)\n dataset.category2.fillna(value=\"missing\", inplace=True)\n dataset.category3.fillna(value=\"missing\", inplace=True)\n dataset.brand_name.fillna(value=\"missing\", inplace=True)\n dataset.item_description.fillna(value=\"missing\", inplace=True)\n return dataset\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142594290","text":"\"\"\"\nMadelyn Reyes\nOctober 3, 2015\nWhile Loop: Squarer\n\"\"\"\n\n\nanswer = 0 #setting the variables to 0\nnumber = 0\n\nprint(\"This program squares numbers. 
Enter a positive integer to square or -1 to quit.\")\n#printing the function of the program to the user\n\n\nwhile number >= 0: #while number is not 0 or a negative number\n number = float(input(\"Enter a positive integer or -1: \")) #asking for a number\n square = number **2 #squaring the number\n if number != -1: #if the number is -1 stop the program\n print(int(number),\"squared is\",int(square)) #printing the answer\n \n \n \n \n \n \n","sub_path":"Whileloopsquarer.py","file_name":"Whileloopsquarer.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141647905","text":"import datetime\n\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom accountifie.gl.models import ExternalBalance, ExternalAccount\nfrom accountifie.query.query_manager import QueryManager\nimport accountifie._utils\nimport accountifie.environment.api\nimport accountifie.gl.api\n\nimport tables.bstrap_tables\n\nfrom base.models import Expense, Mcard, Cashflow\n\n\n@login_required\ndef maintenance(request):\n return render_to_response('main_views/maintenance.html', RequestContext(request, {}))\n\n@login_required\ndef reports(request):\n d = {}\n return render_to_response('main_views/reports.html', RequestContext(request, d))\n\n\n@login_required\ndef home(request):\n from_date, to_date = accountifie._utils.extractDateRange(request)\n company_id = accountifie._utils.get_company(request)\n #gather some info on what we have in the database\n expenses = Expense.objects.filter(company_id=company_id)\n stub_expenses = Expense.objects.filter(stub=True).count()\n\n chk_acct = ExternalAccount.objects.get(gl_account__id='10010')\n cashflows = Cashflow.objects.filter(ext_account=chk_acct)\n #incomplete_cashflows = cashflows.filter(counterparty=None).count()\n incomplete_cashflows = 0\n\n incomplete_rows = []\n incomplete_rows.append(['Expenses', stub_expenses, '/admin/base/expense/?unmatched=UNMATCHED'])\n incomplete_rows.append(['Payments -- 1001', incomplete_cashflows, '/admin/base/cashflow/?unmatched=UNMATCHED'])\n\n\n\n expense_count = expenses.count()\n if expense_count:\n expense_latest = expenses.order_by('-expense_date')[0].expense_date #one sql query I hope\n else:\n expense_latest = datetime.date.today()\n\n \n\n mcard = Mcard.objects.filter(company_id=company_id)\n mcard_count = mcard.count()\n if mcard_count:\n mcard_latest = mcard.order_by('-trans_date')[0].trans_date\n else:\n mcard_latest = datetime.date.today()\n\n gl_strategy = request.GET.get('gl_strategy', None)\n query_manager = QueryManager(gl_strategy=gl_strategy)\n grant_table = query_manager.balance_by_cparty(company_id, ['11500'])\n\n grant_rec_rows = []\n for i in grant_table.index:\n if abs(grant_table.loc[i]) > 1:\n drilldown = '/reporting/history/account/11500/?from=%s&to=%s&cp=%s' % (from_date, to_date, i)\n grant_rec_rows.append([i, \"{:,.0f}\".format(grant_table.loc[i]), drilldown])\n\n exp_trends = tables.bstrap_tables.balance_trends('2015-3-31', accts_path='opexp')\n\n context = dict(\n expense_count = expense_count,\n incomplete_rows = incomplete_rows,\n mcard_count = mcard_count,\n mcard_latest = mcard_latest,\n company_id = company_id,\n grant_rec_rows = grant_rec_rows,\n exp_trends = exp_trends\n )\n\n return render_to_response('main_views/home.html', context, RequestContext(request))\n\n@login_required\ndef 
daily(request):\n today = datetime.datetime.now().date()\n\n bank_accts = [acct.gl_account_id for acct in ExternalAccount.objects.all()]\n \n missing_bank_bals = []\n\n for acct in bank_accts:\n if ExternalBalance.objects.filter(account=acct).filter(date=today).count() == 0:\n missing_bank_bals.append(acct)\n\n company_id = accountifie._utils.get_company(request)\n from_date, to_date = accountifie._utils.extractDateRange(request)\n \n AP_acct = accountifie.environment.api.variable({'name': 'GL_ACCOUNTS_PAYABLE'})\n AL_acct = accountifie.environment.api.variable({'name': 'GL_ACCRUED_LIAB'})\n PE_acct = accountifie.environment.api.variable({'name': 'GL_PREPAID_EXP'})\n AR_accts = accountifie.gl.api.path_accounts({'path': 'assets.curr.ap'})\n\n # new style\n gl_strategy = request.GET.get('gl_strategy', None)\n\n query_manager = QueryManager(gl_strategy=gl_strategy)\n ap_table = query_manager.balance_by_cparty(company_id, [AP_acct])\n prepaid_table = query_manager.balance_by_cparty(company_id, [PE_acct], to_date=today)\n al_table = query_manager.balance_by_cparty(company_id, [AL_acct], to_date=today)\n ar_table = query_manager.balance_by_cparty(company_id, AR_accts, to_date=today)\n\n ap_rows = []\n for i in ap_table.index:\n if abs(ap_table.loc[i]) > 1:\n drilldown = '/reporting/history/account/%s/?from=%s&to=%s&cp=%s' % (AP_acct, from_date, to_date, i)\n ap_rows.append([i, ap_table.loc[i], drilldown])\n\n prepaid_rows = []\n for i in prepaid_table.index:\n if abs(prepaid_table.loc[i]) > 1:\n drilldown = '/reporting/history/account/%s/?from=%s&to=%s&cp=%s' % (PE_acct, from_date, to_date, i)\n prepaid_rows.append([i, prepaid_table.loc[i], drilldown])\n\n al_rows = []\n for i in al_table.index:\n if abs(al_table.loc[i]) > 1:\n drilldown = '/reporting/history/account/%s/?from=%s&to=%s&cp=%s' % (AL_acct, from_date, to_date, i)\n al_rows.append([i, al_table.loc[i], drilldown])\n\n ar_rows = []\n for i in ar_table.index:\n if abs(ar_table.loc[i]) > 1:\n drilldown = '/reporting/history/account/%s/?from=%s&to=%s&cp=%s' % (AL_acct, from_date, to_date, i)\n ar_rows.append([i, ar_table.loc[i], drilldown])\n\n #gather some info on what we have in the database\n expenses = Expense.objects.filter(company_id=company_id)\n expense_count = expenses.count()\n if expense_count:\n expense_latest = expenses.order_by('-expense_date')[0].expense_date #one sql query I hope\n else:\n expense_latest = datetime.date.today()\n\n \n\n mcard = Mcard.objects.filter(company_id=company_id)\n mcard_count = mcard.count()\n if mcard_count:\n mcard_latest = mcard.order_by('-trans_date')[0].trans_date\n else:\n mcard_latest = datetime.date.today()\n\n \n if len(missing_bank_bals) > 0:\n messages.info(request, 'Missing external account balances: %s' % ','.join(missing_bank_bals))\n\n context = dict(\n expense_count = expense_count,\n expense_latest = expense_latest,\n mcard_count = mcard_count,\n mcard_latest = mcard_latest,\n creditor_rows = ap_rows,\n debitor_rows = ar_rows,\n prepaid_rows = prepaid_rows,\n al_rows = al_rows,\n )\n\n\n return render_to_response('main_views/daily.html', RequestContext(request, context))\n\n","sub_path":"cpd/main_views.py","file_name":"main_views.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33213524","text":"# Sum square difference - Problem 6\n#\n# The sum of the squares of the first ten natural numbers is,\n# (1 ** 2) + (2 ** 2) + ... 
+{"seq_id":"145978959","text":"#!/usr/bin/env python\n\"\"\"\nSCP copy of CENC partipant ID to local directory for FTP to CENC servers.\n\"\"\"\n\nimport os\nimport argparse\n\ncenc_local_directory = \"/Users/bkraft/cenc/upload\"\ncenc_remote_directory = \"/cenc/mri/subjects/\"\ncenc_prefix = \"34P1\"\n\nusage = \"usage: %prog [options] arg1 arg2\"\n\nparser = argparse.ArgumentParser(prog='cenc_scp')\n\nparser.add_argument(\"cenc_id_number\", help=\"CENC ID number\", nargs='*', type=int)\nparser.add_argument(\"--username\", help=\"username\", default='bkraft')\nparser.add_argument(\"--machine\", help=\"machine\", default='aging1a.medeng.wfubmc.edu')\n\ninArgs = parser.parse_args()\n\nfor ii in inArgs.cenc_id_number:\n cenc_acrostic = \"%s%03d\" % (cenc_prefix, ii)\n cenc_data_directory = os.path.join(cenc_remote_directory, cenc_acrostic, 'data')\n\n cenc_tarballs = (os.path.join(cenc_data_directory, 'dicom', cenc_acrostic + '.tar.gz'),\n os.path.join(cenc_data_directory, 'fmri', cenc_acrostic + '_fmri.tar.gz')\n )\n\n for jj in cenc_tarballs:\n command = \"scp {user}@{machine}:{tarball} {local_directory}\".format(user=inArgs.username,\n machine=inArgs.machine,\n tarball=jj,\n local_directory=cenc_local_directory)\n\n os.system(command)","sub_path":"cenc/fetch_tarballs.py","file_name":"fetch_tarballs.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"144220105","text":"import numpy as np\nimport time\n\nfrom diff_cython import *\n\ndef diff_py(at, a, visc, dxidxi, dyidyi, dzidzi, itot, jtot, ktot): \n ii = 1\n jj = itot\n kk = itot*jtot\n\n for k in range(1, ktot-1):\n for j in range(1, jtot-1):\n for i in range(1, itot-1):\n ijk = i + j*jj + k*kk\n at[ijk] += visc * (\n + ( (a[ijk+ii] - a[ijk ]) \n - (a[ijk ] - a[ijk-ii]) ) * dxidxi \n + ( (a[ijk+jj] - a[ijk ]) \n - (a[ijk ] - a[ijk-jj]) ) * dyidyi\n + ( (a[ijk+kk] - a[ijk ]) \n - (a[ijk ] - a[ijk-kk]) ) * dzidzi\n )\n\ndef init(a, at, ncells):\n for i in range(ncells):\n a[i] = i**2./(i+1)**2.\n at[i] = 0.\n\nif(__name__ == \"__main__\"):\n mode = 'cy'\n nloop = 100\n itot = 128\n jtot = 128\n ktot = 128\n ncells = itot*jtot*ktot\n \n a = np.zeros(ncells)\n at = np.zeros(ncells)\n \n init(a, at, ncells)\n\n # Check results\n if(mode == 'py'):\n diff_py(at, a, 0.1, 0.1, 0.1, 0.1, itot, jtot, ktot) \n elif(mode == 'cy'):\n diff_pyx(at, a, 0.1, 0.1, 0.1, 0.1, itot, jtot, ktot) \n print(\"at=%f\"%at[itot*jtot+itot+itot/2])\n\n # Time performance \n start = time.time()\n \n if(mode == 'py'): \n for i in range(nloop): \n diff_py(at, a, 0.1, 0.1, 0.1, 0.1, itot, jtot, ktot) \n elif(mode == 'cy'):\n for i in range(nloop): \n diff_pyx(at, a, 0.1, 0.1, 0.1, 0.1, itot, jtot, ktot) \n \n end = time.time()\n\n print(\"time/iter = %f s (%i iters)\"%((end-start)/float(nloop),nloop))\n","sub_path":"diff_benchmark/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"133066182","text":"import os\nimport random\nfrom datetime import datetime\nfrom django.conf import settings\n\n\nclass DeployConfig(object):\n \"\"\"\n This class keeps first assignment to attributes.\n It's intended for command-line options to be passed in first\n and have related values working as expected:\n\n In deployment/tst.py:\n deploy_config.branch = ...\n deploy_config.projectdir = os.path.join(\"releases\", deploy_config.branch)\n\n If branch is overruled on the command-line, projectdir changes\n with it.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\n def __setattr__(self, key, value):\n if not hasattr(self, key):\n self.__dict__[key] = value\n\n def __repr__(self):\n return repr(self.__dict__)\n\n\nclass DeploymentLabel(DeployConfig):\n def __init__(self, **kwargs):\n self.label = 'templateproject'\n self.branches = {\n 'tst': settings.CURRENT_BRANCH,\n 'acc': 'acceptance',\n 'prd': 'master',\n }\n self.sitenames = {\n 'tst': (lambda x: 'tst-%s.templateproject.nl' % x.branch),\n 'acc': 'acc.templateproject.nl',\n 'prd': 'www.templateproject.nl',\n }\n # stuff below this is overruled by cmdline, stuff above isn't.\n super(DeploymentLabel, self).__init__(**kwargs)\n self.layer = 'tst'\n self.host = '127.0.0.1'\n self.timestamp = datetime.now().strftime('%Y%m%d')\n self.baseport = 8000\n self.deployhost = 'app-%s-%s@%s' % (self.label, self.layer, self.host)\n self.homedir = '/opt/APPS/%s/%s' % (self.label, self.layer)\n self.branch = self.branches[self.layer]\n sitename = self.sitenames[self.layer]\n self.sitename = sitename(self) if callable(sitename) else sitename\n\n def defaults(self):\n if self.layer == 'tst':\n self.gunicorn_port = random.randint(10000, 19999)\n self.gunicorn_workers = 1\n self.projectdir = os.path.join(\"releases\", self.branch)\n\n elif self.layer == 'acc':\n self.gunicorn_port = self.baseport + 1\n self.gunicorn_workers = 1\n self.tag = \"acc-\" + self.timestamp\n self.projectdir = os.path.join(\"releases\", self.branch)\n\n elif self.layer == 'prd':\n self.gunicorn_port = self.baseport\n self.gunicorn_workers = 4\n self.tag = \"prd-\" + self.timestamp\n self.projectdir = os.path.join(\"releases\", self.tag)\n\n","sub_path":"deployment/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"640546461","text":"from dwave.cloud import Client\nimport networkx as nx\nimport dimod\nimport penaltymodel.core as pm\nimport math\nimport dwavebinarycsp as csp\nfrom dwave.system.samplers import DWaveSampler\nfrom dwave.system.composites import EmbeddingComposite\nimport neal\np = int(input('32 bit int.'))\nclient = Client.from_config(token='DEV-5d0d06c4adddf71dfb4b5080d9070b1643b035a3')\nc = csp.ConstraintSatisfactionProblem(csp.BINARY)\nc = csp.factories.multiplication_circuit(16)\npvars=['p0','p1','p2','p3','p4','p5','p6','p7','p8','p9','p10','p11','p12','p13','p14','p15','p16','p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31']\nprint('generating bqm...')\nbqm = csp.stitch(c)\nfvars=dict(zip(reversed(pvars),\"{0:032b}\".format(p)))\nfvars={var: int(x) for(var,x) in fvars.items()}\nfor var,value in fvars.items():\n bqm.fix_variable(var,value)\nsampler = EmbeddingComposite(DWaveSampler())\nsampler2 = neal.SimulatedAnnealingSampler()\nprint('sampling...')\nresponse = sampler.sample(bqm,num_reads=10000)\nprint('done with quantum.')\nlog = []\nfor s,o in response.data(['sample','energy']):\n m=[s['a15'],s['a14'],s['a13'],s['a12'],s['a11'],s['a10'],s['a9'],s['a8'],s['a7'],s['a6'],s['a5'],s['a4'],s['a3'],s['a2'],s['a1'],s['a0']]\n if not ([m,o]) in log:\n if o == 0:\n log.append([m,o])\nprint('your factors!\\n',log)\n","sub_path":"adder.py","file_name":"adder.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"7676563","text":"#! /usr/bin/env python\n# coding=utf-8\n\n\"\"\"\n\"\"\"\n\nimport sys,re,os\nimport shutil\nimport zipfile\nimport getopt\n\ndef make_zip(source_dir, output_filename):\n \"\"\"\n Zip source_dir to output file.\n \"\"\"\n if not os.path.exists(source_dir):\n raise Exception\n\n if not os.path.exists(os.path.dirname(output_filename)):\n os.makedirs(os.path.dirname(output_filename))\n\n zipf = zipfile.ZipFile(output_filename, 'w')\n pre_len = len(os.path.dirname(source_dir))\n for parent, dirnames, filenames in os.walk(source_dir):\n for filename in filenames:\n pathfile = os.path.join(parent, filename)\n arcname = pathfile[pre_len:].strip(os.path.sep)\n zipf.write(pathfile, arcname)\n zipf.close()\n\n\ndef deal_cmdline(argv):\n src = ''\n dst = ''\n\n try:\n opts, args = getopt.getopt(argv,\"hs:z:\",[\"sfile=\",\"zfile=\"])\n except getopt.GetoptError:\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print('master_zip_report.py -s -d ')\n sys.exit()\n elif opt in (\"-s\", \"--sfile\"):\n src = arg\n elif opt in (\"-z\", \"--zfile\"):\n dst = arg\n\n make_zip(src, dst)\n\nif __name__ == \"__main__\":\n deal_cmdline(sys.argv[1:])\n","sub_path":"acis_framework/acis_master/master_zip_report.py","file_name":"master_zip_report.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"126991948","text":"class K_Means:\n n = 0\n def __init__(self, n):\n self.n = n\n \n def fit(self, train_x, n_iterations = 100):\n iterations = n_iterations\n self.count = train_x.shape[0]\n self.features = train_x.shape[1]\n self.cluseter_centers = np.random.randn(self.n, self.features)\n \n current_assignment = np.zeros((self.count, 1), dtype=np.int) - 1\n \n self.train = np.concatenate((train_x, current_assignment), axis=1)\n \n prev_assignment = current_assignment\n \n distances = []\n for i in range(self.cluseter_centers.shape[0]):\n distance = np.linalg.norm(train_x - self.cluseter_centers[i], axis=1)\n distances.append(distance)\n distance_array = np.array(distances)\n current_assignment = np.argmax(distance_array, axis=0)\n \n self.train[:,-1] = current_assignment\n \n \n while (iterations > 0) and not (np.prod(current_assignment == prev_assignment)):\n \n iterations -= 1\n \n for i in range(self.cluseter_centers.shape[0]):\n index = np.where(self.train[:,-1] == i)\n data = self.train[np.where(self.train[:,-1] == i)][:,:-1]\n data_count = self.train[np.where(self.train[:,-1] == i)][:,:-1].shape[0]\n if data_count == 0:\n data = np.zeros((1, self.features))\n self.cluseter_centers[i] = data\n else:\n self.cluseter_centers[i] = np.sum(data, axis=0)/ data_count\n \n \n distances = []\n for i in range(self.cluseter_centers.shape[0]):\n distance = np.linalg.norm(train_x - self.cluseter_centers[i], axis=1)\n distances.append(distance)\n distance_array = np.array(distances)\n current_assignment = np.argmax(distance_array, axis=0)\n \n self.train[:,-1] = current_assignment\n prev_assignment = current_assignment\n \n self.assignment = current_assignment\n \n def view(self):\n pass\n","sub_path":"K_means_from_scratch_numpy.py","file_name":"K_means_from_scratch_numpy.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"645355867","text":"\ndef sortTwoNumbers_dc(x):\n try:\n x[0] - x[1]\n except:\n out = \"You need to enter two *NUMBERS* !\"\n else:\n if len(x) > 2:\n out = \"You need to enter *TWO* numbers !\"\n elif x[0] > x[1]:\n out = [x[1], x[0]]\n elif x[0] < x[1]:\n out = x\n return out","sub_path":"NEU231Functions_dc.py","file_name":"NEU231Functions_dc.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"332048060","text":"from .common import *\nimport os\nimport shutil\nclass DataNode(threading.Thread):\n \"\"\"Data Server: execute command from nameserver.\"\"\"\n \n def __init__(self, server_id, gconf):\n super(DataNode, self).__init__(name='DataServer%s' % (server_id,))\n self.gconf = gconf\n self._server_id = server_id\n\n def run(self):\n gconf = self.gconf\n while True:\n gconf.data_events[self._server_id].wait()\n if gconf.cmd_flag:\n if gconf.cmd_type in [COMMAND.put, COMMAND.put2] and self._server_id in gconf.server_chunk_map:\n self.save()\n elif gconf.cmd_type in [COMMAND.read, COMMAND.read2]:\n self.read()\n elif gconf.cmd_type in [COMMAND.namenode_format]:\n self.format()\n elif gconf.cmd_type in [COMMAND.recover_chunks,COMMAND.recover_servers]:\n self.copy()\n elif gconf.cmd_type in [COMMAND.rm]:\n self.rm()\n else:\n pass\n gconf.data_events[self._server_id].clear()\n gconf.main_events[self._server_id].set()\n\n def save(self):\n \"\"\"Data Node save file\"\"\"\n\n data_node_dir = DATA_NODE_DIR % (self._server_id,)\n with open(self.gconf.file_path, 'r') as f_in:\n for chunk, offset, count in self.gconf.server_chunk_map[self._server_id]:\n f_in.seek(offset, 0)\n content = f_in.read(count)\n\n with open(data_node_dir + os.path.sep + chunk, 'w') as f_out:\n f_out.write(content)\n f_out.flush()\n def rm(self):\n if self._server_id not in self.gconf.server_chunk_map.keys():\n return\n data_node_dir = DATA_NODE_DIR % (self._server_id,)\n for chunkid in self.gconf.server_chunk_map[self._server_id]:\n os.remove(\"{}/{}\".format(data_node_dir,chunkid))\n\n\n def format(self):\n data_node_dir = DATA_NODE_DIR % (self._server_id,)\n shutil.rmtree(data_node_dir)\n os.mkdir(data_node_dir)\n def copy(self):\n if self._server_id not in self.gconf.server_chunk_map.keys():\n return \n if self.gconf.cmd_type in [COMMAND.recover_servers]:\n try:\n os.makedirs(\"dfs/datanode%d\"%self._server_id)\n except: \n pass\n data_node_dir = DATA_NODE_DIR % (self._server_id,)\n\n for chunk in self.gconf.server_chunk_map[self._server_id]:\n chunkid=list(chunk.keys())[0]\n copyfrom=chunk[chunkid]\n copy_data_dir= DATA_NODE_DIR % (copyfrom)+\"/\"+chunkid\n with open(copy_data_dir, 'r') as f_in:\n cont=f_in.read()\n with open(data_node_dir + os.path.sep + chunkid, 'w') as f_out:\n f_out.write(cont)\n f_out.flush()\n\n def read(self):\n \"\"\"read chunk according to offset and count\"\"\"\n\n read_path = (DATA_NODE_DIR % (self._server_id,)) + os.path.sep + self.gconf.read_chunk\n\n with open(read_path, 'r') as f_in:\n f_in.seek(self.gconf.read_offset)\n content = f_in.read(self.gconf.read_count)\n print(content)\n self.gconf.read_event.set()","sub_path":"core/datanode.py","file_name":"datanode.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"568116871","text":"'''\nThis application deals with various vendors and their providers\nsuch as SMS provided by Exotel\n'''\n\nimport sys\n\nPROVIDERS = {\n 'sms': {\n 'exotel': 'vendors.sms.exotel.sms',\n 'pinnacle': 'vendors.sms.pinnacle.sms',\n 'gupshup': 'vendors.sms.gupshup.sms',\n }\n}\n\n\ndef load_provider(vendor, provider):\n '''\n Loads the appropriate provider for a given vendor'''\n __import__(PROVIDERS[vendor][provider])\n return sys.modules[PROVIDERS[vendor][provider]]\n","sub_path":"vendors/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"275406441","text":"from unittest.mock import MagicMock\n\nfrom connections.sim.hw.hw_sim import HWSim, PinModes\nfrom connections.sim.hw.sensors.sensor import SensorType\nfrom connections.sim.hw.sensors.dummy_sensor import DummySensor\nfrom connections.sim.hw.ignitor_sim import Ignitor, IgnitorType\nfrom connections.sim.hw.clock_sim import Clock\n\n\ndef ignitor_test(hw, test, read):\n \"\"\"Utility function - does continuity check like FW would\"\"\"\n hw.digital_write(test, True)\n result = hw.analog_read(read)\n hw.digital_write(test, False)\n return result\n\n\nclass TestHWSim:\n def test_pin_mode(self):\n hw = HWSim(None, [], [])\n hw.set_pin_mode(1, PinModes.OUTPUT)\n hw.set_pin_mode(3, PinModes.INPUT)\n hw.set_pin_mode(2, PinModes.OUTPUT)\n\n assert hw.get_pin_mode(1) == PinModes.OUTPUT\n assert hw.get_pin_mode(3) == PinModes.INPUT\n assert hw.get_pin_mode(2) == PinModes.OUTPUT\n\n def test_ignitor_readwrite(self):\n hw = HWSim(None, [], [Ignitor(IgnitorType.MAIN, 1, 2, 3), Ignitor(IgnitorType.DROGUE, 5, 9, 10)])\n\n hw.set_pin_mode(2, PinModes.OUTPUT)\n hw.set_pin_mode(9, PinModes.OUTPUT)\n assert hw.analog_read(2) == Ignitor.OFF\n assert hw.analog_read(9) == Ignitor.OFF\n\n hw.set_pin_mode(1, PinModes.INPUT)\n hw.digital_write(1, True)\n assert hw.analog_read(2) == Ignitor.CONNECTED\n assert hw.analog_read(9) == Ignitor.OFF\n hw.digital_write(1, False)\n\n assert hw.analog_read(2) == Ignitor.OFF\n assert hw.analog_read(9) == Ignitor.OFF\n\n hw.set_pin_mode(5, PinModes.INPUT)\n hw.set_pin_mode(7, PinModes.INPUT)\n hw.digital_write(5, True)\n hw.digital_write(7, True)\n assert hw.analog_read(2) == Ignitor.OFF\n assert hw.analog_read(9) == Ignitor.CONNECTED\n\n def test_ignitor_fire(self):\n hw = HWSim(None, [], [Ignitor(IgnitorType.MAIN, 6, 3, 1)])\n\n hw.set_pin_mode(1, PinModes.INPUT)\n hw.set_pin_mode(3, PinModes.OUTPUT)\n hw.set_pin_mode(6, PinModes.INPUT)\n hw.digital_write(1, False) # Writing false does not fire\n assert hw.analog_read(3) == Ignitor.OFF\n hw.digital_write(6, True)\n assert hw.analog_read(3) == Ignitor.CONNECTED\n hw.digital_write(6, False)\n\n hw.digital_write(1, True) # Fire the pin\n assert hw.analog_read(3) == Ignitor.OFF\n hw.digital_write(6, True)\n assert hw.analog_read(3) == Ignitor.DISCONNECTED\n hw.digital_write(6, False)\n\n hw.digital_write(1, False) # Firing is one-way\n assert hw.analog_read(3) == Ignitor.OFF\n hw.digital_write(6, True)\n assert hw.analog_read(3) == Ignitor.DISCONNECTED\n hw.digital_write(6, False)\n\n def test_ignitor_broken(self):\n hw = HWSim(None, [], [Ignitor(IgnitorType.MAIN, 1, 2, 3, broken=True), Ignitor(IgnitorType.DROGUE, 4, 5, 6, broken=True)])\n\n hw.set_pin_mode(1, PinModes.INPUT)\n hw.set_pin_mode(2, PinModes.OUTPUT)\n hw.set_pin_mode(3, PinModes.INPUT)\n hw.set_pin_mode(4, PinModes.INPUT)\n hw.set_pin_mode(5, PinModes.OUTPUT)\n hw.set_pin_mode(6, PinModes.INPUT)\n\n assert ignitor_test(hw, 1, 2) == Ignitor.DISCONNECTED\n assert ignitor_test(hw, 4, 5) == Ignitor.DISCONNECTED\n\n hw = HWSim(None, [], [Ignitor(IgnitorType.MAIN, 1, 2, 3, broken=True), Ignitor(IgnitorType.DROGUE, 4, 5, 6, broken=False)])\n\n hw.set_pin_mode(1, PinModes.INPUT)\n hw.set_pin_mode(2, PinModes.OUTPUT)\n hw.set_pin_mode(3, PinModes.INPUT)\n hw.set_pin_mode(4, PinModes.INPUT)\n hw.set_pin_mode(5, PinModes.OUTPUT)\n hw.set_pin_mode(6, PinModes.INPUT)\n\n assert ignitor_test(hw, 1, 2) == Ignitor.DISCONNECTED\n assert ignitor_test(hw, 4, 5) == Ignitor.CONNECTED\n\n hw.digital_write(6, True)\n assert ignitor_test(hw, 4, 5) == Ignitor.DISCONNECTED\n\n def test_sensor_read(self):\n GPS_DATA = (1, 2, 3)\n BARO_DATA = (4, 5)\n\n GPS = DummySensor(SensorType.GPS, GPS_DATA)\n BARO = DummySensor(SensorType.BAROMETER, BARO_DATA)\n\n hw = HWSim(None, [GPS, BARO], [])\n\n assert hw.sensor_read(SensorType.GPS) == GPS_DATA\n assert hw.sensor_read(SensorType.BAROMETER) == BARO_DATA\n\n GPS_DATA = (11, 12, 13)\n BARO_DATA = (14, 15)\n\n GPS.set_value(GPS_DATA)\n BARO.set_value(BARO_DATA)\n\n assert hw.sensor_read(SensorType.GPS) == GPS_DATA\n assert hw.sensor_read(SensorType.BAROMETER) == BARO_DATA\n\n def test_clock(self):\n clock = Clock()\n rocket_sim = MagicMock()\n rocket_sim.get_clock = MagicMock(return_value=clock)\n\n hw = HWSim(rocket_sim, [], [])\n\n assert clock.get_time_ms() == 0\n assert clock.get_time_us() == 0\n\n assert hw.time_update(1000) == 1\n assert hw.time_update(1900) == 2\n assert hw.time_update(100) == 3\n","sub_path":"tests/test_hw_sim.py","file_name":"test_hw_sim.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"100756163","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n__all__ = ['Sensitivity']\n\n\nclass Sensitivity(object):\n\n def __init__(self, animal):\n \"\"\"\n Sets the sensitivity scaling for different animals.\n Sensitivity scalings are approximated as Gaussians.\n\n Parameters\n ----------\n animal : str\n The name of the animal you want to imitate. Current\n options are: human, blue tit, turkey, honeybee, pigeon,\n and house fly.\n \"\"\"\n self.animal = animal\n self.wave_x = np.linspace(300,700,1000)\n self.red_lim = 650\n self.blue_lim = 500\n\n if animal.lower() == 'human':\n self.human()\n\n elif animal.lower() == 'pigeon':\n self.pigeon()\n \n elif animal.lower() == 'honeybee':\n self.honeybee()\n\n elif animal.lower() == 'blue tit':\n self.bluetit()\n\n elif animal.lower() == 'turkey':\n self.turkey()\n\n elif animal.lower() == 'house fly':\n self.housefly()\n\n else:\n raise ValueError('Animal not implemented yet.')\n\n self.set_contributions()\n\n \n def pdf(self, x, mu, std):\n \"\"\"\n Creates Gaussian distribution for given colors.\n \n Parameters\n ----------\n x : float or np.ndarray\n mu : float\n Mean value.\n std : float\n Std value.\n \"\"\"\n fact = np.sqrt(2 * np.pi * std**2)\n exp = np.exp(-0.5 * ( (x-mu) / std)**2)\n return 1.0/fact * exp\n\n\n def set_contributions(self):\n \"\"\"\n Makes sure the appropriate wavelengths are contributing\n to the color map (e.g. removes red when the sensitivity\n function doesn't extend into red wavelengths).\n \"\"\"\n reset = np.zeros(self.mapped.shape)\n r = np.where(self.wave_x>=self.red_lim)[0]\n b = np.where(self.wave_x<=self.blue_lim)[0]\n g = np.where( (self.wave_x<self.red_lim) & (self.wave_x>self.blue_lim) )[0]\n\n tot = np.nansum(self.mapped, axis=1)\n tot /= np.nanmax(tot)\n\n reset[:,0][r] = self.mapped[:,0][r]\n reset[:,1][g] = self.mapped[:,1][g]\n reset[:,2][b] = self.mapped[:,2][b]\n\n self.total_map = reset\n\n\n def plot(self):\n \"\"\"\n Plots sensitivity functions.\n \"\"\"\n for i in range(self.mapped.shape[1]):\n plt.plot(self.wave_x, self.mapped[:,i], lw=4, label='Cone {}'.format(i))\n\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), \n loc='lower left',\n ncol=self.mapped.shape[1], mode=\"expand\", \n borderaxespad=0.)\n\n plt.xlabel('wavelength [nm]', fontsize=16)\n plt.ylabel('sensitivity', fontsize=16)\n plt.show()\n\n \n def human(self):\n \"\"\"\n Creates sensitivity distribution for humans.\n \"\"\"\n human_blue = self.pdf(self.wave_x, 420.0, 40.0)\n human_blue /= np.nanmax(human_blue)\n\n human_red = self.pdf(self.wave_x, 590, 50)\n human_red /= np.nanmax(human_red)\n \n human_green = self.pdf(self.wave_x, 550, 50)\n human_green /= np.nanmax(human_green)\n\n self.mapped = np.array([human_red, human_green, human_blue]).T\n \n\n def pigeon(self):\n \"\"\"\n Creates sensitivity distribution for pigeons.\n \"\"\"\n bird_blue = self.pdf(self.wave_x, 490.0, 20.0)\n bird_blue /= np.nanmax(bird_blue)\n\n bird_ultra_blue = self.pdf(self.wave_x, 400, 40)\n bird_ultra_blue /= np.nanmax(bird_ultra_blue)\n \n bird_blue = (bird_blue+bird_ultra_blue)/np.nanmax(bird_blue+bird_ultra_blue)\n\n bird_green = self.pdf(self.wave_x, 550, 20)\n bird_green /= np.nanmax(bird_green)\n\n bird_red = self.pdf(self.wave_x, 630, 20)\n bird_red /= np.nanmax(bird_red)\n\n self.mapped = np.array([bird_red, bird_green, bird_blue]).T\n\n\n def honeybee(self):\n \"\"\"\n Creates sensitivity distribution for honeybees.\n \"\"\"\n hb_blue = self.pdf(self.wave_x, 350.0, 30.0)\n hb_blue /= np.nanmax(hb_blue)\n\n hb_red = self.pdf(self.wave_x, 550, 40)\n hb_red /= np.nanmax(hb_red)\n \n hb_red_lower = self.pdf(self.wave_x, 400, 60.) * 30\n \n red = (hb_red+hb_red_lower)/np.nanmax(hb_red+hb_red_lower)\n \n hb_green = self.pdf(self.wave_x, 450, 30)\n hb_green /= np.nanmax(hb_green)\n \n hb_green_lower = self.pdf(self.wave_x, 370, 30) * 30\n green = (hb_green+hb_green_lower)/np.nanmax(hb_green+hb_green_lower)\n\n self.mapped = np.array([red, green, hb_blue]).T\n\n \n def bluetit(self):\n \"\"\"\n Creates sensitivity distribution for the blue tit.\n \"\"\"\n red = self.pdf(self.wave_x, 580, 40)\n red /= np.nanmax(red)\n\n green = self.pdf(self.wave_x, 500, 40)\n green /= np.nanmax(green)\n \n blue = self.pdf(self.wave_x, 420, 30)\n blue /= np.nanmax(blue)\n \n ultra = self.pdf(self.wave_x, 340, 30)\n ultra /= np.nanmax(ultra)\n \n blue = (blue+ultra)/np.nanmax(blue+ultra)\n\n self.mapped = np.array([red, green, blue]).T\n\n \n def turkey(self):\n \"\"\"\n Creates sensitivity distribution for the turkey.\n \"\"\"\n red = self.pdf(self.wave_x, 590, 40)\n red /= np.nanmax(red)\n \n green = self.pdf(self.wave_x, 530, 40)\n green /= np.nanmax(green)\n \n blue = self.pdf(self.wave_x, 470, 30)\n blue /= np.nanmax(blue)\n \n ultra = self.pdf(self.wave_x, 410, 30)\n ultra /= np.nanmax(ultra)\n \n blue = (blue+ultra)/np.nanmax(blue+ultra)\n\n self.mapped = np.array([red, green, blue]).T\n\n\n def housefly(self):\n \"\"\"\n Creates sensitivity distribution for the house fly.\n \"\"\"\n red = self.pdf(self.wave_x, 590, 20)\n red /= np.nanmax(red)\n\n green = self.pdf(self.wave_x, 500, 40)\n green /= np.nanmax(green)\n\n subgreen = self.pdf(self.wave_x, 410, 60)\n subgreen /= (np.nanmax(subgreen)*2)\n\n green = (green+subgreen)/np.nanmax(green+subgreen)\n\n blue = self.pdf(self.wave_x, 360, 30)\n blue /= np.nanmax(blue)\n\n self.mapped = np.array([red, green, blue]).T\n","sub_path":"animal_colors/animal_sensitivity.py","file_name":"animal_sensitivity.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"242492404","text":"'''\nCreated on July 5th, 2017\nContains Methods to load and visualize the puptputs of multi_run_hetero\n@author: Harald Ringbauer\n'''\n\n\n# from scipy.special import kv as kv # Import Bessel functions of second kind\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cPickle as pickle # @UnusedImport\nimport os\n\n# Contains various Methods to analyze the Runs of Multirun Hetero!\n\ndef load_estimates(data_folder=\"./testfolder\", data_set_nr=1, scenario=0, suffix=\".tsv\"):\n '''Load and return estimates.\n Return Empty Sets if data not found'''\n full_path = data_folder + \"/scenario\" + str(scenario) + \"/data_set_nr_\" + str(data_set_nr).zfill(2) + suffix\n # If data non-existent return nothing:\n if not os.path.exists(full_path):\n print(\"Not found: %s\" % full_path)\n return ([], [])\n data = np.loadtxt(full_path, delimiter='\\t').astype('float64')\n params, ci_s = data[:, 0], data[:, 1:]\n return (params, ci_s)\n \ndef load_estimates_range(data_folder=\"./testfolder\", data_set_vec=[1], scenario=0, fil=True, suffix=\".tsv\"):\n '''Load Estimates for a Range of Datasets.\n fil: Whether to delete values that are missing'''\n params = np.array([load_estimates(data_folder=data_folder, scenario=scenario, data_set_nr=i, suffix=suffix)[0] for i in data_set_vec])\n cis = np.array([load_estimates(data_folder=data_folder, scenario=scenario, data_set_nr=i, suffix=suffix)[1] for i in data_set_vec])\n \n if fil == True:\n # Maybe check for missig data HERE\n lengths = np.array(map(len, params))\n inds = ~(lengths == 0)\n \n params = params[inds]\n cis = cis[inds]\n \n return params, cis\n \n \ndef plot_diff_start(data_folder=\"./testfolder\", scenarios=range(4), data_set_nr=1):\n '''Load and plot the Different Starting Value Estimates'''\n \n # Load the Data:\n params = np.array([load_estimates(data_folder=data_folder, scenario=i, data_set_nr=data_set_nr)[0] \n for i in scenarios]) # Dimension len(scenario), len(params)\n cis = np.array([load_estimates(data_folder=data_folder, scenario=i, data_set_nr=data_set_nr)[1] \n for i in scenarios]) # Dimension len(scenario), len(params), 2\n \n # First print the results:\n for i in range(np.shape(params)[1]):\n print(\"\\nParameter %i: \" % i)\n for s in scenarios:\n print(\"Scenario %.4f (%.4f , %.4f): \" % (params[s, i], cis[s, i, 0], cis[s, i, 1]))\n \n x_vec_l = np.array(scenarios) - 0.1\n x_vec_r = x_vec_l + 0.2\n x_min, x_max = min(x_vec_l), max(x_vec_r)\n #\n # plt.figure()\n _, ((ax1, ax2, ax3)) = plt.subplots(3, 1, sharex=True, figsize=(6, 6))\n c_left, c_right = \"salmon\", \"aqua\"\n cd_left, cd_right = \"crimson\", \"navy\"\n \n ax1.hlines(600, x_min, x_max, linewidth=3, color=c_left)\n ax1.hlines(1000, x_min, x_max, linewidth=3, color=c_right)\n ax1.errorbar(x_vec_l, params[:, 0], yerr=params[:, 0] - cis[:, 0, 0], fmt=\"o\", label=\"Left\", color=cd_left)\n ax1.errorbar(x_vec_r, params[:, 1], yerr=params[:, 1] - cis[:, 1, 0], fmt=\"o\", label=\"Right\", color=cd_right)\n ax1.set_ylabel(r\"$D_e$\", fontsize=18, rotation=0, labelpad=15)\n ax1.set_ylim([0, 1500])\n ax1.legend()\n # ax1.legend()\n \n ax2.hlines(0.8, x_min, x_max, linewidth=3, color=c_left)\n ax2.hlines(0.4, x_min, x_max, linewidth=3, color=c_right)\n ax2.errorbar(x_vec_l, params[:, 2], yerr=params[:, 2] - cis[:, 2, 0], fmt=\"o\", label=\"Left\", color=cd_left)\n ax2.errorbar(x_vec_r, params[:, 3], yerr=params[:, 3] - cis[:, 3, 0], fmt=\"o\", label=\"Right\", color=cd_right)\n ax2.set_ylabel(r\"$\\sigma$\", fontsize=18, rotation=0, labelpad=15)\n ax2.set_ylim([0, 1.0])\n # ax2.legend()\n \n ax3.hlines(1.0, x_min, x_max, linewidth=3, color=c_left)\n # ax3.hlines(1000, x_min, x_max, linewidth=3, color=c_right)\n ax3.errorbar(x_vec_l, params[:, 4], yerr=params[:, 4] - cis[:, 4, 0], fmt=\"o\", label=\"Left\", color=cd_left)\n # ax3.errorbar(x_vec_r, params[:, 1], yerr=params[:, 1] - cis[:, 1, 0], fmt=\"o\", label=\"Right\", color=cd_right)\n ax3.set_ylabel(r\"$\\beta$\", fontsize=18, rotation=0, labelpad=15)\n ax3.set_ylim([0, 1.1])\n # ax3.legend()\n \n\n plt.xlabel(\"Scenario\", fontsize=18)\n plt.xticks(scenarios)\n \n plt.show()\n \ndef plot_estimates_scenarios(folder=\"./output/xxx/\", scenario_nr=9, replicate_nr=20, \n sigmas=[], nr_inds=[], betas=[], suffix=\".tsv\", beta_fix=False,\n figsize=(8,8), lw=3, ylim_D=[5, 5000], ylim_sigma=[0, 1.1], ylim_b=[-0.3, 1.2],\n cd_left = \"red\", cd_right = \"blue\", cd_lefts = [\"#ff421d\", \"#b8001e\"],\n cd_rights = [\"#1700f5\", \"#8523ff\"], cs=5, ms=4, yscale_D=\"log\",\n print_res=False, title=\"\", leg_loc=\"lower left\", savepath=\"\"):\n '''Function to plot the outcome of the eight scenarios.\n beta_fix: If true do not plot estimate beta.'''\n scenarios = range(scenario_nr)\n replicates = range(replicate_nr)\n data_set_nrs = range(scenario_nr * replicate_nr)\n \n # If Only one Parameter is given, expand it.\n if len(sigmas) == 1:\n sigmas = [sigmas[0] for _ in xrange(scenario_nr)]\n if len(nr_inds) == 1:\n nr_inds = [nr_inds[0] for _ in xrange(scenario_nr)]\n if len(betas) == 1:\n betas = [betas[0] for _ in xrange(scenario_nr)]\n \n # Load the Data:\n params = [load_estimates(data_folder=folder, scenario=i, data_set_nr=j, suffix=suffix)[0] \n for i in scenarios for j in replicates] # Dimension len(scenario), len(params)\n cis = [load_estimates(data_folder=folder, scenario=i, data_set_nr=j, suffix=suffix)[1] \n for i in scenarios for j in replicates] # Dimension len(scenario), len(params), 2\n \n # Extract Indices where Dataset is actually there:\n lengths = np.array(map(len, params))\n good_inds = np.where(lengths > 0)[0] # Check\n \n # Extract Params and Cis as Numpy Array:\n params = np.array([params[i] for i in good_inds]) \n cis = np.array([cis[i] for i in good_inds])\n data_set_nrs_found = np.array(data_set_nrs)[good_inds]\n print(np.shape(cis))\n \n # First print the results:\n if print_res:\n for i in xrange(len(params[0])):\n print(\"\\nParameter %i: \" % i)\n for j in xrange(len(good_inds)):\n print(\"Dataset %i: %.4f (%.4f , %.4f): \" % (data_set_nrs_found[j], params[j, i], cis[j, i, 0], cis[j, i, 1]))\n \n # Do the plot\n scale = 1\n base_font = 18\n x_min, x_max = scale*min(data_set_nrs), scale*max(data_set_nrs)\n x_vec_l = (scale*data_set_nrs_found - 0.1)\n x_vec_r = (x_vec_l + 0.2)\n\n cd_lefts = np.tile(np.repeat(cd_lefts, replicate_nr), scenario_nr)\n cd_rights = np.tile(np.repeat(cd_rights, replicate_nr), scenario_nr)\n # Extract the right Colors:\n cd_lefts = cd_lefts[good_inds]\n cd_rights = cd_rights[good_inds]\n \n # Make the x-Vector:\n x_vec = [[scale*i * replicate_nr, scale*(i + 1) * replicate_nr] for i in xrange(scenario_nr)]\n #print(x_vec)\n\n if beta_fix:\n _, ((ax1, ax2)) = plt.subplots(2, 1, sharex=True, figsize=figsize)\n axes = (ax1, ax2)\n \n else:\n _, ((ax1, ax2, ax3)) = plt.subplots(3, 1, sharex=True, figsize=figsize)\n axes = (ax1, ax2, ax3)\n \n # Print the Data Points\n for i in xrange(len(data_set_nrs_found)):\n ax1.errorbar(x_vec_l[i], params[i, 0], yerr=params[i, 0] - cis[i, 0, 0], fmt=\"o\", color=cd_lefts[i], alpha=0.7, capsize=cs, ms=ms)\n ax1.errorbar(x_vec_r[i], params[i, 1], yerr=params[i, 1] - cis[i, 1, 0], fmt=\"o\", color=cd_rights[i], alpha=0.7, capsize=cs, ms=ms)\n ax2.errorbar(x_vec_l[i], params[i, 2], yerr=params[i, 2] - cis[i, 2, 0], fmt=\"o\", color=cd_lefts[i], alpha=0.7, capsize=cs, ms=ms)\n ax2.errorbar(x_vec_r[i], params[i, 3], yerr=params[i, 3] - cis[i, 3, 0], fmt=\"o\", color=cd_rights[i], alpha=0.7, capsize=cs, ms=ms)\n if not beta_fix:\n ax3.errorbar(x_vec_l[i], params[i, 4], yerr=params[i, 4] - cis[i, 4, 0], fmt=\"o\", color=cd_lefts[i], alpha=0.7, capsize=cs, ms=ms)\n \n # Print Ground Truth Lines:\n for i, x in enumerate(x_vec):\n ax1.hlines(nr_inds[i][0], x[0], x[1], linewidth=lw, color=cd_left)\n ax1.hlines(nr_inds[i][1], x[0], x[1], linewidth=lw, color=cd_right)\n ax2.hlines(sigmas[i][0], x[0], x[1], linewidth=lw, color=cd_left)\n ax2.hlines(sigmas[i][1], x[0], x[1], linewidth=lw, color=cd_right)\n if not beta_fix: \n ax3.hlines(betas[i], x[0], x[1], linewidth=3, color=cd_left) \n \n ax1.set_ylabel(r\"$D_e$\", fontsize=base_font+4, rotation=0, labelpad=15)\n ax1.set_ylim(ylim_D)\n ax1.set_yscale(yscale_D)\n \n ax2.set_ylabel(r\"$\\sigma$\", fontsize=base_font+4, rotation=0, labelpad=15)\n ax2.set_ylim(ylim_sigma)\n \n if not beta_fix:\n ax3.set_ylim(ylim_b)\n ax3.set_ylabel(r\"$\\gamma$\", fontsize=base_font+4, rotation=0, labelpad=15)\n \n \n xlim = [x_min-2, x_max+2]\n for ax in axes: \n ax.set_xlim(xlim)\n \n ax1.set_title(title, fontsize=base_font)\n \n ### Legend\n ax1.hlines(nr_inds[0][0], x_vec[0][0], x_vec[0][1], linewidth=3, color=cd_left, label=\"Parameter (left)\")\n ax1.hlines(nr_inds[0][1], x_vec[0][0], x_vec[0][1], linewidth=3, color=cd_right, label=\"Parameter (right)\")\n ax1.errorbar(x_vec_l[0], params[0, 0], yerr=params[0, 0] - cis[0, 0, 0], fmt=\"o\", color=cd_lefts[0], alpha=0.7, capsize=6, label=\"Estimator (left)\")\n ax1.errorbar(x_vec_r[0], params[0, 1], yerr=params[0, 1] - cis[0, 1, 0], fmt=\"o\", color=cd_rights[0], alpha=0.7, capsize=6, label=\"Estimator (right)\")\n ax1.legend(loc=leg_loc, fontsize=base_font-10)\n \n plt.xlabel(\"Scenario\", fontsize=base_font)\n plt.xticks(data_set_nrs[::replicate_nr])\n \n ### Save if Needed\n if len(savepath)>0:\n plt.savefig(savepath, bbox_inches = 'tight', pad_inches = 0, dpi=300)\n print(\"Saved figure to: \" + savepath)\n plt.show()\n \n \n############################################\n# Do the plotting. Uncomment what you need!\n\nif __name__ == \"__main__\":\n # Plots the 9 Scenarios for Different starting Values\n sigmas = [[0.8, 0.4], [0.4, 0.8], [0.5, 0.5], [0.5, 0.5], [0.4, 0.8], [0.4, 0.8], [0.4, 0.8], [0.4, 0.8], [0.8, 0.8]]\n nr_inds = [[500, 1000], [1000, 500], [40, 20], [2000, 1000], [40, 20], [1500, 1000], [20, 40], [100, 200], [100, 100]]\n betas = [1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.5, 0.5]\n #plot_eight_scenarios(folder=\"./hetero_runs1\", sigmas=sigmas, nr_inds=nr_inds, betas=betas, title=\"Original Run\") # Plots eight Scenarios.\n plot_eight_scenarios(folder=\"./hetero_runs_isotropic\", sigmas=sigmas, nr_inds=nr_inds, betas=betas, title=\"9 simulated scenarios\") # Plot the rerun eight Scenarios.\n #plot_eight_scenarios(folder=\"./hetero_runs_isotropicL100\", sigmas=sigmas, nr_inds=nr_inds, betas=betas, title=\"Run V2. L=100\") # Plot the rerun eight Scenarios.\n \n # Plots the Scenarios for different discretizations:\n #nr_inds = [[100, 200], ]\n #sigmas = [[0.4, 0.8], ]\n #betas = [0.5, ]\n #plot_eight_scenarios(folder=\"./var_discrete\", scenario_nr=5, sigmas=sigmas, nr_inds=nr_inds, betas=betas, title=\"Various Discretizations\") # Plots eight scenarios\n \n # Plots the reflected Scenarios\n #sigmas = [[0.8, 0.4], [0.4, 0.8], [0.8, 0.4], [0.4, 0.8], [0.4, 0.8], [0.8, 0.4]]\n #nr_inds = [[500, 1000], [1000, 500], [1000, 500], [500, 1000], [100, 200],[200, 100]]\n #betas = [1.0, 1.0, 1.0, 1.0, 0.5, 0.5]\n #plot_eight_scenarios(folder=\"./hetero_runs_symmetric\", scenario_nr=6, sigmas=sigmas, nr_inds=nr_inds, betas=betas, title=\"Three mirrored scenarios\")\n \n nr_inds = [[100, int(i * 100)] for i in [0.25, 0.5, 0.75, 1, 1.5, 2]]\n sigmas = [[0.4, 0.8] for _ in xrange(6)]\n betas = [0.5 for _ in xrange(6)]\n plot_estimates_scenarios(folder=\"./hetero_runs_var_beta\", scenario_nr=6, sigmas=sigmas, nr_inds=nr_inds, betas=betas, title=\"Increasing Beta\")\n \n \n","sub_path":"ibd_analysis/blocksim/.ipynb_checkpoints/figs_multi_hetero-checkpoint.py","file_name":"figs_multi_hetero-checkpoint.py","file_ext":"py","file_size_in_byte":12121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"21954866","text":"\n# %%\nimport math\nimport numpy as np\nimport yaml\nimport sys\nimport my_module as mm\nimport pandas as pd\nsys.path.append('/home/milanpetrovic/my_module/src')\n\n\nCONFIG = '/home/milanpetrovic/my_module/configs/main.yaml'\nSAVE_PATH = '../../data/preproc/1_0_preproc_data'\n\n\nwith open(CONFIG) as f:\n config = yaml.safe_load(f)\n\nfiles_to_check = mm.load_files_from_folder(\n config['raw_data_path'], config['file_extension'])\n\nfor file_name, file_path in files_to_check.items():\n df = pd.read_csv(file_path)\n df = mm.interpolate_inf_values(df, interpolation_method='linear')\n\n df = mm.convert_radians_to_degrees(df, config['angle_column'])\n\n name = SAVE_PATH + '/' + config['pop_name'] + '/' + file_name\n df.to_csv(name)\n\n# %%\n\nPATH = '/home/milanpetrovic/my_module/data/preproc/1_0_preproc_data/25_03_2022/2022-03-25_11-46_fly_1.csv'\n\ndf = pd.read_csv(PATH).round(decimals=2)\ndf = df[['ANGLE', 'X (cm)', 'Y (cm)']]\ndf.reset_index(inplace=True)\n\n\ndf = df.iloc[:-1, :]\ndf = df[['X (cm)', 'Y (cm)']]\ndf.columns = ['x', 'y']\n# df1.reset_index()\n\nres = df\n\nres['x_axis_dif'] = (0 - res['x']).abs()\nres['y_axis_dif'] = (0 - res['y']).abs()\n\nres['distance'] = np.sqrt(np.square(res['x_axis_dif']) +\n np.square(res['y_axis_dif']))\n\nres['angle'] = np.sin(res['y_axis_dif']/res['distance'])\n\nres = mm.convert_radians_to_degrees(res, 'angle')\n\nchck = res.iloc[::100,:]\n# %%\nsys.path.append('/home/milanpetrovic/my_module/src')\n\n\n# ax = angles.plot.hist()\n# preproc_data(raw_data):\n# inspec_raw_data():\n# FOOS FOR DATA PRE PROCESING\n# NORMALIZE, ROUND VALUES, ETC.\n\n# %%\n","sub_path":"src/pipeline/1_0_preproc_data.py","file_name":"1_0_preproc_data.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
pd.read_csv(file_path)\n df = mm.interpolate_inf_values(df, interpolation_method='linear')\n\n df = mm.convert_radians_to_degrees(df, config['angle_column'])\n\n name = SAVE_PATH + '/' + config['pop_name'] + '/' + file_name\n df.to_csv(name)\n\n# %%\n\nPATH = '/home/milanpetrovic/my_module/data/preproc/1_0_preproc_data/25_03_2022/2022-03-25_11-46_fly_1.csv'\n\ndf = pd.read_csv(PATH).round(decimals=2)\ndf = df[['ANGLE', 'X (cm)', 'Y (cm)']]\ndf.reset_index(inplace=True)\n\n\ndf = df.iloc[:-1, :]\ndf = df[['X (cm)', 'Y (cm)']]\ndf.columns = ['x', 'y']\n# df1.reset_index()\n\nres = df\n\nres['x_axis_dif'] = (0 - res['x']).abs()\nres['y_axis_dif'] = (0 - res['y']).abs()\n\nres['distance'] = np.sqrt(np.square(res['x_axis_dif']) +\n np.square(res['y_axis_dif']))\n\nres['angle'] = np.sin(res['y_axis_dif']/res['distance'])\n\nres = mm.convert_radians_to_degrees(res, 'angle')\n\nchck = res.iloc[::100,:]\n# %%\nsys.path.append('/home/milanpetrovic/my_module/src')\n\n\n# ax = angles.plot.hist()\n# preproc_data(raw_data):\n# inspec_raw_data():\n# FOOS FOR DATA PRE PROCESING\n# NORMALIZE, ROUND VALUES, ETC.\n\n# %%\n","sub_path":"src/pipeline/1_0_preproc_data.py","file_name":"1_0_preproc_data.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54400797","text":"# write your code here...\n# https://leetcode.com/problems/regular-expression-matching/\n\n\"\"\"\naaa\na*\n* -> dp[i-1][j-1]\n\n\na*\na\n\n0 , 1, ...*\n\ncb*\nb\n\ndp[i][j-2]\n\nif( p[i-1] == s[j-1] || p[i-1] == '.')\n dp[i][j] = dp[i-1][j-1]\nelse if(p[i-1] == '*')\n dp[i][j] = dp[i-2][j]\n if(s[j-1] == p[i-2] || p[i-1] == '.')\n {\n dp[i][j]= dp[i][j] or dp[i][j-1]\n }\n\n\n\nelse if( s[i-1] != p[j-1])\n dp[i][j] = F\n\n=T\n\n---\ndp[i][j] = if p[i-1]==s[j-1]\n \"\" a a a\n\"\" T F F F\na F T F F\n* T T\n\"\"\"\n\n# Incorrect\nclass Solution:\n def isMatch(self, s, p):\n if p == \".*\":\n return True\n n = len(s)\n m = len(p)\n dp = [[False for _ in range(n+1)] for _ in range(m+1)]\n dp[0][0] = True # empty string\n if p[0] == \".\":\n dp[1][0] = True\n for i in range(1, m+1):\n # 0th column\n if p[i-1] == \"*\":\n dp[i][0] = dp[i-2][0]\n for i in range(1, m+1):\n for j in range(1, n+1):\n if p[i-1] == s[j-1] or p[i-1] == \".\":\n dp[i][j] = dp[i-1][j-1]\n elif p[i-1] == \"*\":\n dp[i][j] = dp[i-1][j]\n if p[i-3] == s[j-1] or p[i-3] == \".\":\n dp[i][j] = dp[i][j] or dp[i-2][j]\n for i in dp: print(i)\n return dp[-1][-1]\n\n\"\"\"\n 0 1 2\n \"\" a a a\n \"\" T F F F\n0 a F T F F\n1 b F f f f\n2 * F t f f\n3 a F \n4 c F\n5 * F\n6 a F\n\n \"\" a a\n\"\" T F F\na F t f\n* T t \n\"\"\"\n\n\nclass Solution2:\n def isMatch(self, s: str, p: str) -> bool:\n form = [[False for _ in range(len(p) + 1)] for __ in range(len(s) + 1)]\n form[0][0] = True # empty\n for j in range(2, len(p) + 1):\n form[0][j] = self.check(\"\", p[j - 1], p[j - 2], False, False, form[0][j - 2])\n for i in range(1, len(s) + 1):\n for j in range(1, len(p) + 1):\n p_prev = p[j - 2] if j >= 2 else \"\"\n lefter = form[i][j - 2] if j >= 2 else False\n form[i][j] = self.check(s[i - 1], p[j - 1], p_prev, form[i - 1][j - 1],\n form[i - 1][j], lefter)\n # print(form)\n return form[-1][-1]\n\n def check(self, s: str, p: str, p_prev: str, prev: bool, up: bool, lefter: bool) -> bool:\n if s == p or p == \".\":\n return prev\n elif p == \"*\":\n if s == p_prev or p_prev == \".\":\n return lefter or up\n return lefter\n else:\n return False\n\n# s=\"aa\"\n# p = \"a*\"\ns = \"aab\"\np = 
\"c*a*b\"\ns = [\"aaa\", \"aab\", \"aa\"]\np = [\"ab*ac*a\", \"c*a*b\", \"a*\"]\n\nfor s1, p1 in zip(s, p):\n print(Solution().isMatch(s1, p1))","sub_path":"LeetCode/Hard/regular_expression.py","file_name":"regular_expression.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"311790386","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError,URLError\nfrom bs4 import BeautifulSoup\nhtml=urlopen(\"http://www.pythonscraping.com/pages/warandpeace.html\")\nbsObj=BeautifulSoup(html,\"html.parser\")\nprint(bsObj)\nnamelist=bsObj.findAll(\"span\",{\"class\":\"green\"})\nfor name in namelist:\n print(name.get_text())\nnamelist2=bsObj.findAll({\"h1\",\"h2\"})\nfor name in namelist2:\n print(name)\n\nnamelist3=bsObj.findAll(\"\",{\"id\":\"text\"})\nfor name in namelist3:\n print(name)\n print(\"\\n\\r\")\nnamelist4=bsObj.span\nprint(namelist4)\n\n","sub_path":"MorizeyaoJavaTest2/pythonMod/webScrapingWithPython/scraptest2.py","file_name":"scraptest2.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"654270168","text":"#!/usr/bin/env python\n\nimport sys\nimport os\n\n\ndef list_dir(dirname, action_dir=None, action_dirs=None, action_files=None, check_md5=False):\n for dirname, dirnames, filenames in os.walk(dirname):\n if action_dir is not None:\n action_dir(dirname, dirnames, filenames)\n for subdirname in dirnames:\n if action_dirs is not None:\n action_dirs(dirname, subdirname)\n for filename in filenames:\n if action_files is not None:\n action_files(dirname, filename)\n\n\ndef master_file_append(dirname, filename):\n fullname = os.path.join(dirname, filename)\n try:\n filesize = os.path.getsize(fullname)\n master_files.append((filename, fullname, filesize))\n except OSError:\n pass\n\n\ndef source_file_append(dirname, filename):\n fullname = os.path.join(dirname, filename)\n try:\n filesize = os.path.getsize(fullname)\n source_files.append((filename, fullname, filesize))\n except OSError:\n pass\n\n\ndef is_files_identical_by_content(name1, name2):\n if os.path.getsize(name1) != os.path.getsize(name2):\n return False\n file1 = open(name1, 'rb')\n file2 = open(name2, 'rb')\n result = f1.read() == f2.read()\n file1.close()\n file2.close()\n return result\n\n\ndef is_files_identical_by_name_and_size(name1, fullname1, name2, fullname2):\n return (name1 == name2 and\n os.path.getsize(fullname1) != os.path.getsize(fullname2))\n\ndef is_files_identical_y_md5(name1, name2):\n if os.path.getsize(name1) != os.path.getsize(name2):\n return False\n # TODO\n return True\n\n\ndef print_match(name1, name2, print_rm, print_first, print_second):\n if print_first:\n if print_rm:\n print(\"rm %s\" % name1)\n else:\n print(name1)\n if print_second:\n if print_rm:\n print(\"rm %s\" % name2)\n else:\n print(name2)\n if print_first and print_second:\n print(\"\")\n\n\ndef usage():\n print(\"Usage: %s DIRECTORY1 DIRECTORY2 [OPTIONS]\" % sys.argv[0])\n print(\"\\t-1 --first-only --only-first print only first file (in DIRECTORY1) of two matched\")\n print(\"\\t-2 --second-only --only-second print only second file (in DIRECTORY2) of two matched\")\n print(\"\\t-f --fast --same-name-and-size is it default\")\n print(\"\\t-m --compare-md5\")\n print(\"\\t-r --rm add rm instruction to output\")\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) < 3:\n usage()\n exit(0)\n \n print_directories_content = 
False\n\n    print_first = False\n    print_second = False\n\n    check_fast = False\n    check_md5 = False\n    check_content = False\n\n    print_rm = False\n\n    for arg in sys.argv[3:]:\n        if arg in ['-1', '--first-only', '--only-first']:\n            print_first = True\n        if arg in ['-2', '--second-only', '--only-second']:\n            print_second = True\n        if arg in ['-f', '--fast', '--same-name-and-size']:\n            check_fast = True\n        if arg in ['-m', '--compare-md5']:\n            check_md5 = True\n        if arg in ['-c', '--compare-content']:\n            check_content = True\n        if arg in ['-r', '--rm']:\n            print_rm = True\n        if arg in ['-d', '--print-directories-content']:\n            print_directories_content = True\n\n    if not(print_first or print_second):\n        print_first = True\n        print_second = True\n\n    if not(check_fast or check_md5 or check_content):\n        check_fast = True\n\n    master_files = []\n    source_files = []\n\n    path_master = sys.argv[1]\n    path_source = sys.argv[2]\n\n    list_dir(path_master, None, None, master_file_append, check_md5)\n    list_dir(path_source, None, None, source_file_append, check_md5)\n\n    if print_directories_content:\n        print(\"# %s\" % path_master)\n        for name in master_files:\n            print(name)\n        print(\"\")\n        print(\"# %s\" % path_source)\n        for name in source_files:\n            print(name)\n\n    for m in master_files:\n        for s in source_files:\n            # do not compare file and same file\n            if m[1] == s[1]:\n                continue\n\n            if check_fast:\n                if is_files_identical_by_name_and_size(m[0], m[1], s[0], s[1]):\n                    print_match(m[1], s[1], print_rm, print_first, print_second)\n\n            if check_content:\n                if is_files_identical_by_content(m[1], s[1]):\n                    print_match(m[1], s[1], print_rm, print_first, print_second)\n\n            if check_md5:\n                if is_files_identical_by_md5(m[1], s[1]):\n                    print_match(m[1], s[1], print_rm, print_first, print_second)\n\n","sub_path":"find-duplicates.py","file_name":"find-duplicates.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198688349","text":"def findStrobogrammatic(n):\n    result = []\n    hash = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}\n    helper(result, [None]*n, 0, n-1, hash)\n\n    return result\n\ndef helper(result, item, start, end, hash):\n    if start > end:\n        result.append(''.join(item))\n        return\n\n    for key in hash:\n        if start == end and key in ('6','9'): # 6,9 can't be in the odd middle position\n            continue\n\n        if start != end and start == 0 and key == '0':\n            continue\n\n        item[start], item[end] = key, hash[key]\n        helper(result, item, start+1, end-1, hash)\n\nif __name__=='__main__':\n    # a strobogrammatic number looks the same upside down\n    # find strings of length n that are strobogrammatic\n    # time: O(5 ^(n/2))\n    # space: O(n)\n    print(findStrobogrammatic(5))","sub_path":"recursion/strobogrammatic2.py","file_name":"strobogrammatic2.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613155327","text":"from plugins import plugin, send_slack_message, startswithany\nimport json\n\n\n@plugin()\nclass cloudtrail(object):\n\tCLOUDWATCH_RULE_NAME = \"sec_alerts_cloudtrail\"\n\tCLOUDWATCH_FILTER = {\n\t\t\"EventPattern\": {\n\t\t\t\"detail-type\": [\"AWS API Call via CloudTrail\"],\n\t\t\t\"detail\": {\n\t\t\t\t\"eventSource\": [\"cloudtrail.amazonaws.com\"]\n\t\t\t}\n\t\t}\n\t}\n\n\tdef __init__(self, config):\n\t\tself.config = config\n\n\tdef match(self, event):\n\t\tif event.get(\"source\") == \"aws.cloudtrail\":\n\t\t\tif not 
startswithany(event['detail']['eventName'], [\"Create\", \"Lookup\"]):\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef process(self, event):\n\t\ttext = \"```{}```\".format(json.dumps(event, indent=4, sort_keys=True))\n\t\tmessage = {\n\t\t\t\"channel\": self.config['SLACK_CHANNEL'],\n\t\t\t\"username\": \"Security-Otter Bot\",\n\t\t\t\"icon_url\": \"http://d.hx.io/1NeN/2oFhQrsA.png\",\n\t\t\t\"attachments\": [\n\t\t\t\t{\n\t\t\t\t\t\"fallback\": text,\n\t\t\t\t\t\"color\": \"#36a64f\",\n\t\t\t\t\t\"title\": \"AWS CloudTrail Modification - {} {} ({})\".format(event['detail']['eventName'], self.config['ACCOUNTS'][event['account']]['name'], event['account']),\n\t\t\t\t\t\"text\": text,\n\t\t\t\t\t\"mrkdwn_in\": [\"text\", \"pretext\"]\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t\tsend_slack_message(message, self.config['SLACK_WEBHOOK'])\n","sub_path":"plugins/cloudtrail.py","file_name":"cloudtrail.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384532579","text":"import argparse\nimport random\nfrom glob import glob\nfrom typing import Tuple\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy\nimport pandas\nimport pandas as pd\nfrom PIL import Image\nfrom imblearn.over_sampling import SMOTE\nfrom pandas import DataFrame\nfrom tqdm import tqdm\nimport seaborn\n\n\ndef IOU(x, centroids):\n    similarities = []\n    k = len(centroids)\n    for centroid in centroids:\n        c_w, c_h = centroid\n        w, h = x\n        if c_w >= w and c_h >= h:\n            similarity = w * h / (c_w * c_h)\n        elif c_w >= w and c_h <= h:\n            similarity = w * c_h / (w * h + (c_w - w) * c_h)\n        elif c_w <= w and c_h >= h:\n            similarity = c_w * h / (w * h + c_w * (c_h - h))\n        else: # means both w,h are bigger than c_w and c_h respectively\n            similarity = (c_w * c_h) / (w * h)\n        similarities.append(similarity) # will become (k,) shape\n    return numpy.array(similarities)\n\n\ndef avg_IOU(X, centroids):\n    n, d = X.shape\n    sum = 0.\n    for i in range(X.shape[0]):\n        # note IOU() will return array which contains IoU for each centroid and X[i] // slightly ineffective, but I am too lazy\n        sum += max(IOU(X[i], centroids))\n    return sum / n\n\n\ndef kmeans(X: numpy.ndarray, centroids: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]:\n    N = X.shape[0]\n    k, dim = centroids.shape\n    prev_assignments = numpy.ones(N) * (-1)\n    iter = 0\n\n    while True:\n        D = []\n        iter += 1\n        for i in range(N):\n            d = 1 - IOU(X[i], centroids)\n            D.append(d)\n        D = numpy.array(D) # D.shape = (N,k)\n        mean_IOU = numpy.mean(D)\n\n        # assign samples to centroids\n        assignments = numpy.argmin(D, axis=1)\n\n        if (assignments == prev_assignments).all():\n            return mean_IOU, centroids\n\n        # calculate new centroids\n        centroid_sums = numpy.zeros((k, dim), float)\n        for i in range(N):\n            centroid_sums[assignments[i]] += X[i]\n        for j in range(k):\n            centroids[j] = centroid_sums[j] / (numpy.sum(assignments == j))\n\n        prev_assignments = assignments.copy()\n\n\ndef visualize_anchors(anchors: numpy.ndarray, visualization_width: int = 1000, visualization_height: int = 1000):\n    colors = [(255, 0, 0), (255, 255, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255), (255, 100, 0),\n              (0, 255, 100), (255, 255, 255), (100, 255, 55)]\n\n    blank_image = numpy.zeros((visualization_height, visualization_width, 3), numpy.uint8)\n\n    stride_h = 10\n    stride_w = 10\n\n    for i in range(len(anchors)):\n        (w, h) = anchors[i]\n        w = int(w * visualization_width)\n        h = int(h * visualization_height)\n        # print(w, h)\n        left_upper_corner = 
(10 + i * stride_w, 10 + i * stride_h)\n right_lower_corner = (left_upper_corner[0] + w, left_upper_corner[1] + h)\n cv2.rectangle(blank_image, left_upper_corner, right_lower_corner, colors[i], thickness=3)\n\n cv2.imwrite(\"anchors-{0}.png\".format(len(anchors)), blank_image)\n\n\ndef resample_dataset(dimensions: DataFrame, resampling_method='svm') -> DataFrame:\n reproducible_seed = 42\n\n report(\"Class distribution before resampling\")\n class_statistics = dimensions[['class']].groupby('class').size()\n report(str(class_statistics))\n\n report(\"Resampling with SMOTE ({0})\".format(resampling_method))\n # See http://contrib.scikit-learn.org/imbalanced-learn/stable/auto_examples/over-sampling/plot_comparison_over_sampling.html for a comparison between different resampling methods\n smote = SMOTE(random_state=reproducible_seed, kind=resampling_method)\n X_resampled, y_resampled = smote.fit_sample(dimensions[[\"width\", \"height\"]],\n dimensions[\"class\"])\n y = DataFrame(y_resampled)\n y.columns = ['class']\n\n report(\"Class distribution after resampling\")\n report(str(y.groupby('class').size()))\n resampled_annotations = pd.concat([DataFrame(X_resampled), DataFrame(y_resampled)], axis=1) # type: DataFrame\n resampled_annotations.columns = [\"width\", \"height\", \"class\"]\n return resampled_annotations\n\n\ndef report(text):\n with open(\"dimension_clustering_protocol.txt\", \"a\") as dimension_clustering_protocol:\n print(text)\n dimension_clustering_protocol.writelines(text + \"\\n\")\n\n\ndef load_annotation_dimensions(annotations_csv_path: str):\n annotation_dimensions = pandas.read_csv(annotations_csv_path)\n seaborn.lmplot(x=\"width\", y=\"height\", hue='class', scatter_kws={\"s\": 1}, data=annotation_dimensions, legend=False,\n markers='o', fit_reg=False, palette=\"Set2\")\n # plt.show()\n plt.savefig(\"object_size_distribution.png\", dpi=300)\n return annotation_dimensions[['width', 'height']].as_matrix()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--annotations_csv_path\", type=str, default=\"bounding_box_dimensions_relative.csv\",\n help=\"Path to the csv-file that holds the annotation dimensions. The csv file must contain a \"\n \"header and columns called 'width' and 'height'. It is recommended to use relative \"\n \"dimensions (width of an object / width of the entire image), unless all images have \"\n \"exactly the same size, to increase robustness.\")\n parser.add_argument(\"--visualization_width\", type=int, default=\"0\",\n help=\"Expected size of input images (only used for scaling output correctly)\")\n parser.add_argument(\"--visualization_height\", type=int, default=\"0\",\n help=\"Expected size of input images (only used for scaling output correctly)\")\n parser.add_argument(\"--maximum_number_of_clusters\", type=int, default=\"5\",\n help=\"Maximum number of clusters that should be evaluated. 
\"\n \"Will evaluate all integers between one and this number.\")\n\n flags, unparsed = parser.parse_known_args()\n total_number_of_clusters_to_evaluate = flags.maximum_number_of_clusters\n visualization_width, visualization_height = flags.visualization_width, flags.visualization_height\n\n if visualization_width == 0 or visualization_height == 0:\n # If not specified, load images from MUSCIMA++ dataset\n all_images = glob(\"../data/cvcmuscima_staff_removal/CvcMuscima-Distortions/ideal/**/image/*.png\",\n recursive=True)\n sizes = []\n for image_path in tqdm(all_images, desc=\"Collecting image sizes\"):\n image = Image.open(image_path)\n sizes.append(image.size)\n sizes_df = pandas.DataFrame(sizes, columns=[\"width\", \"height\"])\n visualization_width, visualization_height = sizes_df[\"width\"].mean(), sizes_df[\"height\"].mean()\n print(\"Minimum image size: {0:.0f}x{1:.0f}px\".format(sizes_df[\"width\"].min(), sizes_df[\"height\"].min()))\n print(\"Maximum image size: {0:.0f}x{1:.0f}px\".format(sizes_df[\"width\"].max(), sizes_df[\"height\"].max()))\n\n print(\"Average image size: {0:.0f}x{1:.0f}px\".format(visualization_width, visualization_height))\n\n dims = load_annotation_dimensions(flags.annotations_csv_path)\n\n statistics = []\n\n for num_clusters in tqdm(range(1, total_number_of_clusters_to_evaluate + 1), desc=\"Computing clusters\"):\n indices = [random.randrange(dims.shape[0]) for i in range(num_clusters)]\n initial_centroids = dims[indices]\n meanIntersectionOverUnion, centroids = kmeans(dims, initial_centroids)\n statistics.append((num_clusters, meanIntersectionOverUnion, centroids))\n\n grid_size = 16\n for (clusters, iou, centroids) in statistics:\n print(\"{0} clusters: {1:.4f} mean IOU\".format(clusters, iou))\n scales = []\n for c in centroids:\n print(\n \"[{0:.4f} {1:.4f}] - Ratio: {2:.4f} = {3:.0f}x{4:.0f}px scaled \"\n \"to {5:.0f}x{6:.0f} image\".format(\n c[0], c[1], c[0] / c[1], c[0] * visualization_width, c[1] * visualization_height,\n visualization_width, visualization_height))\n scales.append(c[0] * visualization_width / grid_size)\n scales.append(c[1] * visualization_height / grid_size)\n scales.sort()\n print(\"Scales relative to {0}x{0} grid: {1}\".format(grid_size, [\"{0:.2f}\".format(x) for x in scales]))\n visualize_anchors(centroids, int(visualization_width), int(visualization_height))\n","sub_path":"MusicObjectDetector/dimension_clustering/dimension_clustering.py","file_name":"dimension_clustering.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337723361","text":"import asyncio\nfrom collections.abc import Mapping\n\nfrom typing import List, Dict, Tuple, Callable, Union, Any, Set, Optional, AsyncIterator\n\nfrom .element import Element\nfrom .helpers import EventHandler\nfrom .utils import to_coroutine\n\ntry:\n import vdom\nexcept ImportError:\n vdom = None\n\n\nRenderType = Tuple[List[str], Dict[str, Dict], List[str]]\n\n\nclass RenderError(Exception):\n \"\"\"An error occured while rendering element models.\"\"\"\n\n\nclass Layout:\n \"\"\"Renders the models generated by :class:`Element` objects.\"\"\"\n\n __slots__ = (\n \"_loop\",\n \"_render_semaphore\",\n \"_update_queue\",\n \"_animate_queue\",\n \"_rendering\",\n \"_root\",\n \"_state\",\n )\n\n def __init__(self, root: \"Element\", loop: asyncio.AbstractEventLoop = None):\n if loop is None:\n loop = asyncio.get_event_loop()\n if not isinstance(root, Element):\n raise TypeError(\"Expected an 
Element, not %r\" % root)\n self._loop = loop\n self._state: Dict[str, Dict] = {}\n self._root = root\n self._update_queue: List[Element] = []\n self._render_semaphore = asyncio.Semaphore(1, loop=loop)\n self._animate_queue: List[Callable] = []\n self._create_element_state(root.id, None)\n self._rendering = False\n self.update(root)\n\n @property\n def loop(self):\n return self._loop\n\n @property\n def root(self) -> str:\n return self._root.id\n\n async def apply(self, target: str, handler: str, data: dict):\n model_state = self._state[target]\n event_handler = model_state[\"event_handlers\"][handler]\n await event_handler(data)\n\n def animate(self, function: Callable):\n self._animate_queue.append(to_coroutine(function))\n if self._render_semaphore.locked():\n # We don't want to release more than once because\n # all changes are renderer in one go. Multiple releases\n # could cause another render even though there were no\n # no updates from the last.\n self._render_semaphore.release()\n\n def update(self, element: \"Element\"):\n self._update_queue.append(element)\n if self._render_semaphore.locked():\n # We don't want to release more than once because\n # all changes are renderer in one go. Multiple releases\n # could cause another render even though there were no\n # no updates from the last.\n self._render_semaphore.release()\n\n async def render(self) -> RenderType:\n if self._rendering:\n raise RuntimeError(\"Layout is already awaiting a render.\")\n else:\n self._rendering = True\n\n await self._render_semaphore.acquire()\n\n # current element ids\n current: Set[str] = set(self._state)\n\n callbacks = self._animate_queue[:]\n self._animate_queue.clear()\n await asyncio.gather(*[cb() for cb in callbacks])\n\n # root elements which updated\n roots: List[str] = []\n # all element updates\n new: Dict[str, Dict] = {}\n\n updates = self._update_queue[:]\n self._update_queue.clear()\n\n for element in updates:\n parent = self._state[element.id][\"parent\"]\n async for element_id, model in self._render_element(element, parent):\n new[element_id] = model\n roots.append(element.id)\n\n # all deleted element ids\n old: List[str] = list(current.difference(self._state))\n\n self._rendering = False\n\n return roots, new, old\n\n async def _render_element(\n self, element: \"Element\", parent_element_id: str\n ) -> AsyncIterator[Tuple[str, Dict]]:\n try:\n if not element.mounted():\n element.mount(self)\n\n model = await element.render()\n\n if isinstance(model, Element):\n model = {\"tagName\": \"div\", \"children\": [model]}\n\n element_id = element.id\n if self._has_element_state(element_id):\n self._reset_element_state(element_id)\n else:\n self._create_element_state(element_id, parent_element_id)\n\n async for i, m in self._render_model(model, element_id):\n yield i, m\n except Exception as error:\n raise RenderError(f\"Failed to render {element}\") from error\n\n async def _render_model(\n self, model: Mapping, element_id: str\n ) -> AsyncIterator[Tuple[str, Dict]]:\n index = 0\n to_visit: List[Union[Mapping, Element]] = [model]\n while index < len(to_visit):\n node = to_visit[index]\n if isinstance(node, Element):\n async for i, m in self._render_element(node, element_id):\n yield i, m\n elif isinstance(node, Mapping):\n if \"children\" in node:\n value = node[\"children\"]\n if isinstance(value, (list, tuple)):\n to_visit.extend(value)\n elif isinstance(value, (Mapping, Element)):\n to_visit.append(value)\n elif vdom is not None and isinstance(node, vdom.VDOM):\n 
to_visit.append(_from_vdom(node))\n index += 1\n yield element_id, self._load_model(model, element_id)\n\n def _load_model(self, model: Mapping, element_id: str):\n model = dict(model)\n if \"children\" in model:\n model[\"children\"] = self._load_model_children(model[\"children\"], element_id)\n if \"eventHandlers\" in model:\n model[\"eventHandlers\"] = self._load_event_handlers(\n model[\"eventHandlers\"], element_id\n )\n return model\n\n def _load_model_children(\n self, children: Union[List, Tuple], element_id: str\n ) -> List[Dict]:\n if not isinstance(children, (list, tuple)):\n children = [children]\n loaded_children = []\n for child in children:\n if isinstance(child, Mapping):\n child = {\"type\": \"obj\", \"data\": self._load_model(child, element_id)}\n elif isinstance(child, Element):\n child = {\"type\": \"ref\", \"data\": child.id}\n else:\n child = {\"type\": \"str\", \"data\": str(child)}\n loaded_children.append(child)\n return loaded_children\n\n def _load_event_handlers(\n self, handlers: Dict[str, Callable], element_id: str\n ) -> Dict[str, str]:\n event_targets = {}\n for event, handler in handlers.items():\n if not isinstance(handler, EventHandler):\n handler = EventHandler(handler, event)\n handler_specification = event_targets[element_id] = handler.serialize()\n self._state[element_id][\"event_handlers\"][handler_specification] = handler\n return event_targets\n\n def _has_element_state(self, element_id: str) -> bool:\n return element_id in self._state\n\n def _create_element_state(self, element_id: str, parent_element_id: Optional[str]):\n if parent_element_id is not None and self._has_element_state(parent_element_id):\n self._state[parent_element_id][\"inner_elements\"].add(element_id)\n self._state[element_id] = {\n \"parent\": parent_element_id,\n \"inner_elements\": set(),\n \"event_handlers\": {},\n }\n\n def _reset_element_state(self, element_id: str):\n parent_element_id = self._state[element_id][\"parent\"]\n self._delete_element_state(element_id)\n self._create_element_state(element_id, parent_element_id)\n\n def _delete_element_state(self, element_id: str):\n old = self._state.pop(element_id)\n parent_element_id = old[\"parent\"]\n if self._has_element_state(parent_element_id):\n self._state[parent_element_id][\"inner_elements\"].remove(element_id)\n for i in old[\"inner_elements\"]:\n self._delete_element_state(i)\n\n\ndef _from_vdom(node: Any):\n data = {\n \"tagName\": node.tag_name,\n \"children\": node.children,\n \"attributes\": node.attributes,\n }\n if node.style:\n data[\"attributes\"][\"style\"] = node.style\n if node.event_handlers:\n data[\"eventHandlers\"] = node.event_handlers\n if node.key:\n data[\"key\"] = node.key\n return data\n","sub_path":"src/py/idom/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":8354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"181277067","text":"from file_reader import FileReader\nfrom decoder import Decoder\n\ndef main():\n file_name = \"data/to_decipher.txt\"\n crypto = FileReader().readFile(file_name)\n decoder = Decoder(crypto)\n decrypted = decoder.decode()\n return 0\n\nif __name__ == \"__main__\":\n main()","sub_path":"Lista2/Zad2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47780588","text":"from typing import List\n\nclass Solution:\n def gameOfLife(self, board: List[List[int]]) -> None:\n 
\"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n def state(i, j):\n if board[i][j] == 0 or board[i][j] == 2:\n return 0\n else:\n return 1\n def around(i, j):\n live = 0\n if i > 0:\n if j > 0:\n live += state(i-1, j-1)\n live += state(i-1, j)\n if j < len(board[0])-1:\n live += state(i-1, j+1)\n if i < len(board)-1:\n if j > 0:\n live += state(i+1, j-1)\n live += state(i+1, j)\n if j < len(board[0])-1:\n live += state(i+1, j+1)\n if j > 0:\n live += state(i, j-1)\n if j < len(board[0])-1:\n live += state(i, j+1)\n return live\n for i in range(len(board)):\n for j in range(len(board[0])):\n live = around(i, j)\n if board[i][j] == 0 and live == 3:\n board[i][j] = 2\n elif board[i][j] == 1 and not 2 <= live <= 3:\n board[i][j] = 3\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 2:\n board[i][j] = 1\n elif board[i][j] == 3:\n board[i][j] = 0\n","sub_path":"contest/2020/12/game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"237970051","text":"from graphviz import Digraph\nfrom graphviz import Graph\nfrom bottle import route, run, template, post, request\nimport pymysql\nimport webbrowser\nfrom random import randint\n\ndef FieldAuthorGraph(filename, authorName, targets, isPrintAuthor):\n global search\n\n institution = []\n insAu = []\n field = []\n fieldAu = []\n\n\n #include author when author is selected\n if isPrintAuthor == '1':\n \n #access author's field\n query = \"SELECT * FROM `fields` WHERE `scholarID` = '\" + search +\"'\"\n cur.execute(query)\n\n for row in cur:\n\n repeat = 0\n\n #check when there are repeated field name\n for fie in field:\n if fie == row[1]:\n repeat = 1\n if repeat == 0:\n field.append(row[1])\n\n #check if repeated field name and author name\n repeat = 0\n for fA in fieldAu:\n if authorName == fA[0] and row[1] == fA[1]:\n repeat = 1\n if repeat == 0:\n fieldAu.append((authorName, row[1]))\n\n #access author's institution \n query = \"SELECT * FROM `institutions` WHERE `scholarID` = '\" + search +\"'\"\n cur.execute(query)\n\n for row in cur:\n repeat = 0\n \n #parse the institution name and save in \"uni\"\n parse = row[1].split(\", \")\n for parsed in parse:\n if \"University\" in parsed:\n uni = parsed\n break\n else:\n uni = row[1]\n \n \n #check when there are repeated institution name\n for ins in institution:\n if ins == uni:\n repeat = 1\n break\n if repeat == 0:\n institution.append(uni)\n\n #check if repeated institution name and author name\n repeat = 0\n for iA in insAu:\n if authorName == iA[0] and uni == iA[1]:\n repeat = 1\n break\n if repeat == 0:\n insAu.append((authorName, uni))\n \n #access fields and scholars relationship\n for element in targets:\n query = \"SELECT * FROM `fields` WHERE `scholarID` = '\" + element[1] +\"'\"\n cur.execute(query)\n\n\n \n for row in cur:\n repeat = 0\n\n #check when there are repeated field name\n for fie in field:\n if fie == row[1]:\n repeat = 1\n if repeat == 0:\n field.append(row[1])\n\n #check if repeated field name and author name\n repeat = 0\n for fA in fieldAu:\n if element[0] == fA[0] and row[1] == fA[1]:\n repeat = 1\n if repeat == 0:\n #find author's name\n fieldAu.append((element[0], row[1]))\n \n\n\n #access institutions and scholars relationship\n for element in targets:\n query = \"SELECT * FROM `institutions` WHERE `scholarID` = '\" + element[1] +\"'\"\n cur.execute(query)\n\n for row in cur:\n 
repeat = 0\n \n #parse the institution name and save in \"uni\"\n parse = row[1].split(\", \")\n for parsed in parse:\n if \"University\" in parsed:\n uni = parsed\n break\n else:\n uni = row[1]\n \n \n #check when there are repeated institution name\n for ins in institution:\n if ins == uni:\n repeat = 1\n break\n if repeat == 0:\n institution.append(uni)\n\n #check if repeated institution name and author name\n repeat = 0\n for iA in insAu:\n if element[0] == iA[0] and uni == iA[1]:\n repeat = 1\n break\n if repeat == 0:\n insAu.append((element[0], uni))\n\n \n \n\n g = Graph('G')\n colorAu= []\n cCluster = []\n clusterCounter = 0\n colorNum = 1\n \n #construct multiple clusters and construct its nodes\n for ins in institution:\n cCluster.append(Graph(\"cluster_\" + str(clusterCounter)))\n cCluster[clusterCounter].body.append('style=filled')\n cCluster[clusterCounter].body.append(\"color=lightgrey\")\n cCluster[clusterCounter].body.append('label = \"' + ins +'\"')\n cCluster[clusterCounter].node_attr.update(style=\"filled\")\n\n for iA in insAu:\n #special case for the person it is searching for\n if ins == iA[1] and iA[0] == authorName:\n cCluster[clusterCounter].node(authorName, color = \"cyan\")\n colorAu.append((authorName, \"cyan\"))\n \n elif ins == iA[1]:\n cCluster[clusterCounter].node(iA[0], colorscheme=\"paired12\",color = str(colorNum)) \n colorAu.append((iA[0], colorNum)) #give each author a color\n #control colorNum\n colorNum += 1\n if colorNum >= 13:\n colorNum = 1\n clusterCounter += 1\n \n\n fieldCluster = Graph(\"cluster_\" + str(clusterCounter+1))\n fieldCluster.body.append('style=filled')\n fieldCluster.body.append(\"color=gray72\")\n fieldCluster.body.append('label = \"Field of Study\"')\n fieldCluster.node_attr.update(style=\"filled\")\n for fie in field:\n for fA in fieldAu:\n if fA[0] == authorName and fA[1] == fie:\n fieldCluster.node(fie, color=\"cyan\")\n else:\n fieldCluster.node(fie)\n\n\n\n for x in range(0, clusterCounter):\n g.subgraph(cCluster[x])\n g.subgraph(fieldCluster)\n\n for fA in fieldAu:\n #find the color of that author\n for cA in colorAu:\n if fA[0] == cA[0]:\n g.edge(fA[0],fA[1], colorscheme=\"paired12\", color = str(cA[1]), penwidth=\"2\") #create edge\n\n g.body.append('ratio = compress')\n g.body.append('size = \"8,30\"')\n g.body.append(' rankdir=\"LR\"')\n g.body.append('splines=line')\n #g.edge_attr.update(style='filled', color='green')\n g.format = \"svg\"\n g.render(\"img/\"+filename)\n\n\n\n\n\ndef CoAuGraph(filename, targets):\n coAuthorR = []\n coAuthorRLinks = []\n institution = []\n insAu = []\n \n #access other source author and coauthors relationship\n for element in targets:\n query = \"SELECT * FROM `connections` WHERE `sourceScholarID` = '\" + element[1] +\"' AND (\"\n idx = 0\n for target in targets:\n if idx == 0:\n query += \"targetScholarID = '\" + target[1] +\"'\"\n else:\n query += \" OR targetScholarID = '\" + target[1] +\"'\"\n idx += 1\n query += \")\"\n cur.execute(query)\n\n for row in cur:\n #change id into name\n for target in targets:\n if target[1] == row[0]:\n rowZero = target[0]\n if target[1] == row[1]:\n rowOne = target[0]\n \n #avoid repeated edges\n repeat = 0\n for coA in coAuthorR:\n if (coA[0] == rowOne and coA[1] == rowZero) or (coA[0] == rowZero and coA[1] == rowOne):\n repeat = 1\n \n\n #if not repeated then insert the relationship\n if repeat == 0:\n \n coAuthorR.append((rowZero, rowOne))\n\n\n #access institutions and scholars relationship\n for target in targets:\n query = \"SELECT * FROM 
`institutions` WHERE `scholarID` = '\" + target[1] +\"'\"\n cur.execute(query)\n\n for row in cur:\n repeat = 0\n\n #parse the institution name and save in \"uni\"\n parse = row[1].split(\", \")\n for parsed in parse:\n if \"University\" in parsed:\n uni = parsed\n break\n else:\n uni = row[1]\n \n #check when there are repeated institution name\n for ins in institution:\n if ins == uni:\n repeat = 1\n break\n if repeat == 0:\n institution.append(uni)\n\n #check if repeated institution name and author name\n repeat = 0\n for iA in insAu:\n if target[0] == iA[0] and uni == iA[1]:\n repeat = 1\n break\n if repeat == 0:\n insAu.append((target[0], uni))\n\n\n g = Graph('G')\n\n \n cCluster = []\n clusterCounter = 0\n \n #construct multiple clusters and construct its nodes\n for ins in institution:\n cCluster.append(Graph(\"cluster_\" + str(clusterCounter)))\n cCluster[clusterCounter].body.append('style=filled')\n cCluster[clusterCounter].body.append(\"color=lightgrey\")\n cCluster[clusterCounter].body.append('label = \"' + ins +'\"')\n cCluster[clusterCounter].node_attr.update(style=\"filled\")\n\n for iA in insAu:\n if ins == iA[1]:\n cCluster[clusterCounter].node(iA[0], fontsize = \"13\") \n clusterCounter += 1\n\n\n aCluster = Graph(\"cluster_\"+ str(clusterCounter+1))\n aCluster.body.append('style=filled')\n aCluster.body.append(\"color=orange\")\n aCluster.body.append('label = \"Author\"')\n aCluster.node(search)\n aCluster.node_attr.update(style=\"filled\")\n\n\n for num in range(0,clusterCounter):\n g.subgraph(cCluster[num])\n\n\n for cAR in coAuthorR:\n g.edge(cAR[0], cAR[1], penwidth=\"1.7e\")\n\n g.body.append('ratio = compress')\n g.body.append('size = \"13,30\"')\n g.body.append(' rankdir=\"BT\"')\n g.body.append('splines=line')\n #g.body.append('nodesep=\"0.3\"')\n\n\n g.format = \"svg\"\n g.render(\"img/\"+filename)\n\n\nhtmlFileName = \"dataviz\"\n\n#connect to mysql\ntry:\n print(\"Connecting to mySQL.....\")\n conn = pymysql.connect(user=\"root\", passwd=\"\", host=\"127.0.0.1\", port=3306, database=\"googlescholardb\")\n print(\"Connection established!\")\nexcept:\n print(\"Connection Failed!\")\n\n\n#Name going to be search\nsearch = \"G0yAJAw\"\n\ncur = conn.cursor()\ncur.execute(\"SELECT * FROM `connections` WHERE `sourceScholarID` = '\" + search +\"'\")\n\n#Check if there is any result\nresult = 0\nfor row in cur:\n result += 1\n\n\n\nif result > 0:\n #construct sources scholar first\n cur.execute(\"SELECT * FROM `connections` WHERE `sourceScholarID` = '\" + search +\"'\")\n targetsLinks = []\n targets = []\n\n\n for row in cur:\n repeat = 0\n for targetLink in targetsLinks:\n if targetLink == row[1]:\n repeat = 1\n\n if repeat == 0:\n targetsLinks.append(row[1])\n\n #search their name in database\n cur.execute(\"SELECT * FROM `profile` WHERE `authorID` = '\" + search +\"'\")\n\n #author first\n for row in cur:\n authorName = row[0]\n break\n\n #coauthor next\n for targetLink in targetsLinks:\n cur.execute(\"SELECT * FROM `profile` WHERE `authorID` = '\" + targetLink +\"'\")\n for row in cur:\n targets.append((row[0], targetLink))\n \n\n \n\n @route('/hello/creategraph')\n def index():\n authorHTML = '' + authorName\n coauthorHTML = ''\n target_counter = 0\n for target in targets:\n if target_counter == 0:\n coauthorHTML += '' + target[0] + ''\n else:\n coauthorHTML += '' + target[0] + ''\n target_counter += 1\n return template(\"html/\" + htmlFileName + \".html\", authorHTML=authorHTML, coauthorHTML=coauthorHTML, target_counter=target_counter)\n\n \n\n 
@post('/hello/graph')\n def getGraphData():\n option = request.forms.get(\"graphRelation\")\n authorOption = request.forms.get(\"authorBox\")\n coauthorOptions = request.forms.getlist(\"coauthorBox\")\n\n #check what target schlors are selected and put them into selectedSchlors\n selectedSchlors = []\n for coauthorOption in coauthorOptions:\n selectedSchlors.append((targets[int(coauthorOption)][0], targets[int(coauthorOption)][1]))\n\n #set authorOption to zero when it is empty\n if authorOption == None:\n authorOption = 0\n\n \n if option == \"relation1\":\n filename = \"coauthor_re\" + str(randint(0,10000))\n \n #generate graph\n CoAuGraph(filename, selectedSchlors)\n \n if option == \"relation2\":\n filename = \"field_coauthor\" + str(randint(0,10000))\n\n #generate graph\n FieldAuthorGraph(filename, authorName, targets, authorOption)\n\n return template(\"img/\" + filename + \".svg\")\n\n\n\n run(host='localhost', port=8080)\n\n cur.close()\n conn.close() \n\nelse:\n print (\"No Results Found On DB!!\")\n","sub_path":"Website/graph_select_generate.py","file_name":"graph_select_generate.py","file_ext":"py","file_size_in_byte":13298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456075027","text":"# Author: Olivia Oddo | olivia.oddo@gmail.com\n# Date: April 30, 2017\n# Description: This file creates 50 general users.\n\nfrom django.contrib.auth import get_user_model\nfrom django.utils import timezone\nfrom accounts.models import Salon, Stylist, User, SalonOwner, SalonHours\nfrom django.template.defaultfilters import slugify\nimport random\n\n\nfirst_names = ['Donna', 'Darren', 'Shannon', 'Esther', 'Linda', 'Martha', 'Devin', 'Saul', \n\t'Perry', 'Eric', 'Samuel', 'Jennifer', 'Amy', 'Della', 'Courtney', 'Julian', 'Erin', 'Gordon',\n\t'Nellie', 'Sara', 'Juanita', 'Barbara', 'Mercedes', 'Christine', 'Jacqueline', 'Shirley', 'Helen',\n\t'Rick', 'Aubrey', 'Blanca', 'Sherri', 'Angel', 'Amber', 'Janice', 'Mary', 'Robin', 'Marvin', 'Brandon',\n\t'Frances', 'Gail', 'Cassandra', 'Jane', 'Patricia', 'Helen', 'Patricia', 'Jeremiah', 'Paul', 'Meghan', 'Marta', 'Dorothy']\n\n\nlast_names =['Rhodes','Graves','Wilkins','Neal','Lewis','Nunez','Carr','Burton','Norris','Cain','Reid','Bowen',\n\t'Mcbride','Cohen','Farmer','Erickson','Davidson','Bishop','Carson','Lamb','Powers','Walker','Morgan','Hart','Harrison',\n\t'Webb','Sparks','Obrien','Wolfe','Olson','Berry','Reeves','Sims','Burke','Martinez','Bryan','Harper','Crawford','Moore',\n\t'Holmes','Wheeler','Parks','Boyd','Flores','Cox','Thornton','Griffith','Alvarado','Bell','Steele']\n\n\nfor i in range(50):\n #generate random indexes in order to get random names\n f_name_index = random.randint(0, len(first_names)-1)\n l_name_index = random.randint(0, len(last_names)-1)\n user = User(first_name=first_names[f_name_index], \n last_name=last_names[l_name_index],\n username=first_names[f_name_index]+last_names[l_name_index],\n email=first_names[f_name_index]+last_names[l_name_index]+'@hairconnect.com',\n password='pbkdf2_sha256$30000$3jG8MhyHuJio$O0lZR7i/2VQfMH+oQz/8rZuvOAPFglu5B+bk7jsqfjc=',\n is_active=True, \n is_staff=False,\n is_superuser=False,\n date_joined = timezone.now(),\n user_type='general_user',\n\n )\n user.save()\n #delete the names from the list to ensure uniqueness\n del first_names[f_name_index]\n del 
last_names[l_name_index]","sub_path":"hairconnect/create_genuser.py","file_name":"create_genuser.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121986237","text":"\n\"\"\"\nStrategy name: compute the Sharpe ratio of a long-only strategy and of a market-neutral strategy; compute the maximum drawdown and the longest drawdown duration\nDate: 2017/10/29\nAuthor: Michael Hsia\n\"\"\"\nimport datetime\nfrom matplotlib.dates import date2num\nimport tushare as ts\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nTips:\n0. Key metrics: Sharpe ratio, drawdown\n1. Any strategy with a Sharpe ratio below 1 is unsuitable for standalone use\n2. A strategy that is profitable almost every month usually has an annualized Sharpe ratio above 2\n3. A strategy that is profitable almost every day usually has an annualized Sharpe ratio above 3\n4. Look for strategies overlooked by most institutional investors, e.g. strategies that trade frequently but have low capacity, or that trade only a handful of stocks per day\n\nTerms:\nslippage\nsurvivorship bias\nsplit- and dividend-adjusted historical database\n\"\"\"\n\"\"\"\nTips:\n1. Keep using the long-short market-neutral strategy to compute the maximum drawdown and the longest drawdown duration\n2. Step 1: at each day's close, compute the \"high-water mark\", i.e. the strategy's maximum cumulative return up to that day\n3. Computing the high-water mark from the cumulative-return curve or from the net-value curve is equivalent, because net value = initial capital * (1 + cumulative return)\n\n\nTerms:\ndrawdown\nmaximum drawdown\nlongest drawdown duration\n\"\"\"\n\n# Test whether the mean daily returns from Shanghai Index is zero:\ndef ts2mpf_all(quotes):\n\t_quoteList = []\n\n\t# Iterate over DataFrame rows as (index, Series) pairs.\n\tfor dates, row in quotes.iterrows():\n\t\t# Convert the date string to a matplotlib date number\n\t\tdate_time = datetime.datetime.strptime(dates, \"%Y-%m-%d\")\n\t\tt = date2num(date_time)\n\t\topened, high, closed, low = row[:4]\n\t\tdata = (t, opened, high, low, closed)\n\t\t_quoteList.append(data)\n\n\treturn _quoteList\n\n\ndef ts2mpf_dohcl(quotes):\n\t_dates = []\n\t_opens = []\n\t_closes = []\n\t_highs = []\n\t_lows = []\n\t# Iterate over DataFrame rows as (index, Series) pairs.\n\tfor date, row in quotes.iterrows():\n\t\t# Convert the date string to a matplotlib date number\n\t\tdate_time = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\t\tt = date2num(date_time)\n\t\topened, high, closed, low = row[:4]\n\t\t_dates.append(t)\n\t\t_opens.append(opened)\n\t\t_highs.append(high)\n\t\t_closes.append(closed)\n\t\t_lows.append(low)\n\n\treturn _dates, _opens, _highs, _closes, _lows\n\n\ndef calculateMaxDD(cumRet):\n\t# Compute the maximum drawdown and the longest drawdown duration from cumulative returns\n\t# Initialize the high-water mark, the drawdown and the drawdown duration to 0\n\thighWaterMark = np.zeros(np.size(cumRet))\n\tdrawDown = np.zeros(np.size(cumRet))\n\tdrawDownDuration = np.zeros(np.size(cumRet))\n\n\tfor t in range(1, len(cumRet)):\n\t\thighWaterMark[t] = max(highWaterMark[t - 1], cumRet[t])\n\t\t# Compute the daily drawdown (relative to the high-water mark)\n\t\tdrawDown[t] = (1 + highWaterMark[t]) / (1 + cumRet[t]) - 1\n\n\t\tif drawDown[t] == 0:\n\t\t\tdrawDownDuration[t] = 0\n\t\telse:\n\t\t\tdrawDownDuration[t] = drawDownDuration[t - 1] + 1\n\n\tx = [value for value in range(len(cumRet))]\n\ty = cumRet\n\n\tplt.title(u'300018\\'s MaxDD and MaxDDD')\n\tplt.xlabel(u'Number of days')\n\tplt.ylabel(u'Accumulated return (%)')\n\n\tplt.plot(x, y)\n\tplt.show()\n\n\t# return maxDD, maxDDD\n\treturn max(drawDown), max(drawDownDuration)\n\n\n\"\"\"\nMain function\nDescription:\n\"\"\"\ndef _main():\n\t# FIXME: survivorship bias is not handled yet; e.g. stocks suspended for a long time or with no data are not filtered out\n\tmyTicker = \"300018\"\n\tbegDate = datetime.date(2017, 5, 1)\n\tendDate = datetime.date.today()\n\n\tquote = ts.get_hist_data(myTicker, start=begDate.__str__(), end=endDate.__str__())\n\tquote = quote.sort_index(axis='index')\n\n\tif len(quote) == 0:\n\t\tprint(\"Found no data\")\n\t\traise SystemExit\n\n\tdates, opens, highs, closes, lows = ts2mpf_dohcl(quote)\n\n\tdates = np.array(dates)\n\topens = np.array(opens)\n\thighs = np.array(highs)\n\tcloses = np.array(closes)\n\tlows = np.array(lows)\n\n\t# Daily simple returns from consecutive closes\n\tdailyRet = (closes[1:] - closes[:-1]) / closes[:-1]\n\t# Assume a risk-free rate of 0.04 and 252 trading days per year; compute excess returns\n\texcessRet = dailyRet - 0.04 / 252\n\n\tsharpeRatio = np.sqrt(252) * np.mean(excessRet) / 
np.std(excessRet)\n\n\tprint(\"The Sharpe ratio of stock {} is: {}\".format(myTicker, sharpeRatio))\n\tprint(\"Adding market neutral strategy short side...\")\n\n\tshortTicker = \"159915\"\n\tshortQuote = ts.get_hist_data(shortTicker, start=begDate.__str__(), end=endDate.__str__())\n\tshortQuote = shortQuote.sort_index(axis='index')\n\n\tif len(shortQuote) == 0:\n\t\tprint(\"Found no data\")\n\t\traise SystemExit\n\n\tshortDates, shortOpens, shortHighs, shortCloses, shortLows = ts2mpf_dohcl(shortQuote)\n\n\tshortDates = np.array(shortDates)\n\tshortOpens = np.array(shortOpens)\n\tshortHighs = np.array(shortHighs)\n\tshortCloses = np.array(shortCloses)\n\tshortLows = np.array(shortLows)\n\n\tdailyEntRet = (shortCloses[1:] - shortCloses[:-1]) / shortCloses[:-1]\n\n\t# Daily net returns (divided by 2 because twice the capital is used)\n\tnetRet = (dailyRet - dailyEntRet) / 2\n\n\tnetSharpeRatio = np.sqrt(252) * np.mean(netRet) / np.std(netRet)\n\n\tprint(\"Then the neutral strategy Sharpe ratio is: {}\".format(netSharpeRatio))\n\n\tcumRet = np.cumprod(1 + netRet) - 1\n\tmaxDD, maxDDD = calculateMaxDD(cumRet)\n\tprint(\"MaxDD: {}, MaxDDD: {}\".format(maxDD, maxDDD))\n\tprint(\"============================\")\n\n\nif __name__ == \"__main__\":\n\t_main()\n","sub_path":"quantitativeStrategy/strategySharpeRatio.py","file_name":"strategySharpeRatio.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59973151","text":"\n\n\nfrom reproduction.seqence_labelling.chinese_ner.data.ChineseNER import ChineseNERLoader\nfrom fastNLP.embeddings import StaticEmbedding\n\nfrom torch import nn\nimport torch\nfrom fastNLP.embeddings.utils import get_embeddings\nfrom fastNLP.modules import LSTM\nfrom fastNLP.modules import ConditionalRandomField\nfrom fastNLP.modules import allowed_transitions\nimport torch.nn.functional as F\nfrom fastNLP import seq_len_to_mask\nfrom fastNLP.core.const import Const as C\nfrom fastNLP import SpanFPreRecMetric, Trainer\nfrom fastNLP import cache_results\n\nclass CNBiLSTMCRFNER(nn.Module):\n    def __init__(self, char_embed, num_classes, bigram_embed=None, trigram_embed=None, num_layers=1, hidden_size=100,\n                 dropout=0.5, target_vocab=None, encoding_type=None):\n        super().__init__()\n\n        self.char_embed = get_embeddings(char_embed)\n        embed_size = self.char_embed.embedding_dim\n        if bigram_embed:\n            self.bigram_embed = get_embeddings(bigram_embed)\n            embed_size += self.bigram_embed.embedding_dim\n        if trigram_embed:\n            self.trigram_embed = get_embeddings(trigram_embed)\n            embed_size += self.trigram_embed.embedding_dim\n\n        if num_layers>1:\n            self.lstm = LSTM(embed_size, num_layers=num_layers, hidden_size=hidden_size//2, bidirectional=True,\n                             batch_first=True, dropout=dropout)\n        else:\n            self.lstm = LSTM(embed_size, num_layers=num_layers, hidden_size=hidden_size//2, bidirectional=True,\n                             batch_first=True)\n\n        self.dropout = nn.Dropout(dropout)\n        self.fc = nn.Linear(hidden_size, num_classes)\n\n        trans = None\n        if target_vocab is not None and encoding_type is not None:\n            trans = allowed_transitions(target_vocab.idx2word, encoding_type=encoding_type, include_start_end=True)\n\n        self.crf = ConditionalRandomField(num_classes, include_start_end_trans=True, allowed_transitions=trans)\n\n    def _forward(self, chars, bigrams=None, trigrams=None, seq_len=None, target=None):\n        chars = self.char_embed(chars)\n        if hasattr(self, 'bigram_embed'):\n            bigrams = self.bigram_embed(bigrams)\n            chars = torch.cat((chars, bigrams), dim=-1)\n        if hasattr(self, 
'trigram_embed'):\n trigrams = self.trigram_embed(trigrams)\n chars = torch.cat((chars, trigrams), dim=-1)\n feats, _ = self.lstm(chars, seq_len=seq_len)\n feats = self.fc(feats)\n feats = self.dropout(feats)\n logits = F.log_softmax(feats, dim=-1)\n mask = seq_len_to_mask(seq_len)\n if target is None:\n pred, _ = self.crf.viterbi_decode(logits, mask)\n return {C.OUTPUT: pred}\n else:\n loss = self.crf(logits, target, mask).mean()\n return {C.LOSS:loss}\n\n def forward(self, chars, target, bigrams=None, trigrams=None, seq_len=None):\n return self._forward(chars, bigrams, trigrams, seq_len, target)\n\n def predict(self, chars, seq_len=None, bigrams=None, trigrams=None):\n return self._forward(chars, bigrams, trigrams, seq_len)\n\n# data_bundle = pickle.load(open('caches/msra.pkl', 'rb'))\n@cache_results('caches/msra.pkl', _refresh=True)\ndef get_data():\n data_bundle = ChineseNERLoader().process('MSRA-NER/', bigrams=True)\n char_embed = StaticEmbedding(data_bundle.vocabs['chars'],\n model_dir_or_name='cn-char')\n bigram_embed = StaticEmbedding(data_bundle.vocabs['bigrams'],\n model_dir_or_name='cn-bigram')\n return data_bundle, char_embed, bigram_embed\ndata_bundle, char_embed, bigram_embed = get_data()\nprint(data_bundle)\n# exit(0)\ndata_bundle.datasets['train'].set_input('target')\ndata_bundle.datasets['dev'].set_input('target')\nmodel = CNBiLSTMCRFNER(char_embed, num_classes=len(data_bundle.vocabs['target']), bigram_embed=bigram_embed)\n\nTrainer(data_bundle.datasets['train'], model, batch_size=640,\n metrics=SpanFPreRecMetric(data_bundle.vocabs['target'], encoding_type='bioes'),\n num_workers=2, dev_data=data_bundle. datasets['dev'], device=3).train()\n\n","sub_path":"reproduction/seqence_labelling/chinese_ner/train_cn_ner.py","file_name":"train_cn_ner.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288088216","text":"# usage: file updates...\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\n\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nfrom keyname import keyname as kn\nfrom fileshash import fileshash as fsh\nimport h5py\nimport colorsys\nfrom tqdm import tqdm\nfrom joblib import delayed, Parallel\nfrom collections import defaultdict\n\nmatplotlib.rcParams['pdf.fonttype'] = 42\n\nfilename = sys.argv[1]\nupdates = [int(v) for v in sys.argv[2:]]\n\nNONE = 0\nP_PARENT = -3\nP_CHILD = -4\n\ndef ColorMap(val):\n return (\n (1.0, 0.0, 0.0) if val == P_PARENT else\n (1.0, 1.0, 0.0) if val == P_CHILD else\n (val, val, val)\n )\n\ndef RenderTriangles(\n right_val,\n left_val,\n bottom_val,\n top_val,\n live_val,\n radius=21\n ):\n\n top = ColorMap(top_val)\n bottom = ColorMap(bottom_val)\n left = ColorMap(left_val)\n right = ColorMap(right_val)\n\n return np.array([\n [top] * (radius * 2)\n ] + [\n [left]\n + [left] * idx\n + [top]\n + [top] * (2 * (radius - idx) - 3)\n + [right]\n + [right] * idx\n for idx in range(radius - 1)\n ] + [\n [left] + [left] * (radius - 1) + [top] + [right] * (radius - 1)\n ] + [\n [left]\n + [left] * (radius - idx - 1)\n + [bottom]\n + [bottom] * (2 * idx - 1)\n + [right]\n + [right] * (radius - idx - 1)\n for idx in range(1, radius)\n ]) if live_val else np.full((radius*2, radius*2, 3), 0.0)\n\n\ndef RenderAndSave(upd, filename):\n\n 
file = h5py.File(filename, 'r')\n nlev = int(file.attrs.get('NLEV'))\n\n own = np.array(file['Index']['own']).flatten()\n dirs = {\n 'top' : np.array(file['Index']['dir_0']).flatten(),\n 'bottom' : np.array(file['Index']['dir_1']).flatten(),\n 'left' : np.array(file['Index']['dir_3']).flatten(),\n 'right' : np.array(file['Index']['dir_2']).flatten(),\n }\n\n chans = [\n np.array(file['Channel']['lev_'+str(lev)]['upd_'+str(upd)]).flatten()\n for lev in range(nlev)\n ]\n cage = np.array(file['CellAge']['upd_'+str(upd)]).flatten()\n pvch = np.array(file['PrevChan']['upd_'+str(upd)]).flatten()\n ppos = np.array(file['ParentPos']['upd_'+str(upd)]).flatten()\n\n live = np.array(file['Live']['upd_'+str(upd)])\n\n data_0 = np.array(file['Channel']['lev_0']['upd_'+str(upd)])\n data_1 = (\n np.array(file['Channel']['lev_0']['upd_'+str(upd)])\n if nlev == 1 else\n np.array(file['Channel']['lev_1']['upd_'+str(upd)])\n )\n\n res = defaultdict(dict)\n for idx in range(own.size):\n for dir, drct in dirs.items():\n type = NONE\n if pvch[idx] == chans[-1][drct[idx]]:\n type = P_CHILD\n elif pvch[drct[idx]] == chans[-1][idx]:\n type = P_PARENT\n else:\n # grayscale channel ID\n type = (chans[-1][idx] / 2**64) * 0.8\n\n res[own[idx]][dir] = type\n\n\n own = np.array(file['Index']['own'])\n live = np.array(file['Live']['upd_'+str(upd)])\n\n image = np.flip(np.rot90(np.transpose(np.block([\n [\n np.transpose(RenderTriangles(\n res[val_own]['top'],\n res[val_own]['bottom'],\n res[val_own]['right'],\n res[val_own]['left'],\n val_live\n )) for val_own, val_live in zip(\n row_own,\n row_live\n )\n ]\n for row_own, row_live\n in zip(\n own,\n live\n )\n ])),k=1),axis=0)\n\n plt.figure(figsize=(18,18))\n\n plt.imshow(\n image,\n extent = (0, image.shape[1], image.shape[0], 0)\n )\n\n plt.axis('off')\n plt.grid(b=None)\n\n rescale = lambda coord: [v * 42 for v in coord]\n lines_0 = LineCollection([\n [ rescale(coord) for coord in ((x,y), dest) ]\n for x in range(data_0.shape[0])\n for y in range(data_0.shape[1])\n for dest in ((x+1,y), (x,y+1))\n if data_0[y][x] != data_0[dest[1]-1][dest[0]-1]\n ], linestyle=(0, (1, 3)), colors='0.5')\n plt.gca().add_collection(lines_0)\n\n lines_1 = LineCollection([\n [ rescale(coord) for coord in ((x,y), dest) ]\n for x in range(data_1.shape[0])\n for y in range(data_1.shape[1])\n for dest in ((x+1,y), (x,y+1))\n if data_1[y][x] != data_1[dest[1]-1][dest[0]-1]\n ], linestyle='solid', colors='black')\n plt.gca().add_collection(lines_1)\n\n plt.savefig(\n kn.pack({\n 'title' : 'directional_propagule_viz',\n 'update' : str(upd),\n 'seed' : kn.unpack(filename)['seed'],\n 'treat' : kn.unpack(filename)['treat'],\n '_data_hathash_hash' : fsh.FilesHash().hash_files([filename]),\n '_script_fullcat_hash' : fsh.FilesHash(\n file_parcel=\"full_parcel\",\n files_join=\"cat_join\"\n ).hash_files([sys.argv[0]]),\n '_source_hash' :kn.unpack(filename)['_source_hash'],\n 'ext' : '.png'\n }),\n transparent=True,\n bbox_inches='tight',\n pad_inches=0\n )\n\n plt.clf()\n plt.close(plt.gcf())\n\nParallel(n_jobs=-1)(\n delayed(RenderAndSave)(upd, filename) for upd in tqdm(updates)\n)\n","sub_path":"old/script/AnimateRenderPropaguleDirectional.py","file_name":"AnimateRenderPropaguleDirectional.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"88310487","text":"from tensorflow.python.tools import freeze_graph\n\nMODEL_DIR = 'graph_data/'\n\n##########################3\nimport tensorflow as tf\nfrom 
tensorflow.core.framework import graph_pb2 as gpb\nfrom google.protobuf import text_format as pbtf\n\ngdef = gpb.GraphDef()\n\nwith open(MODEL_DIR + 'graph.pbtxt', 'r') as fh:\n    graph_str = fh.read()\n\npbtf.Parse(graph_str, gdef)\n\ntf.import_graph_def(gdef)\n\n# print([n.name for n in tf.get_default_graph().as_graph_def().node])\n\n###############################\n\n\n# Freeze the graph\ninput_graph_path = MODEL_DIR + 'graph.pbtxt'\ncheckpoint_path = MODEL_DIR + 'test_graph'\ninput_saver_def_path = \"\"\ninput_binary = False\noutput_node_names = \"final_prediction\"\nrestore_op_name = \"\"\nfilename_tensor_name = \"\"\noutput_frozen_graph_name = MODEL_DIR + 'frozen_graph.pb'\nclear_devices = True\n\nfreeze_graph.freeze_graph(input_graph_path, input_saver_def_path,\n                          input_binary, checkpoint_path, output_node_names,\n                          restore_op_name, filename_tensor_name,\n                          output_frozen_graph_name, clear_devices, \"\")\n","sub_path":"freeze_graph.py","file_name":"freeze_graph.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"99250241","text":"from sqlalchemy import MetaData, Table, Column, Integer, UnicodeText, Date, NVARCHAR\nfrom sqlalchemy.sql.schema import ForeignKey\nfrom lbrc_flask.security.migrations import get_audit_mixin_columns\n\nmeta = MetaData()\n\ndef upgrade(migrate_engine):\n    meta.bind = migrate_engine\n\n    p = Table(\"publication\", meta, autoload=True)\n\n    t = Table(\n        \"author\",\n        meta,\n        Column(\"id\", Integer, primary_key=True),\n        Column(\"publication_id\", Integer, ForeignKey(p.c.id), index=True, nullable=False),\n        Column(\"last_name\", NVARCHAR(100)),\n        Column(\"fore_name\", NVARCHAR(100)),\n        Column(\"initials\", NVARCHAR(100)),\n        Column(\"affiliation\", UnicodeText),\n        *get_audit_mixin_columns(),\n    )\n\n    t.create()\n\n\ndef downgrade(migrate_engine):\n    meta.bind = migrate_engine\n    t = Table(\"author\", meta, autoload=True)\n    t.drop()","sub_path":"migrations/versions/006__Create__Author.py","file_name":"006__Create__Author.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647190782","text":"\n\n# 'LocateNTU' Bot\n# Designed to get you around NTU\n\n\n# Import Modules\n\nimport math\n\nimport telepot\nimport telepot.aio\nimport asyncio\nfrom telepot.namedtuple import ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardHide\n\nimport pymysql # To connect with MySQL database\n\n# Global Constants\n\nGPS_multiplier = 10**7 # MySQL is unable to store floats\n                       # Values from the database will be divided by value: 10^7 to obtain coordinates\n\nR = 6378137 # Earth’s radius\n            # Used in coordinates calculation\n\nhelpMessage = \"\"\" Welcome! I am 'LocateNTU'. I have access to vast amounts of crowd-sourced geo-data at my fingertips!\n\nI can do the following:\n1. Help you find Points Of Interest (POI) such as ATMs and Food Areas.\n2. Search for specific locations based on their name.\n3. If you wish to contribute to our ever-growing geo-data database to help others in need, I can do that too!\n\nType /'exit' anytime to restart the conversation!\n\nHope you enjoy using my services! Glad to be of help! 
\"\"\"\n\n\n# Global Variables\ncontent_type = \"\" # Message Type, Used for error checking\nchat_type = \"\"\nchat_id = \"\" # User ID\nfinite_state = \"\" # Conversation Branch Variable\n # Enable bot to determine which conversation branch to follow up with\nhandle_counter = 0 # Conversation Counter\ninput_list = [] # Stores user inputs as bot is supplied with more information\n\n\n# Telegram Bot\ntoken = '282030698:AAGww5XJItONcd7EYphZijAUxT1cQoeDgiw'\nbot = telepot.aio.Bot(token)\n\n\n# Information to connect to MySQL\n\n### IMPT: Please input to own host, port, username and password\nhost = \"(host)\"\nport = \"(port)\" # An integer here\nuser = \"(user)\"\npasswd = \"(passwd)\"\ndb = \"api07_test\" # Name of table\n\n# queryMYSQL\n# Handles connection to and querying of MySQL database\n# Takes in 4 strings -\n# 1) Type of result to obtain (column name(s) or * (all))\n# 2) Value to search (i.e. name of location)\n# 3) Name of table to look in\n# 4) Name of column to search (hardcoded)\n\n# Returns list of values that match the search query\ndef queryMYSQL(elementToLocate,tableToSearch, columnToSearch, nameToFind,):\n conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db) # Connect to MySQL\n\n #creates cursor object that is required for mysql query\n cur = conn.cursor()\n\n # works like 'exec' function in python i.e. runs the string as a MYSQL command\n stringToExecuteInMYSQL = \"SELECT {} FROM api07_test.{} WHERE {} LIKE %s\".format(elementToLocate,tableToSearch,columnToSearch)\n cur.execute(stringToExecuteInMYSQL,nameToFind)\n\n #declare a list to store the results from the MYSQL query\n local_list = []\n\n #cursor objects have a data type of 'tuple', basically they are immutable lists (cannot be modified)\n #each row in 'cur' is a list containing the MYSQL database row that has been retrieved (e.g. 
['id','value','latitude','longitude'])\n if(cur.rowcount > 0): # check if results is not zero\n for row in cur:\n #If we were looking for multiple elements per row, append them all to the list\n if(elementToLocate == \"*\" or elementToLocate.__contains__(\",\")):\n local_list.append(row)\n else:\n #If we were only looking for 1 element, just append that value instead of a list containing te value\n local_list.append(row[0])\n\n #close connections and return the list containing the results\n cur.close()\n conn.close()\n return removeDuplicatesInList(local_list);\n\n\n#Extension of previous function, this limits the search with the following condition: column `k` must have the value 'name'\n#this prevents us from picking up any matches in the additional information, which creates list index errors\n#tl;dr: fixes the list index out of range error\ndef queryMYSQL_name(elementToLocate,tableToSearch, columnToSearch, nameToFind,):\n conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db) # Connect to MySQL\n\n cur = conn.cursor()\n\n stringToExecuteInMYSQL = \"SELECT {} FROM api07_test.{} WHERE {} LIKE %s AND `k` = 'name'\".format(elementToLocate,tableToSearch,columnToSearch)\n cur.execute(stringToExecuteInMYSQL,nameToFind)\n\n local_list = []\n\n if(cur.rowcount > 0): # check if results is not zero\n for row in cur:\n if(elementToLocate == \"*\" or elementToLocate.__contains__(\",\")):\n local_list.append(row)\n else:\n local_list.append(row[0])\n\n cur.close()\n conn.close()\n return local_list;\n\n#given a list of node_ids (int), grabs their [node_id, latatiude, longitude ] as a row\n#then adds all rows into a list and returns that list\n\n#mainly used to grab the latitude and longitude data for GPS calculations\ndef searchNodeInfo(node_id_list):\n\n #declare output list\n #that wil contain each node as a row of format:[node_id, lat, long]\n node_info_list = []\n\n if(len(node_id_list) > 0):\n for i in node_id_list: #for every ID in the given list\n #uses the queryMYSQL function described above\n node_info_list += queryMYSQL(\"node_id,latitude,longitude\",\"nodes\",\"node_id\",i)\n #and adds them to the output list\n #note: to add a list to another list in python, use +=! not append()!\n else: #error: input list is empty\n global chat_id\n bot.sendMessage(chat_id, \"An error has occured\")\n\n #return results\n return node_info_list #where each row has the format: [node_id, lat, long]\n\n\n\n#BOt Function 1:\n# to find the nearest Point of Interest (POI)\n# outputs a bot message to the user containing the search results\n# =========1. Find Nearest Function =======================================\nasync def findNearest(inputListFromUser):\n\n\n #inputListFromUser is the global 'input_list' variable that contains all the input the user has keyed in\n data_list = inputListFromUser\n\n # split up the input data\n user_lat = float(data_list[1]) #user's latitude\n user_long = float(data_list[2]) #user's longitude\n facility_name = data_list[0] #type of POI user is searching for e.g. ATM\n\n # using 'facility_name' from user input, run a MYSQL query\n # get all 'node_ids' that match the search query from database from 'node_tags' table using 'queryMYSQL'\n #('node_tags' MYSQL table contains the node_id and name of POI (under column 'v'))\n node_id_list = queryMYSQL(\"node_id\",\"node_tags\", \"v\", facility_name)\n # (does not contain lat long info! 
that comes in the next search!)\n #outputs a list of relevant node IDs\n\n length_of_node_id_list = len(node_id_list) #finding length of results list\n dist_list = []; #declare a distance list to compare distances later\n\n if(length_of_node_id_list > 0):\n\n # given the list of node ids from the previous seach (node_id_list)\n # use 'searchNodeInfo' function to gather latitude, longitude data\n node_data_list = searchNodeInfo(node_id_list)\n\n #now we have a node_data_list of relevant results, where each row is in the format: [node_id, latitude, longitude]\n #(refer to 'searchNodeInfo' function above)\n\n length_of_node_data_list = len(node_data_list) # finding length\n\n\n #iterate through the list of node data\n if(length_of_node_data_list > 0):\n for i in range(length_of_node_data_list):\n\n # when geo data was added to MYSQL databases,\n # decimal places were removed, hence need to place them back maually in Python\n target_lat = node_data_list[i][1] / GPS_multiplier #index 1 would be latitude of POI\n target_long = node_data_list[i][2] / GPS_multiplier #index 2 would be laongitude of POI\n\n #function for calculating distance between 2 GPS points, takes in 2 objects of (lat,long)\n #user_lat and user_long is from the start of the function\n distance = calculate_dist((user_lat, user_long),(target_lat, target_long))\n\n dist_list.append(distance)\n # for retrieval later when comparing shortest distance\n\n #Function for finding shortest distance out of all and returning its index in 'dist_list'\n a = findSmallestFloatInListandReturnItsIndex(dist_list)\n\n #b and c are the lat and long of the nearest POI (multiple assignment)\n b, c = node_data_list[a][1]/ GPS_multiplier, node_data_list[a][2] / GPS_multiplier\n\n #prepare a string to be sent in the bot message\n d = \"Nearest {} is as above, at a distance of {} metres from you\".format(facility_name,math.floor(dist_list[a] * 1000))\n\n #sendLocation is a Telepot function that sends a map containing the POI\n await bot.sendLocation(chat_id, b, c)\n\n #bot sends the message stating where the nearest POI is\n await bot.sendMessage(chat_id, d)\n\n #function that displays any additional info (e.g. descriptions on its location)\n #in the MYSQL database about the POI\n await retrieveAdditionalInfo(node_data_list[a][0]) #node_id for retrieiving additional info\n else:\n # node_data_list has length = 0 i.e. no results found\n await bot.sendMessage(chat_id, \"No results found! Sorry!\")\n else:\n #node_id_list has length = 0 i.e. no results found\n await bot.sendMessage(chat_id, \"No results found! Sorry!\")\n\n#end of findNearest function\n\n\n\n\n\n\n\n#Bot Function 2:\n# Given a name of a specific location, finds that location in the MYSQL database (any results containing the input pattern)\n# outputs a map of the location\n# Used for finding specific locations e.g. classroom, research lab etc\n# which may not be shown in conventional online maps e.g. 
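# --- editor's aside (not part of the original source) --------------------------
# The shortest-distance step in findNearest above can also be written with min()
# over the indices; a minimal, self-contained sketch assuming dist_list is a
# plain list of floats, as it is in the function:
def _index_of_min_sketch(dist_list):
    # returns the index of the smallest value, e.g. [0.42, 0.07, 1.3] -> 1
    return min(range(len(dist_list)), key=dist_list.__getitem__)
# -------------------------------------------------------------------------------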
Google Maps\n# ====================2.FIND LOCATION FUNCTION ==============================\nasync def findLocation(input):\n # name of location can contain spaces, alphanumerical or even punctuations, so we accept any string\n\n\n # OpenStreetMap Data contains geodata in 3 object models:\n # Relations, Ways and Nodes\n # Relations contain ways and nodes, ways contain nodes, and nodes contain latitudes and longitudes\n # So we need to search them all\n\n\n #declare local variables to be used\n inputStrFromUser = '%{}%'.format(input) #MYSQL syntax thing\n way_tag_list = []; #list of ways\n relation_tag_list = []; #list of relations\n node_id_list = []; #list of relevant node ids\n name_list = []; #list of names of relevant results, to be displayed at the end\n relation_member_list = [];\n\n\n\n #Previously, POIs are usually small so only searching nodes is sufficient\n #However, this function can search for larger structures e.g. buildings\n\n #############SEARCH RELATION DATABASE\n\n #Search for all 'relation' for names matching the input\n #column 'v' contains all the names\n #retrieve every row in the database where the name matches\n relation_tag_list = queryMYSQL_name(\"*\",\"relation_tags\", \"v\", inputStrFromUser)\n\n tempBool = False #bool var: if we already obtained a node, theres no need to check ways to obtain nodes\n\n # for every relevant result in 'relations'\n if (len(relation_tag_list) > 0): # error catching, if there is at least 1 result\n for i in relation_tag_list:\n\n\n # getting the name to display later\n name_list.append(i[2])\n\n #retrieve all ways and nodes from the \" relation_members\" table\n a = queryMYSQL(\"*\",\"relation_members\", \"relation_id\", i[0]) # get the respective members by matching the relation_id\n relation_member_list.append(a[0])\n\n #for every relevant results in relation_members_list(i.e. ways and ndoes)\n if (len(relation_member_list) > 0): # error checking\n for row in relation_member_list:\n # if member is a node, append the node to the node_id_list for accessing later\n tempBool = False\n if (row[1] == \"Node\"): #index 1 contains the type of member (i.e. 
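# --- editor's aside (not part of the original source) --------------------------
# A minimal sketch of the relation -> way -> node containment described above,
# with plain dicts standing in for the MySQL tables (all values hypothetical):
_relations_sketch = {1: [("Way", 10), ("Node", 100)]}  # relation 1 owns way 10 and node 100
_way_nodes_sketch = {10: [101, 102]}                   # way 10 is built from nodes 101 and 102

def _first_node_of_relation_sketch(rel_id):
    # prefer a direct Node member; otherwise take the first node of the first Way,
    # mirroring the search order used in findLocation above
    for kind, ref in _relations_sketch[rel_id]:
        if kind == "Node":
            return ref
    for kind, ref in _relations_sketch[rel_id]:
        if kind == "Way":
            return _way_nodes_sketch[ref][0]
# -------------------------------------------------------------------------------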
'node' or 'way')\n node_id_list.append(row[2]) #index 2 contains the node_id / way_id\n\n tempBool = True\n continue; # we only need one node per relation\n\n elif(tempBool != True and row[1] == \"Way\"): # if we already have a node, theres no need to check the ways #short circuiting here\n #Search the 'way' member for a node\n a = queryMYSQL(\"node_id\",\"way_nodes\",\"way_id\",row[2]) #index 2 contains the node_id / way_id\n if(len(a) > 0):\n node_id_list.append(a[0])\n continue; # we only need one node per relation\n continue; # we only need one node per relation\n\n\n ##############CHECK WAYS DATABASE\n #search \"way_tags\" table for relevant results\n way_tag_list = queryMYSQL_name(\"*\",\"way_tags\",\"v\",inputStrFromUser)\n\n #for every relevant 'way', get 1 node and append it to the node_id_list\n if (len(way_tag_list) > 0): # error catching, if there is at least 1 result\n for i in way_tag_list:\n name_list.append(i[2]) # getting the name to display later\n a = queryMYSQL(\"node_id\",\"way_nodes\",\"way_id\", i[0])\n node_id_list.append(a[0])\n\n #SEARCH NODES DATABASE\n # Search \"node_tags\" table for relevant results\n # and append them to node_id_list, and append their names too\n node_id_list += queryMYSQL_name(\"node_id\",\"node_tags\",\"v\", inputStrFromUser)\n name_list += queryMYSQL_name(\"v\", \"node_tags\", \"v\", inputStrFromUser)\n\n\n await bot.sendMessage(chat_id,\"Sorry for the wait, here are the results from our database!\")\n\n ###### Using all the nodes found, retrieve their coordinates from nodes database\n #using 'searchNodeInfo' function\n\n if (len(node_id_list) > 0):\n node_info_list = searchNodeInfo(node_id_list)\n\n for i in range(len(node_info_list)):\n lat = node_info_list[i][1] / (10 ** 7) #plcing back the decimal place due to MYSQL somehow removing the decimal place\n long = node_info_list[i][2] / (10 ** 7)\n msg = \"Name of Location: {}\".format(name_list[i]) #prepare an output string\n await bot.sendLocation(chat_id, lat, long) #send a map of the location\n await bot.sendMessage(chat_id,msg) #send a bot message containing the results\n await retrieveAdditionalInfo(node_info_list[i][0]) #display any additional descriptors of the location in our database\n\n\n #Sends 'End of results!' 
to signify end of search\n await bot.sendMessage(chat_id,\"End of results!\")\n else:\n # Standard Error Message\n await bot.sendMessage(chat_id,\"Sorry, we found no matching results, please try again!\")\n\n\n\n#Bot function 3:\n#Allows users to register their current location, and name of location, to the database\n#allowing the community to contribute to our database of NTU geodata (crowd-sourcing)\n# Hence, we could potentially build a detailed database of NTU POIs\n# and users can search for these locations using this bot\n\n#this function takes in a list (the global 'input_list',\n# which contains [user's latitude, user's longitude', name of POI]\n# ====================3.CHECK IN FUNCTIONS =====================================\nasync def checkIn(inputListFromUser):\n # \n data_list = inputListFromUser\n\n #opens a connection to our MYSQL database\n conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db) # Connect to MySQL\n # create a 'cursor' object necessary for MYSQL query\n cur = conn.cursor()\n\n\n # our MYSQL database does not support decimal place, so we remove them by multiplying\n a = float(float(data_list[0]) * (10 ** 7))\n b = float(float(data_list[1]) * (10 ** 7))\n\n\n # find the last id of the 'nodes' table\n cur.execute(\"SELECT @last_id := MAX(node_id) FROM api07_test.nodes\")\n\n #cur should only contain a list containing the value of last id\n for row in cur:\n last_id = int(row[0]) + 1 #increment to get the next id (to write to)\n\n #MYSQL command to insert in the data, into 'nodes' table.\n #Only the node-id, latitude, longitude is relevant for this project, the remaining is dummy data\n cur.execute(\"INSERT INTO api07_test.nodes (node_id,latitude,longitude, changeset_id, visible, timestamp,tile,version) VALUES(%s,%s,%s,%s,%s,%s,%s, %s)\",(last_id, a, b,9876410,1,\"2011-11-20 03:30:51\",3766913681,15))\n conn.commit() #command to commit the change. Required for 'insert' MYSQL commands\n\n #insert the name in and node_id into 'node_tags' table\n cur.execute(\"INSERT INTO api07_test.node_tags (node_id,version,k,v) VALUES(%s,%s,%s,%s)\",(last_id,1,\"name\",str(data_list[2])))\n conn.commit() #command to commit the change. Required for 'insert' MYSQL commands\n\n #Mysql recognises typical inputs as strings, use '%s' for parameters, not %d %f even if its a number\n #math values may be rounded off, so we use all strings for simplicity\n\n #successful message\n await bot.sendMessage(chat_id, \"Alright! Location {} has been registered in the database! Thanks for contributing to NTU Finder bot!\".format(data_list[2]))\n\n cur.close()\n conn.close()\n return last_id;\n#END OF CHECK IN FUNCTION\n\n\n#=Function for registeromg additional descriptors of the POI location\n#=============================== 4. Key in additional info =========================================\nasync def RegisterAdditionalInfo(node_id, info):\n #open mysql connection\n conn = pymysql.connect(host=host, port=port, user=user, passwd=passwd, db=db) # Connect to MySQL\n cur = conn.cursor()\n\n #MYSQL command to insert into 'node_tags' database\n #2 columns, column 'k': 'info', column 'v': descriptor string\n cur.execute(\"INSERT IGNORE INTO api07_test.node_tags (node_id,version,k,v) VALUES(%s,%s,%s,%s)\",\n (node_id, 1, \"info\", info))\n conn.commit() #command to commit the change. Required for 'insert' MYSQL commands\n\n #bot sends successful message\n await bot.sendMessage(chat_id,\"Alright! {} has been registered in the database! 
Thanks for contributing to NTU Finder bot!\".format(info))\n    cur.close()\n    conn.close()\n    #close connection\n\n#retrieves any additional descriptor we have in our database\n#bot displays such descriptors\n#=====================5. Get additional info =========================================\nasync def retrieveAdditionalInfo(node_id):\n    global chat_id\n\n    #query for column 'v' in 'node_tags' database, using 'node_id' as the query\n    #retrieve the string value of any relevant matches\n    #appends them to a list\n    info_list = queryMYSQL(\"v\", \"node_tags\",\"node_id\",node_id)\n    for i in info_list:\n        s = \"Description: {}\".format(i)\n        #for every match, bot displays the string value of the descriptor\n        await bot.sendMessage(chat_id,s)\n\n\n\n\n############## Helper functions ################################################\n\n# calculate gps distance function, returns shortest distance between 2 GPS points in km\ndef calculate_dist(origin, destination):\n    lat1, lon1 = origin\n    lat2, lon2 = destination\n    radius = 6371  # km. This is a constant\n\n    dlat = math.radians(lat2 - lat1)\n    dlon = math.radians(lon2 - lon1)\n    a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \\\n        * math.cos(math.radians(lat2)) * math.sin(\n        dlon / 2) * math.sin(dlon / 2)\n    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n    d = radius * c\n    return (d) #distance is returned\n\n\n\n# find the smallest float value in input list and\n# returns its index\n# used for finding the NEAREST POI\n# we need the index as part of the 'findNearest' function, to access the lat, long values\n# to create a map showing the POI\ndef findSmallestFloatInListandReturnItsIndex(arg_list):\n    index_of_smallest_value = 0\n    # compares each value against the smallest seen so far and keeps the index of the minimum\n    for i in range(1, len(arg_list)):\n        if (arg_list[i] < arg_list[index_of_smallest_value]):\n            index_of_smallest_value = i\n    return index_of_smallest_value\n\n\n# Removes duplicate values in a list, used to correct any duplicity\ndef removeDuplicatesInList(arg_list):\n    # method is similar to a bubble sort, using a nested loop\n    # we are iterating from the end of the list to the front\n    i = len(arg_list) -1\n    while i > 0:\n        for j in range(i-1, -1, -1):\n            if(arg_list[i] == arg_list[j]):\n                del arg_list[j]\n                i -= 1 #as values are being deleted from the list, update 'i'\n        i -= 1\n    return(arg_list)\n# end of helper functions\n\n\n# Bot Functions\n\n# Reset\n# Used to reset handle_counter and clear finite_state and input_list\nasync def reset():\n    global handle_counter, finite_state, input_list\n\n    handle_counter = 0\n    finite_state = \"\"\n    input_list = []\n\n    escape_keyboard = ReplyKeyboardMarkup(keyboard=[  # End conversation keyboard\n        [\"I wanna start over again!\"],\n    ])\n    await bot.sendMessage(chat_id, \"Conversation ended!\", reply_markup=escape_keyboard)\n\n\n# Handle\n# Conversation handler, Decides its function with user inputs as parameters\n# Able to call the main functions of the bot:\n# Option 1: Locate nearest facility - Locate nearest desired facility\n# Option 2: Find location - List all possible entries\n# Option 3: Add your current location - Add user-defined locations to the database\n# Option 4: Help - Help Menu\nasync def handle(msg):\n    global content_type, chat_type, chat_id, handle_counter, finite_state, input_list  # Use Global Variables within 'define'\n\n    content_type, chat_type, chat_id = telepot.glance(msg)  # Retrieve information on user input\n\n    if content_type == \"text\" and msg['text'] == \"/exit\":  # Reset bot with 
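# --- editor's worked example (not part of the original source) -----------------
# calculate_dist above is the haversine formula. For two made-up points on the
# same latitude, 0.01 degrees of longitude apart, it returns roughly 1.11 km,
# which is why findNearest multiplies by 1000 before reporting metres:
def _haversine_sanity_check():
    return calculate_dist((1.34, 103.68), (1.34, 103.69))  # -> ~1.11 (km)
# -------------------------------------------------------------------------------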
input: '/exit'\n await reset()\n\n # Stage 1\n # Bot introduces itself and await user input from the provided keyboard\n elif content_type == \"text\" and finite_state == \"\" and handle_counter == 0:\n main_menu = ReplyKeyboardMarkup(keyboard=[ # 'Main menu' of 4 functions for user to choose\n [\"Locate nearest facility\", \"Location Listing\"],\n [\"Add your current location\", \"Help\"],\n ])\n await bot.sendMessage(chat_id, \"Howdy! I'm 'LocateNTU' and ready to serve you. What would you like to know?\", reply_markup=main_menu)\n handle_counter += 1\n\n # Stage 2\n elif content_type == \"text\" and handle_counter == 1:\n user_reply = msg['text']\n\n if user_reply == \"Locate nearest facility\" or user_reply == \"Location Listing\" or \\\n user_reply == \"Add your current location\" or user_reply == \"Help\":\n handle_counter += 1\n # Option 1\n if user_reply == \"Locate nearest facility\":\n select_facility = ReplyKeyboardMarkup(keyboard=[ # 'Menu' of 4 different types of facilities\n [\"ATM\", \"Food Areas\"],\n [\"Bus Stop\", \"Library\"],\n ])\n await bot.sendMessage(chat_id, \"Alright, where do you want to go?\", reply_markup=select_facility)\n finite_state = \"/nearest\"\n\n # Option 2\n elif user_reply == \"Location Listing\":\n clear = ReplyKeyboardHide() # Hide 'main_menu'\n await bot.sendMessage(chat_id, \"Whither does thee wish to go?\", reply_markup=clear)\n finite_state = \"/location\"\n\n\n # Option 3\n elif user_reply == \"Add your current location\":\n add_me = ReplyKeyboardMarkup(keyboard=[ # 'Button' to request user's coordinates\n [KeyboardButton(text=\"Tap to send me your location\", request_location=True)],\n ])\n await bot.sendMessage(chat_id, \"Wow! Much kind! Your contribution is\"\n \" highly appreciated!\", reply_markup=add_me)\n finite_state = \"/checkin\"\n\n # Option 4\n else:\n handle_counter = 1\n main_menu = ReplyKeyboardMarkup(keyboard=[ # 'Main menu' of 4 functions for user to choose\n [\"Locate nearest facility\", \"Location Listing\"],\n [\"Add your current location\", \"Help\"],\n ])\n await bot.sendMessage(chat_id, helpMessage, reply_markup=main_menu)\n\n else: # Input Exception handler\n select_facility = ReplyKeyboardMarkup(keyboard=[\n [\"Locate nearest facility\", \"Location Listing\"],\n [\"Add your current location\", \"Help\"],\n ])\n await bot.sendMessage(chat_id, \"I don't get you. 
Retry, please?\", reply_markup=select_facility)\n\n # Stage 3\n elif handle_counter == 2 :\n\n # Option 1 Conversation\n if finite_state == \"/nearest\" and content_type == \"text\": # Ensure user sent a text\n user_reply = msg['text'] # User input stored to be compared with database\n\n if user_reply == \"ATM\" or user_reply == \"Food Areas\" or user_reply == \"Bus Stop\" or user_reply == \"Library\":\n if user_reply == \"Food Areas\":\n user_reply = \"restaurant\" # Convert user input to equivalent in database\n\n elif user_reply == \"Bus Stop\":\n user_reply = \"bus_stop\" # Convert user input to equivalent in database\n\n\n input_list.append(user_reply)\n handle_counter += 1\n locate_me = ReplyKeyboardMarkup(keyboard=[[\n KeyboardButton(text=\"Tap to send me your location\", request_location=True)\n ]])\n await bot.sendMessage(chat_id, \"Roger, now I need your location.\", reply_markup=locate_me)\n\n else: # Input Exception handler\n select_facility = ReplyKeyboardMarkup(keyboard=[ # 'Menu' of 4 different types of facilities\n [\"ATM\", \"Food Areas\"],\n [\"Bus Stop\", \"Library\"],\n ])\n await bot.sendMessage(chat_id, \"Hmm, I did not understand what you keyed in. Retry?\", reply_markup=select_facility)\n\n\n # Option 2\n elif finite_state == \"/location\":\n if(content_type == \"text\"):\n #send a 'wait' message\n await bot.sendMessage(chat_id, \"I will now find all relevant places, please be patient.\")\n #run the findLocation function, using user input as the argument (location name)\n #to display any locations matching what the user had input\n\n # this function will have the bot either display the results or give an error message if no results were found\n\n await findLocation(msg['text'])\n\n await reset() #clear all variables, restart the chat\n\n ######## AT THIS POINT, FIND LOCATION WOULD HAVE BEEN COMPLETED, CHAT IS RESET\n\n # Option 3\n elif finite_state == \"/checkin\":\n if content_type == \"location\":\n # Stores coordinates of user's current location\n input_list.append(msg['location'][\"latitude\"])\n input_list.append(msg['location'][\"longitude\"])\n clear = ReplyKeyboardHide()\n await bot.sendMessage(chat_id, \"What is the location called?\",reply_markup=clear)\n # Prompts user to provide location name and hide previous keyboard\n handle_counter += 1\n\n else: # Input Exception handler\n locate_me = ReplyKeyboardMarkup(keyboard=[[ # 'Button' to request user's coordinates\n KeyboardButton(text=\"Tap to send me your location\", request_location=True),\n ]])\n await bot.sendMessage(chat_id, \"Hey! I need a location, not that! Retry?\", reply_markup=locate_me)\n\n # Stage 3\n elif handle_counter == 3:\n\n # Option 1\n if finite_state == '/nearest':\n if content_type == \"location\":\n # Stores coordinates of user's current location\n input_list.append(msg['location'][\"latitude\"])\n input_list.append(msg['location'][\"longitude\"])\n\n await findNearest(input_list)\n await reset() # Reset conversation\n\n else: # Input Exception handler\n locate_me = ReplyKeyboardMarkup(keyboard=[[ # 'Button' to request user's coordinates\n KeyboardButton(text=\"Tap to send me your location\", request_location=True),\n ]])\n await bot.sendMessage(chat_id, \"I need to know where you are. Otherwise, I can't\"\n \" help you. 
Retry?.\", reply_markup=locate_me)\n\n # Option 3\n elif finite_state == '/checkin':\n input_list.append(msg['text']) # Store name of location from user input\n try:\n node_id = await checkIn(input_list)\n\n # Function registers location name in MySQL database\n # Displays 'success' or 'error'\n\n\n # Find node_id to allow user to add additional descriptions about the user's current location\n # register node ID to input_list for retrieval later\n input_list.append(node_id)\n\n done_keyboard = ReplyKeyboardMarkup(keyboard=[\n [\"Done\"],\n ])\n await bot.sendMessage(chat_id, \"Noted! If you'd like, type additional details in the message bar. Click\"\n \" 'Done' if you're finished!\", reply_markup=done_keyboard)\n handle_counter += 1\n except:\n await bot.sendMessage(chat_id, \"Something about your reply seems inappropriate! Please try again!\")\n await reset()\n\n # Stage 4 (Only for Option 3)\n elif content_type == \"text\" and handle_counter == 4 and finite_state ==\"/checkin\":\n if msg['text'].lower() == \"done\":\n await bot.sendMessage(chat_id,\"Thank you for contributing to my database! Here's a cookie for you. Hope to serve you again!\")\n await reset() # Reset conversation\n else:\n try:\n await RegisterAdditionalInfo(input_list[3],msg['text']) # Continually adds in details until 'Done' is received by the bot\n except:\n await bot.sendMessage(chat_id, \"Something about your reply seems inappropriate! Please try again!\")\n else: # Input Exception Error\n await bot.sendMessage(chat_id,\"It's me, not you!\")\n await reset() # Reset conversation\n\nloop = asyncio.get_event_loop()\nloop.create_task(bot.message_loop({'chat': handle})) # Create async loop for 'handle' function\nloop.run_forever()","sub_path":"LocateNTU.py","file_name":"LocateNTU.py","file_ext":"py","file_size_in_byte":31160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573577526","text":"import math\n\ncount = 0\nx = 0\n\nfor x in range(10**20, 10*400):\n y = sum(map(int, str(x)))\n for i in range(2, 10^100000):\n if (y % i) == 0:\n count += 1\n x += 1\n else:\n x += 1\nanswer = count % (10**9 + 9)\n\na=2\nnum=13\nwhile num > a :\n if num%a==0 & a!=num:\n print('not prime')\n break\n i += 1\nelse: # loop not exited via break\n print('prime')\n","sub_path":"HSCTF 2017/LargePrimes.py","file_name":"LargePrimes.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"373830523","text":"import datetime\nimport os\nimport traceback\nfrom pathlib import Path\n\nfrom pyexcelerate import Workbook\n\nfrom goldfnd.lib.aws import get_boto3_client\nfrom goldfnd.lib.database import Database\nfrom goldfnd.lib.date import datetime_to_string\nfrom goldfnd.lib.serverless import with_lambda_response\nfrom goldfnd.models.Customer import Customer\nfrom goldfnd.models.SendingSchedule import SendingSchedule\nfrom goldfnd.models.SurveyHistory import SurveyHistory\nfrom goldfnd.models.User import User\n\n\ndef create_s3_url(filename, bucket_name):\n return f'https://s3.{os.environ.get(\"AWS_REGION\")}.amazonaws.com/{bucket_name}/{filename}'\n\n\ndef array_or_none(cols, index, query_result):\n if query_result:\n return query_result.to_array()\n return [None] * cols[index].__len__()\n\n\n@with_lambda_response\ndef main(event, context):\n print(event)\n try:\n database = Database()\n histories = SurveyHistory.get_all_histories(database.session)\n s3_client = get_boto3_client('s3')\n bucket_name = 
os.environ.get('EXCEL_BUCKET')\n rows = []\n columns = [\n [*SurveyHistory.__table__.columns],\n [*User.__table__.columns],\n [*Customer.__table__.columns],\n [*SendingSchedule.__table__.columns],\n ]\n for history in histories:\n acc = []\n for index, data in enumerate(history):\n acc.append(array_or_none(columns, index, data))\n rows.append(acc)\n\n flatten_rows = []\n flatten_columns = sum(columns, [])\n flatten_columns = [col.name for col in flatten_columns]\n rows.reverse()\n\n while rows:\n row = rows.pop()\n flatten_row = sum(row, [])\n flatten_row.reverse()\n new_row = []\n while flatten_row:\n fmt = \"%Y-%m-%d %H:%M:%S\"\n cell = flatten_row.pop()\n if isinstance(cell, datetime.datetime):\n cell = cell.strftime(fmt)\n new_row.append(cell)\n\n flatten_rows.append(new_row)\n\n wb = Workbook()\n wb.new_sheet('user_data', data=[flatten_columns, *flatten_rows])\n fmt = '%Y-%m-%d_%H-%M'\n filename = f'user_data_{datetime_to_string(datetime.datetime.now(), fmt=fmt)}.xlsx'\n path = Path('/tmp').joinpath(filename)\n # [_, tmp] = tempfile.mkstemp(dir='/tmp', suffix='.xlsx')\n wb.save(path)\n\n with open(path, 'rb') as stream:\n response = s3_client.put_object(\n ACL='public-read',\n Body=stream,\n Bucket=bucket_name,\n Key=filename\n )\n print(response)\n url = create_s3_url(filename, bucket_name)\n return url\n\n except Exception:\n error_message = traceback.format_exc()\n print(error_message)\n return error_message, 500\n","sub_path":"goldfnd/functions/user/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647762666","text":"######### importing numpy\r\nimport numpy as np\r\n\r\n#######basic array characteristics\r\narray = np.array([[1,2,0], # Creating array object\r\n [3,4,1],\r\n (1,2,3)])\r\n\r\n#print(array)\r\n'''\r\n# Printing type of arr object \r\nprint(type(array))\r\n# Printing array dimensions (axes) \r\nprint(array.ndim)\r\n# Printing shape of array \r\nprint(array.shape)\r\n# Printing size (total number of elements) of array \r\nprint(array.size)\r\n# Printing type of elements in array \r\nprint(array.dtype)\r\n'''\r\n\r\n###lests form a dict to specify characteristics of array\r\nmy_dict = dict(array_type=type(array),\r\n dimension = array.ndim,\r\n shape_of_array= array.shape,\r\n size = array.size,\r\n type_of_elements=array.dtype)\r\n#print(my_dict)\r\n'''\r\nlet's plot some values'''\r\nimport matplotlib.pyplot as plt\r\nplt.plot(array)\r\nplt.show()\r\n###############################\r\n'''\r\nindices are taken as x coordinates &\r\nvalues are taken as y coordinates\r\n'''","sub_path":"Numpy_/intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221513202","text":"from keras.preprocessing.text import Tokenizer\nimport numpy as np\n\ndocs = ['너무 재밋어요', '참 최고에요', '참 잘 만든 영화에요',\n '추천하고 싶은 영화입니다', '한 번 더 보고 싶네요', # '최고에요' -> '참 최고에요'\n '글쎄요', '별로에요', '생각보다 지루해요', '연기가 어색해요',\n '재미없어요', '너무 재미없다', '참 재밋네요']\n\n# 긍정 1, 부정 0\nlabels = np.array([1,1,1,1,1,0,0,0,0,0,0,1])\n\n# 토큰화\ntoken = Tokenizer()\n# token.fit_on_texts([docs]) 리스트 자체에서 토큰화\ntoken.fit_on_texts(docs) # 리스트네의 요소들에 대해 토큰화\nprint(token.word_index)\n\n# 많이 나온 순서 '참'을 리스트의 요소에 추가하면 '너무' 와 '참'의 순서가 바뀐다\n\nx = token.texts_to_sequences(docs)\nprint(x)\n\n'''{'참': 1, '너무': 2, '재밋어요': 3, '최고에요': 4, '잘': 5, '만든': 6, '영화에요': 7, '추천하고': 8, '싶은': 9, '영화입니다': 10,\n '한번': 11, 
'더': 12, '보고': 13, '싶네요': 14, '글쎄요': 15, '별로에요': 16, '생각보다': 17, '지루해요': 18, '연기가': 19,\n '어색해요': 20, '재미없어요': 21, '재미없다': 22, '재밋네요': 23}\n \n[[2, 3], [1, 4], [1, 5, 6, 7], [8, 9, 10], [11, 12, 13, 14], [15], [16], [17, 18], [19, 20], [21], [2, 22], [1, 23]]'''\n\n# 원핫 인코딩?\nfrom keras.preprocessing.sequence import pad_sequences \n# default 0과 pre // float은 안되는데.. \n# pad_x = pad_sequences(x, padding='pre', value=np.nan) # 0이 앞으로\npad_x = pad_sequences(x, padding='pre') # 0이 앞으로\n# pad_x = pad_sequences(x, padding='post') # 0이 뒤로\n# pad_x = pad_sequences(x, padding='post', value=0) # 0이 뒤로\n\nprint(pad_x)\n\nword_size = len(token.word_index) +1\nprint(f'전체 토큰 사이즈 : {word_size}') # 24 공백이 있어야 하는데 난 없어..... -> 25가 되야함 \n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Embedding, LSTM\n\nmodel = Sequential()\n #전체 단어의 수(벡터화의 개수?), 노드 수(레이어의 아웃풋), 인풋 shape (12,5)의 input_length\n# model.add(Embedding(word_size, 10, input_length=5)) # 레이어의 간격 벡터화 하는 연산을 수행한다 shape 맞춰주는 과정? 이미 pad_sequences이거를 했는데?\n# model.add(Embedding(250, 10, input_length=5)) # (None, 5, 10) // 레이어의 간격 벡터화 하는 연산을 수행한다 shape 맞춰주는 과정? 이미 pad_sequences이거를 했는데?\nmodel.add(Embedding(25, 10)) # 레이어의 간격 벡터화 하는 연산을 수행한다 shape 맞춰주는 과정? 이미 pad_sequences이거를 했는데?\n# model.add(Embedding(25, 10, input_length=5)) # (None, 5, 10) // 레이어의 간격 벡터화 하는 연산을 수행한다 shape 맞춰주는 과정? 이미 pad_sequences이거를 했는데?\n# Embedding에 parameter calc -> 사이즈(토큰의 개수) * 아웃풋 노드\n# input_length를 지정해줘도 파라미터의 ※연산※에는 반영되지 않는다. \n\n# model.add(Flatten())\nmodel.add(LSTM(3))\n# 4* (3 + 5 + 1)\nmodel.add(Dense(1,activation='sigmoid'))\n\nmodel.summary()\n\n# 파라미터 계산 -> 서머리 이기떄문에?\n\n# compile, fit\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\nmodel.fit(pad_x, labels, epochs=30)\n\nacc = model.evaluate(pad_x, labels)[1]\nprint(f'acc : {acc}')\n\nmodel.summary()","sub_path":"keras/keras122_embedding3.py","file_name":"keras122_embedding3.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30661735","text":"from data import *\nfrom functions import *\nimport unittest\n\nclass TestCast(unittest.TestCase):\n\tdef test_circles_overlap_0(self):\n\t\tcircle1 = Circle(Point(0,0),4)\n\t\tcircle2 = Circle(Point(0,0),2)\n\t\tb = circles_overlap(circle1,circle2)\n\t\tself.assertEqual(b,-1)\n\tdef test_circles_overlap_1(self):\n\t\tcircle1 = Circle(Point(400,213),1)\n\t\tcircle2 = Circle(Point(0,0),1)\n\t\tb = circles_overlap(circle1,circle2)\n\t\tself.assertEqual(b,1)\n\tdef test_circles_overlap_2(self):\n\t\tcircle1 = Circle(Point(4,0),1)\n\t\tcircle2 = Circle(Point(0,0),3)\n\t\tb = circles_overlap(circle1,circle2)\n\t\tself.assertEqual(b,0)\n\tdef test_circles_lists(self):\n\t\tcircle_list_1 = [\n\t\tCircle(Point(400,213),1)\n\t\t]\n\t\tcircle_list_2 = [\n\t\tCircle(Point(0,0),1)\t\t]\n\t\ta = circles_only(circle_list_1,circle_list_2)\n\t\tb = []\n\t\tself.assertEqual(a,b)\n\tdef test_circles_lists(self):\n\t\tcircle_list_1 = [\n\t\tCircle(Point(0,0),200)\n\t\t]\n\t\tcircle_list_2 = [\n\t\tCircle(Point(0,0),20)\t\t]\n\t\ta = circles_only(circle_list_1,circle_list_2)\n\t\tb = [(((0,0),200),(0,0),20)]\n\t\tself.assertEqual(a,b)\n\tdef test_circles_list1(self):\n\t\tcircle_list_1 = [Circle(Point(0,0),302)]\n\t\tcircle_list_2 = [Circle(Point(1,1),24)]\n\t\ta = circles_only(circle_list_1,circle_list_2)\n\t\tb = [(Circle(Point(0,0),302),Circle(Point(1,1),24))]\n\t\tself.assertEqual(a,b)\n\tdef test_first_function(self):\n\t\tx = 
1\n\t\ty = 2\n\t\ta = first_function(x,y)\n\t\tself.assertEqual(a,3)\n\tdef test_second_function(self):\n\t\tx = 1\n\t\ty = 1\n\t\ta = second_function(x,y)\n\t\tself.assertEqual(a,0)\n\tdef test_third_function(self):\n\t\tx = 2\n\t\ty = 2\n\t\ta = third_function(x,y)\n\t\tself.assertEqual(a,3)\n\tdef test_composition(self):\n\t\tthe_list = [(0,0),(2,2),(4,4),(1,2),(3,4)]\n\t\ta = composition(the_list)\n\t\tb = [0,3,14,0,7]\n\t\tself.assertEqual(a,b)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"practice Lab Quiz/Practice Lab Quiz 2/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293834677","text":"class Solution:\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n digits[-1]+=1\n p=0\n for i in range(len(digits)-1,-1,-1):\n digits[i]+=p\n p=0\n if digits[i]>9:\n digits[i]-=10\n p=1\n if p==1:digits=[1]+digits\n return digits\n \ndigits=[0]\nprint(Solution.plusOne('self',digits))","sub_path":"066.plus-one加一/加一(48ms).py","file_name":"加一(48ms).py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139409146","text":"# Time Complexity : O(m * n * 3^l)[m = num of rows, n = num of cols, l = length of word]\n# Space Complexity : O(m * n)[m = num of rows, n = num of cols]\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n# Problem Approach(DFS + backtracking)\n# 1. Traverse over the board in DFS manner and compare the char in word at each step\n# 2. If the current char from board matches the char in word, proceed to the next char\n# 3. 
If the current char from board does not match, backtrack and explore other directions \nclass Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n if not board:return False\n def backtrack(board, word, i, j, index):\n ## base\n if index == len(word):\n return True\n if i < 0 or i >= m or j < 0 or j >=n: # No need to add board[i][j] == '#' since it is already taken care on line 22 \n return False\n \n ## body\n \n if word[index] == board[i][j]:\n \n # action\n temp = board[i][j]\n board[i][j] = '#'\n \n # recurse\n for direction in directions:\n x = i + direction[0]\n y = j + direction[1]\n \n if backtrack(board, word, x, y, index+1):\n return True\n \n # backtrack\n board[i][j] = temp\n return False\n \n m = len(board)\n n = len(board[0])\n directions = [[-1,0],[1,0],[0,1],[0,-1]]\n for i, row in enumerate(board):\n for j, col in enumerate(row):\n if backtrack(board, word, i, j, 0):\n return True\n return False\n\n\n\n\"\"\"\nTestcase where DFS will not work\n\nboard:\nA B R O\nS F C L\nL D R O\n\nword:\nSFCROLORB\n\nTestcase to understand complexity\n\nboard:\nA A A A A A\nA A L A A A\nA A A A A A\n\n\nword:\nAAAAAAAAAAAAAAAAAL\n\n\"\"\"","sub_path":"79_Word_Search.py","file_name":"79_Word_Search.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273499762","text":"\nclass Tree():\n def __init__(self, value=None):\n self.value = value\n self.left = None\n self.right = None\n\n def addTo(self, tree, element, infix=''): # adding the element to the right or left side of the tree\n if tree.value is None:\n tree.value = element\n else:\n node = Tree(element)\n if infix.find(element) < infix.find(tree.value):\n if tree.left is None:\n tree.left = node\n else:\n self.addTo(tree.left, element, infix=infix)\n else:\n if tree.right is None:\n tree.right = node\n else:\n self.addTo(tree.right, element, infix=infix)\n\n# printing the tree\n def print(self, tree):\n stepNum = 0\n result = []\n nodeLevel = self.get_levels(tree)\n spaceLeft = 1000\n \n for level in nodeLevel[::-1]:\n nextStep = stepNum *2 + 1\n result.append(stepNum * ' ', nextStep * ' ').join(level)\n stepNum = nextStep\n\n for level in result:\n for i in result(len(level)):\n if level[i] != ' ':\n if i < spaceLeft:\n spaceLeft = i\n break\n\n # for place in result[::-1]:\n # print(place[spaceLeft:]).replace('_', ' ')\n\n def startBuild(self, infix, prefix): # building the tree\n tree = Tree()\n for element in prefix:\n Tree.addTo(tree, element, infix=infix)\n return tree\n\n def get_levels(self, tree): # checking how many levels we have\n data = []\n numOfLevels = 0\n while True:\n nodes = []\n Tree.singleLevel(tree, nodes, numOfLevels)\n if nodes.count(' ') == len(nodes):\n break\n else:\n data.append(nodes.copy())\n numOfLevels += 1;\n return data\n\n def singleLevel(self, tree, checkNodes, level): # checking what we have in every level and if he on left or right\n if level == 0:\n checkNodes.append(str(tree.value))\n else:\n if tree.left in None:\n self.singleLevel(Tree(' '), checkNodes, level - 1)\n else:\n self.singleLevel(Tree.left, checkNodes, level - 1)\n if tree.right in None:\n self.singleLevel(Tree(' '), checkNodes, level - 1)\n else:\n self.singleLevel(Tree.right, checkNodes, level - 1)\n\n\ndef build(infix, prefix):\n tree = Tree.startBuild(infix, prefix)\n Tree.print(tree)\n\n\ndef main():\n while True:\n try:\n infix = input()\n prefix = input()\n build( infix, prefix)\n\n except EOFError:\n 
break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tree/src/Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"75787808","text":"from Linked_List import Linked_List\n\n\ndef Josephus(ll):\n # while there is at least one survivor, rotate the list once to the left, kill the first index, then print results of the round\n while len(ll) > 1:\n ll.rotate_left()\n ll.remove_element_at(0)\n print(ll)\n survivor = ll.get_element_at(0)\n print('The survivor is: ' + str(survivor))\n \n\nif __name__ == '__main__':\n # asks for user input and saves it to n\n n = int(input(\"Input the total number of people: \"))\n # creates and populates the Josephus list\n ll = Linked_List()\n for k in range(1, n + 1):\n ll.append_element(k) \n print(\"Initial order:\", ll)\n Josephus(ll)\n","sub_path":"literally-loving-linked-lists/Josephus.py","file_name":"Josephus.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520837566","text":"#License\n#This code is under [The BSD 3-Clause License]\n#(http://opensource.org/licenses/BSD-3-Clause)\n\nfrom __future__ import division\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab\nimport matplotlib.patches as mpatches\n\nfrom matplotlib.patches import Circle\nfrom matplotlib.collections import PatchCollection\n\n\nSYM_LINES = (((0,0,0),(0.5,0.5,0)),((0.5,0.5,0),(0.5,0,0)),((0.5,0,0),(0,0,0)))\nLINE_NAMES = [r'$\\bar \\Gamma$', r'$\\bar M$', r'$\\bar X$']\n\ndef graph_band_data(input_f,num_kpt,num_bands,num_symmetry_lines, ymin, ymax):\n with open(input_f) as f:\n fig = plt.figure()\n ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) \n ax.set_ylim([ymin,ymax])\n ax.set_xlim([0.0,0.6])\n ax.get_xaxis().set_visible(False)\n ax.set_ylabel(r'$E-E_F (eV)$') \n for band in range(num_bands): \n x_vals = []\n y_vals = []\n r_vals_1 = []\n r_vals_2 = []\n r_vals_3 = []\n r_vals_4 = []\n r_vals_5 = []\n f.readline()\n y_vals_new = []\n for line_num in range(num_symmetry_lines): \n #print(line_num)\n k = 0 \n for kpt in range(num_kpt):\n k += 1\n #print(k)\n line_data=f.readline().strip().split()\n x_vals.append(float(line_data[0]))\n y_vals.append(float(line_data[1]))\n r_vals_1.append(0.03*float(line_data[2]))\n r_vals_2.append(0.03*float(line_data[3]))\n r_vals_3.append(0.03*float(line_data[4]))\n r_vals_4.append(0.03*float(line_data[5]))\n r_vals_5.append(0.03*float(line_data[6]))\n #f.readline()\n #f.readline()\n plt.axvline(x=x_vals[line_num*num_kpt],ymin=0,ymax=1,linewidth=0.125, color='0.75')\n plt.text(x_vals[line_num*num_kpt], ymin+(ymin/10), LINE_NAMES[line_num])\n circles(x_vals,y_vals, r_vals_1, c='r', ax=ax) \n circles(x_vals,y_vals, r_vals_2, c='b', ax=ax)\n circles(x_vals,y_vals, r_vals_3, c='y', ax=ax)\n circles(x_vals,y_vals, r_vals_4, c='g', ax=ax)\n circles(x_vals,y_vals, r_vals_5, c='m', ax=ax)\n plt.axhline(y=0,xmin=0,xmax=1,linewidth=0.25,color='k')\n plt.axvline(x=x_vals[-1],ymin=0,ymax=1,linewidth=0.125, color='0.75')\n plt.text(x_vals[-2],ymin+(ymin/10), LINE_NAMES[0]) \n ax.plot(x_vals,y_vals,color='k')\n f.readline()\n #print(x_vals) \n r_patch = mpatches.Patch(color='r', label='dxy')\n #plt.legend(handles=[red_patch]) \n b_patch = mpatches.Patch(color='b', label='dyz')\n #plt.legend(handles=[blue_patch]) \n y_patch = mpatches.Patch(color='y', label='dz2')\n #plt.legend(handles=[y_patch]) \n g_patch = 
mpatches.Patch(color='g', label='dxz')\n #plt.legend(handles=[g_patch]) \n m_patch = mpatches.Patch(color='m', label='dx2-y2')\n #plt.legend(handles=[r_patch, b_patch, y_patch,g_patch, m_patch])\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=5, mode=\"expand\", borderaxespad=0., handles=[r_patch, b_patch, y_patch,g_patch, m_patch]) \n #plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) \n plt.show()\n\n \ndef scale_line_length(sym_point_path):\n \"\"\" Takes a list or tuple of symmetry points corresponding to the BZ path followed\n in the band-structure calculations. Returns a list of scaling factors.\n \"\"\"\n distance_tot = 0\n distances = []\n for num in range(len(sym_point_path)):\n x_diff = SYM_LINES[num][1][0]-SYM_LINES[num][0][0]\n y_diff = SYM_LINES[num][1][1]-SYM_LINES[num][0][1]\n z_diff = SYM_LINES[num][1][2]-SYM_LINES[num][0][2]\n distance = np.sqrt(x_diff**2 + y_diff**2 + z_diff**2)\n distance_tot += distance\n distances.append(distance)\n scaling_factors = []\n for item in distances:\n scale = (item / distance_tot) * len(sym_point_path)\n scaling_factors.append(scale)\n return scaling_factors\n \ndef circles(x, y, s, c='b', ax=None, vmin=None, vmax=None, **kwargs): \n if ax is None:\n ax = plt.gca() \n\n if isinstance(c, str):\n color = c # ie. use colors.colorConverter.to_rgba_array(c)\n else:\n color = None # use cmap, norm after collection is created\n kwargs.update(color=color)\n\n if isinstance(x, (int, float)):\n patches = [Circle((x, y), s),]\n elif isinstance(s, (int, float)):\n patches = [Circle((x_,y_), s) for x_,y_ in zip(x,y)]\n else:\n patches = [Circle((x_,y_), s_) for x_,y_,s_ in zip(x,y,s)]\n collection = PatchCollection(patches, **kwargs)\n if color is None:\n collection.set_array(np.asarray(c))\n if vmin is not None or vmax is not None:\n collection.set_clim(vmin, vmax)\n ax.add_collection(collection)\n return collection\n\n\nif __name__ == \"__main__\":\n graph_band_data('band2.dat', 80, 48, 3, -1, 1)\n","sub_path":"orbitalbands.py","file_name":"orbitalbands.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1752838","text":"import json\nimport logging\n\nfrom elections.models import NomineeLink\nfrom elections.views.Constants import PRE_EXISTING_ELECTION\nfrom elections.views.create_context.nominee_links.utils.display_errors_html import \\\n create_context_for_display_errors_html\nfrom elections.views.create_context.nominee_links.utils.election_nominee_names_html import \\\n create_context_for_election_nominee_names_html\nfrom elections.views.create_context.nominee_links.utils.make_context_value_serializable_to_json import \\\n make_json_serializable_context_dictionary\nfrom elections.views.create_context.nominee_links.utils.submission_buttons_html import \\\n create_context_for_submission_buttons_html\nfrom elections.views.create_context.webform_format.create_context_for_election_date_html import \\\n create_context_for_election_date_html\nfrom elections.views.create_context.webform_format.create_context_for_election_time_html import \\\n create_context_for_election_time_html\nfrom elections.views.create_context.webform_format.create_context_for_election_type_html import \\\n create_context_for_election_type_html\nfrom elections.views.create_context.webform_format.create_context_for_election_websurvey_html import \\\n create_context_for_election_websurvey_html\n\nlogger = logging.getLogger('csss_site')\n\n\ndef 
create_context_for_create_election_nominee_links_html(context, election_date=None, election_time=None,\n election_type=None, create_new_election=False,\n websurvey_link=None, error_messages=None,\n nominee_names=None):\n pre_existing_election = False\n if create_new_election:\n nominee_links = NomineeLink.objects.all()\n if len(nominee_links) > 0:\n pre_existing_election = True\n error_messages = [(\n f\"Please delete the nominee links for the {nominee_links[0].election.human_friendly_name} \"\n f\"election before creating a new election via nominee link\"\n )]\n create_context_for_display_errors_html(context, error_messages)\n context[PRE_EXISTING_ELECTION] = pre_existing_election\n if pre_existing_election is False:\n create_context_for_election_date_html(context, election_date=election_date)\n create_context_for_election_time_html(context, election_time=election_time)\n create_context_for_election_type_html(context, election_type=election_type)\n create_context_for_election_websurvey_html(context, websurvey_link=websurvey_link)\n create_context_for_election_nominee_names_html(context, nominee_names=nominee_names)\n create_context_for_submission_buttons_html(context, create_new_election=create_new_election)\n logger.info(\n \"[elections/create_election_nominee_links_html.py\"\n \" create_context_for_create_election_nominee_links_html()] \"\n \"context=\"\n )\n json_serializable_context = make_json_serializable_context_dictionary(context)\n logger.info(json.dumps(json_serializable_context, indent=3))\n","sub_path":"csss-site/src/elections/views/create_context/nominee_links/create_election_nominee_links_html.py","file_name":"create_election_nominee_links_html.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598748987","text":"# This is a sample Python script.\nimport numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\napp = Flask(__name__)\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\ndef ValuePredictor(to_predict_list):\n to_predict = np.array(to_predict_list).reshape(1, 12)\n loaded_model = pickle.load(open(\"model.pkl\", \"rb\"))\n result = loaded_model.predict(to_predict)\n return result[0]\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n if request.method == 'POST':\n to_predict_list = request.form.to_dict()\n to_predict_list = list(to_predict_list.values())\n to_predict_list = list(map(int, to_predict_list))\n result = ValuePredictor(to_predict_list)\n if int(result) == 1:\n prediction = 'Income more than 50K'\n else:\n prediction = 'Income less that 50K'\n return render_template('index.html', prediction_text='your {}'.format(prediction))\n\n # return render_template(\"result.html\", prediction=prediction)\n\n\napp.run(debug=True)\n\n # def print_hi(name):\n # Use a breakpoint in the code line below to debug your script.\n # print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.\n\n\n# Press the green button in the gutter to run the script.\n# if __name__ == '__main__':\n# print_hi('PyCharm')\n\n# See PyCharm help at 
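# --- editor's aside (not part of the original source) --------------------------
# ValuePredictor above reshapes a flat 12-value list into one (1, 12) row before
# predicting. A self-contained sketch of just that reshape; the feature values
# and their order are hypothetical, the real model.pkl defines the true encoding:
import numpy as np  # already imported at the top of the original file

_sample_row = np.array([39, 7, 77516, 13, 4, 1, 0, 1, 4, 1, 40, 39]).reshape(1, 12)
# _sample_row.shape == (1, 12), the row shape scikit-learn style models expect
# -------------------------------------------------------------------------------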
https://www.jetbrains.com/help/pycharm/\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307250292","text":"#!/usr/bin/env python3\n\nfrom tkinter import *\nfrom threading import *\nfrom multiprocessing import *\nimport locale\nimport time\nimport shelve\nimport atexit\n\nmoney = 0\n\np_method = print\n\nITEMS = list()\n\nclass Item:\n def __init__(self, itemlist, name, pay, cost, keyword = None):\n self.name = name\n self.pay = pay\n self.cost = cost\n self.count = 0\n if keyword == None:\n self.keyword = self.name\n else:\n self.keyword = keyword \n itemlist.append(self)\n\n def mps(self):\n return self.count * self.pay\n\n def do_profit(self):\n global money\n money += (self.count * self.pay)\n\n def buy(self):\n global money\n if self.cost <= money:\n self.count += 1\n money -= self.cost\n newcost = round((self.cost * 0.25), 2)\n self.cost += newcost\n\n if money >= self.cost:\n self.buy_button.update()\n else:\n p_method(\"There ain't enough money for that.\")\n\ndef profit_thread_target():\n global money\n while True:\n for i in ITEMS:\n i.do_profit()\n time.sleep(0.25)\n\nprofit_thread = Thread(target=profit_thread_target, name=\"Profit Thread\")\nprofit_thread.daemon = True\nprofit_thread.start()\n\ndef get_combined_mps():\n o = 0\n for i in ITEMS:\n o += i.mps()\n return o\n\n# For some dumbass reason, you have to define items as being in this library:\n # cookielib.potato = cookielib.Item('potato', 5, 10)\n","sub_path":"cookielib.py","file_name":"cookielib.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"143659280","text":"\"\"\"This module focuses on getting and validating the path for the RTM\nworksheet.\"\"\"\n\n# --- Standard Library Imports ------------------------------------------------\nimport datetime\nimport tkinter as tk\nfrom pathlib import Path\nfrom tkinter import filedialog\nimport os\n\n# --- Third Party Imports -----------------------------------------------------\nimport click\nimport openpyxl\nfrom openpyxl.styles import Alignment, Color, Font, PatternFill\nfrom openpyxl.comments import Comment\n\n# --- Intra-Package Imports ---------------------------------------------------\nfrom rtm.containers.markup import CellMarkup\nfrom rtm.main import exceptions as exc\nfrom rtm.main.versions import get_version_check_message\n\n\ndef get_rtm_path(path_option='default') -> Path:\n \"\"\"Prompt user for RTM workbook location. 
Return path object.\"\"\"\n if path_option == 'default':\n path = get_new_path_from_dialog()\n required_extensions = '.xlsx .xls'.split()\n if str(path) == '.':\n raise exc.RTMValidatorFileError(\"\\nError: You didn't select a file\")\n if path.suffix not in required_extensions:\n raise exc.RTMValidatorFileError(\n f\"\\nError: You didn't select a file with \"\n f\"a proper extension: {required_extensions}\"\n )\n click.echo(f\"\\nThe RTM you selected is {path}\")\n return path\n elif isinstance(path_option, Path):\n return path_option\n\n\ndef get_new_path_from_dialog() -> Path:\n \"\"\"Provide user with dialog box so they can select the RTM Workbook\"\"\"\n root = tk.Tk()\n root.withdraw()\n path = Path(filedialog.askopenfilename())\n return path\n\n\ndef get_workbook(path):\n return openpyxl.load_workbook(filename=str(path), data_only=True)\n\n\ndef get_worksheet(workbook, worksheet_name):\n ws = None\n for sheetname in workbook.sheetnames:\n if sheetname.lower() == worksheet_name.lower():\n ws = workbook[sheetname]\n if ws is None:\n raise exc.RTMValidatorFileError(\n f\"\\nError: Workbook does not contain a '{worksheet_name}' worksheet\"\n )\n return ws\n\n\ndef now_str(pretty=False):\n if pretty:\n return datetime.datetime.now().strftime(\"%d %B %Y, %I:%M %p\")\n else:\n return datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n\n\ndef get_save_path(original_path, modify_original_file=False):\n \"\"\"Get the full file path. Create subdirectory if necessary.\"\"\"\n\n # --- When modifying original file ----------------------------------------\n if modify_original_file:\n return original_path\n\n # --- When saving a copy of the original file -----------------------------\n original_path = Path(original_path)\n original_directory = original_path.parent\n subdirectory = original_directory/'rtm_validator_results'\n subdirectory.mkdir(exist_ok=True)\n file_name = f'{now_str()}_{original_path.name}'\n return subdirectory / file_name\n\n\ndef get_cell_comment_string(comments):\n # comments is a list of title/explanation pairs\n titles_and_comments = [f\"{comment[0].upper()}\\n{comment[1]}\" for comment in comments]\n comments_string = '\\n\\n'.join(titles_and_comments)\n return f\"{now_str(pretty=True)}\\n\\n{comments_string}\"\n\n\ndef mark_up_excel(path, wb, ws_procedure, markup_content: dict, modify_original_file=False):\n # Comments fall in two categories:\n # PROCEDURE BASED REQUIREMENTS: These are comments bound to record in a specific field.\n # The cell gets highlighted orange and a comment explains the error.\n # README: These are not directed at a specific cell.\n # These will generate new rows inserted at the top of the worksheet.\n\n bg_error = Color('00edc953') # yellow-ish background color for procedure errors\n fg_good = Color('0025c254') # green-ish text color for readme comments\n fg_error = Color('00bf2f24') # red-ish text color for readme errors\n\n # --- Procedure markup ----------------------------------------------------\n for location, comments in markup_content.items():\n if not isinstance(location, str): # i.e. 
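# --- editor's aside (not part of the original source) --------------------------
# What get_save_path above produces, shown on a hypothetical input path:
#
#   get_save_path(Path('C:/work/RTM.xlsx'))
#   -> C:/work/rtm_validator_results/20240101_120000_RTM.xlsx   (now_str() timestamp)
#
#   get_save_path(Path('C:/work/RTM.xlsx'), modify_original_file=True)
#   -> C:/work/RTM.xlsx   (the original path, returned unchanged)
# -------------------------------------------------------------------------------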
if this items has a row/col location\n cell = ws_procedure.cell(*location)\n # cell.style = style_error\n cell.fill = PatternFill(patternType='solid', fgColor=bg_error)\n cell.comment = Comment(get_cell_comment_string(comments), \"RTM Validator\")\n\n # --- Set up README errors ------------------------------------------------\n general_errors = []\n for field_name, comments in markup_content.items():\n if isinstance(field_name, str):\n general_errors.append(CellMarkup(field_name.upper(), is_error=True))\n for comment in comments:\n comment_str = f\"{comment[0].upper()}: {comment[1]}\"\n general_errors.append(CellMarkup(comment_str, is_error=True, indent=True))\n\n # --- Set up README comments ----------------------------------------------\n readme_text = [\n CellMarkup(\"RTM VALIDATOR\", size=24),\n CellMarkup(f\"{now_str(pretty=True)}\"),\n CellMarkup(\"All images and attachments have been removed from this workbook.\"),\n CellMarkup(),\n CellMarkup(\"Cells highlighted orange require attention.\"),\n CellMarkup(\"See the cell's note/comment for details.\"),\n CellMarkup(),\n CellMarkup(\"To improve readability, convert notes to comments:\"),\n CellMarkup(\"Go to the Review tab\", indent=True),\n CellMarkup(\"Click on Notes, select Convert to Comments\", indent=True),\n CellMarkup(),\n ] + get_version_check_message()\n if general_errors:\n readme_text += [\n CellMarkup(),\n CellMarkup(\"General Errors:\"),\n CellMarkup(),\n ] + general_errors\n\n # --- create and write to README sheet ------------------------------------\n readme = 'README'\n ws_readme = wb.create_sheet(readme, 0)\n for row, comment in enumerate(readme_text, 1):\n cell = ws_readme.cell(row, 1, comment.comment)\n cell.alignment = Alignment(\n wrapText=False,\n indent=3 if comment.indent else 0,\n )\n cell.font = Font(\n color=fg_error if comment.is_error else fg_good,\n size=comment.size,\n bold=True,\n )\n if comment.size:\n ws_readme.row_dimensions[row].height = comment.size * 1.4\n\n # --- Delete Unmarked Sheets ----------------------------------------------\n if not modify_original_file:\n for worksheet in wb.worksheets:\n if worksheet not in [ws_procedure, ws_readme]:\n wb.remove(worksheet)\n sheet_index = 0\n wb.active = sheet_index\n\n # --- Save ----------------------------------------------------------------\n save_path = get_save_path(path, modify_original_file)\n wb.save(save_path)\n os.startfile(save_path)\n # open(save_path)\n\n\ndef row_heights(ws):\n heights = [ws.row_dimensions[index+1].height for index in range(ws.max_row)]\n return [15 if height is None else height for height in heights]\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"rtm/main/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"342994477","text":"import datetime\n\nfrom django.test import TestCase\nfrom django.urls import resolve\nfrom django.http import HttpRequest\n\nfrom .views import home_page\n\n\nclass HomePageTest(TestCase):\n\n def test_root_url_resolves_to_homepage(self):\n found = resolve('/')\n self.assertEqual(found.func, home_page)\n\n def test_home_page_returns_correct_html(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'bookings/index.html')\n\n def test_can_send_POST_request(self):\n start_date = datetime.datetime(2018, 5, 20, 10, 0).strftime('%Y/%m/%d %H:%M')\n end_date = datetime.datetime(2018, 5, 20, 10, 30).strftime('%Y/%m/%d %H:%M')\n response = 
self.client.post('/', data={'booking_name': 'New Booking',\n 'start_date': start_date,\n 'end_date': end_date})\n self.assertIn('New Booking', response.content.decode())\n self.assertTemplateUsed(response, 'bookings/index.html')\n","sub_path":"bookings/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485061210","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, _\nfrom datetime import datetime\nfrom xlrd import open_workbook\nfrom odoo.exceptions import UserError\nfrom odoo.tools import float_compare\n# import base64\nfrom operator import itemgetter\nimport logging\n_logger = logging.getLogger(__name__)\n\n\n# récuperer la valeur d'une cellule [row, col] sur la feuille excel sh\ndef _get_cell(sh, row, col, st=False):\n if col == -1:\n return None\n else:\n value = sh.cell(row, col).value\n if st:\n value = str(value)\n if value[-2:] == '.0':\n value = value[:-2]\n return value\n\n\n# récuperer la valeur d'une cellule [row, col] sur la feuille excel sh\ndef _supp_dot_0(val):\n v = str(val)\n if len(v) > 2:\n if v[-2:] == '.0':\n v = v[:-2]\n return v\n\n\n# vérifie si la valeur de la cellule est vide\ndef _check_not_null(sh, row, col, name_field):\n value = sh.cell(row, col).value\n if value and value != '':\n return True\n else:\n raise UserError(_(\n 'Erreur a la ligne '+str(row+1)+', Le champs ('+name_field+') est vide, veuillez corriger sur le fichier excel et relancer l\\'importation'))\n\n\ndef read_file(fname):\n # copier le fichier selectionné (Field.Binary) dans un fichier temporaire (avec un chemin connu) et utiliser le fichier tmp\n file_path = 'tmp/file.xlsx'\n data = fname\n f = open(file_path, 'wb')\n f.write(data.decode('base64'))\n # f.write(base64.b64decode(data)) - pour python 3, rajouter aussi import base64\n\n f.close()\n return file_path\n\n\nclass ImportBalanceWizard(models.TransientModel):\n _name = 'import.balance.wizard'\n\n name = fields.Many2one('dl.report.balance', required=1)\n w_file_name = fields.Binary(u'Sélectionnez le document', required=1)\n filename = fields.Char('Filename')\n print_report = fields.Boolean ('Afficher un rapport d\\'erreur')\n error = fields.Boolean ('Erreur')\n cntrl_only = fields.Boolean ('Faire un controle seulement')\n\n def action_import(self):\n\n def elem_exist_req(model, nfield, value):\n req = \"select count(*) as nbr from \"+model+\" where \"+nfield+\"=%s;\"\n rub = (value,)\n self._cr.execute(req, rub)\n res = self._cr.dictfetchall()\n num = res[0].get('nbr')\n if not num or num == 0 :\n return False\n else:\n return True\n\n def _check_exist(sh, row, col, model, nfield, name_field, print_rep=False):\n field_val = str(sh.cell(row, col).value)\n if field_val:\n if field_val[-2:] == '.0':\n field_val = field_val[:-2]\n # mat = self.env[model].search([(nfield, '=', field_val)])\n if not elem_exist_req(model, nfield, field_val):\n self.error = True\n msg = 'Erreur a la ligne ' + str(\n row + 1) + ', Le ' + name_field + ' ['+ _supp_dot_0(field_val)+u'] n\\'existe pas sur la base Odoo, veuillez corriger sur le fichier excel ou créer cet élément puis relancer l\\'importation'\n if not print_rep:\n raise UserError(_(msg))\n else:\n fid.write(str(row + 1) + ';' + name_field + ';' + _supp_dot_0(field_val) + '\\n')\n # fid.wl(str(row + 1) + ';' + name_field + ';' + _supp_dot_0(field_val))\n\n def verify_data(sh):\n for row in range(1, sh.nrows):\n _check_not_null(sh, row, MODEL_COMPTE, 'Compte')\n 
_check_exist(sh, row, MODEL_COMPTE, 'account_account', 'code', 'Compte', self.print_report)\n\n def _get_field_id(sh, row, col, model, nfield):\n if col == -1: # the column does not exist in the excel file\n return None\n else:\n field_val = str(sh.cell(row, col).value)\n if field_val:\n if field_val[-2:] == '.0':\n field_val = field_val[:-2]\n mat = self.env[model].search([(nfield, '=', field_val)])\n if mat.exists():\n return mat[0].id\n else:\n return None\n else: # the column exists but is not filled in\n return None\n\n def create_line(sheet):\n # get the account id and code to check whether the account exists\n compte_id = _get_field_id(sheet, row_index, MODEL_COMPTE, 'account.account', 'code')\n compte_code = _get_cell(sheet, row_index, MODEL_COMPTE, True)\n\n # create the balance line\n prd_id = self.env['dl.report.balance.line'].create({\n 'name' : compte_id,\n 'code' : compte_code,\n 'report_id' : self.name.id,\n 'init_debit' : float(_get_cell(sheet, row_index, MODEL_I_DEBIT)),\n 'inti_credit' : float(_get_cell(sheet, row_index, MODEL_I_CREDIT)),\n 'periode_debit' : float(_get_cell(sheet, row_index, MODEL_P_DEBIT)),\n 'periode_credit': float(_get_cell(sheet, row_index, MODEL_P_CREDIT)),\n 'solde_debit' : float(_get_cell(sheet, row_index, MODEL_S_DEBIT)),\n 'solde_credit' : float(_get_cell(sheet, row_index, MODEL_S_CREDIT)),\n })\n return prd_id\n\n\n # start of the operation\n # column number parameters\n MODEL_I_DEBIT = 2\n MODEL_I_CREDIT = 3\n MODEL_P_DEBIT = 4\n MODEL_P_CREDIT = 5\n MODEL_S_DEBIT = 6\n MODEL_S_CREDIT = 7\n MODEL_COMPTE = 0\n\n # open the excel file\n book = open_workbook(read_file(self.w_file_name))\n xsheet = book.sheet_by_index(0)\n\n # write import errors to a csv log file\n if self.print_report:\n fid = open('erreur_importation.csv', 'w')\n fid.write(u'Line;Table;Value not found \\n')\n\n # check for errors or missing values in the excel file before importing\n self.error = False\n verify_data(xsheet)\n if self.print_report:\n fid.close()\n\n # start of processing\n if self.error:\n if self.print_report:\n raise UserError(_(u'The file contains anomalies, please check the generated log file [erreur_importation.csv]'))\n else:\n if self.cntrl_only:\n raise UserError(_('Everything is OK'))\n\n for row_index in range(1, xsheet.nrows):\n create_line(xsheet)\n\n return True\n","sub_path":"l10n_dz_reports/wizard/import_balance.py","file_name":"import_balance.py","file_ext":"py","file_size_in_byte":6933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400426588","text":"#!/usr/bin/python3.5\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndef getInDegrees(graph, origin):\n with open(\"./metrics/\" + origin + \"/inDegrees.csv\", \"w\") as w:\n for node in graph.nodes():\n w.write(str(node) + \",\" + str(graph.in_degree(node)) + \"\\n\")\n\ndef getOutDegrees(graph, origin):\n with open(\"./metrics/\" + origin + \"/outDegrees.csv\", \"w\") as w:\n for node in graph.nodes():\n w.write(str(node) + \",\" + str(graph.out_degree(node)) + \"\\n\")\n\ndef getDegreeCentralities(graph, origin):\n with open(\"./metrics/\" + origin + \"/degreeCentralities.csv\", \"w\") as w:\n raw = nx.degree_centrality(graph)\n for x in raw:\n node = x\n degree = raw.get(x)\n
w.write(str(node) + \",\" + str(degree)+\"\\n\")\n \n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"257507450","text":"import numpy as np\nimport itertools\nimport random\nimport string\n\ndirections = [(x*j,y*j) for x,y in [(0, 1), (1, 0), (1, 1), (-1, 1)] for j in [-1, 1]]\n\n\nclass board():\n\n def __init__(self, size):\n self.board = np.empty([size, size], dtype=\"U1\")\n self.board[:] = \" \"\n self.words = []\n self.size = size\n \n def add_word(self, word_o):\n \n put = False\n \"\"\"\n This code must run multiple times until it finds a place for the word\n \"\"\"\n while not put:\n # pick a random direction\n word_o.direction = random.choice(directions)\n # get a list from the word object of possible places\n starts = word_o.posstarts(self.size)\n # pick a random starting point\n start = random.choice(starts)\n # get all the indexes that it will use\n takes = word_o.getlocs(start)\n\n # make sure the locations don't have a different characters\n if board.is_good_locs(self, word_o, takes):\n for x, y in zip(word_o.word, takes):\n self.board[y] = x\n self.words.append(word_o.word)\n\n # to break out of the loop\n put = True\n else:\n print(\"retrying\")\n\n\n def is_good_locs(self, word_o, locs):\n #mini function for each character\n\n def cl(char, loc):\n if self.board[loc] == \" \" or self.board[loc] == char:\n return True\n\n #make sure it holds true for all instances\n return all([cl(x, y) for x, y in zip(word_o.word, locs)])\n\n\n\n def fill(self):\n ltrs = [i for i in np.nditer(self.board) if i != \" \"]\n indexes = [(x, y) for x in range(self.size) for y in range(self.size)]\n for i in indexes:\n if self.board[i] == \" \":\n self.board[i] = random.choice(ltrs)\n else:\n pass\n \n\n def __str__(self):\n return self.board.__str__()\n\n\n\n\n\n\n\n\nclass word_obj():\n def __init__(self, word):\n self.word = word\n self.direction = random.choice(directions)\n self.length = len(self.word)\n\n\n def posstarts(self, board_size):\n ln = self.length - 1\n rd, cd = self.direction\n rl = word_obj.getlimr(rd, ln, board_size)\n cl = word_obj.getlimr(cd, ln, board_size)\n return [(x, y) for x in rl for y in cl]\n\n\n\n @staticmethod\n def getlimr(di, ln, size):\n if di == 0:\n return list(range(size))\n elif di == 1:\n return list(range(size-ln))\n elif di == -1:\n return list(range(ln, size))\n\n\n def getlocs(self, origin):\n \"\"\" Gets the indexes given a direction, starting point, length\"\"\"\n x, y = origin\n d, b = self.direction\n return [(x+i*d, y+i*b) for i in range(self.length)]\n\n\n\n\n\n def __str__(self):\n return self.word\n \n\ndef getwordlist(mesg):\n wordlist = input(mesg).strip().split(\",\")\n letlist = [list(x) for x in wordlist]\n flatletlist = list(itertools.chain(*letlist))\n if all([x in itertools.chain(string.ascii_uppercase, string.ascii_lowercase) for x in flatletlist]):\n return wordlist\n else:\n getwordlist(\"Try again:\\n\")\n wordlist = input(\"\").strip().split(\",\")\n\n\n\n\ndef getbsize(mesg):\n bsize = input(mesg)\n try:\n int(bsize)\n return int(bsize)\n except:\n getbsize(\"Try again:\\n\")\n \n\n\nif __name__ == \"__main__\":\n bsize = getbsize(\"Enter Board size:\\n\")\n wordlist = getwordlist(\"Enter comma separated words: \\n\")\n\n\n\n brd = board(bsize)\n for i in wordlist:\n j = word_obj(i)\n brd.add_word(j)\n # print(brd)\n brd.fill()\n print(brd)\n with open(\"page.html\", \"w\") as f:\n 
for i in range(brd.size):\n x = \" \".join(brd.board[i,])\n f.write(f\"{x}
\")\n","sub_path":"class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"641554995","text":"# This python file helps to get the data from the files, format and make it ready for transformers\nfrom .tools import *\nfrom transformers import BertTokenizer\nfrom multiprocessing import Pool, cpu_count\nimport pickle, copy\nimport logging\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom tqdm import tqdm , trange\nimport torch\n\n\nCONFIG_FOLDER = 'config/'\nid_label_file = 'id_2_label.json'\nlable_2_id_file = 'label2_2_id.json'\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None):\n \"\"\"\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n \nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef convert_example_to_feature(example_row):\n # return example_row\n example, max_seq_length, tokenizer = example_row\n\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n\n return InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=example.label)\n \n \nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_test_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n \n raise NotImplementedError()\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def createDirectories(cls,config):\n report_dir = config.programsettings.REPORTS_DIR\n# if os.path.exists(report_dir) and os.listdir(report_dir):\n# report_dir += f'/report_{len(os.listdir(report_dir))}'\n# os.makedirs(report_dir)\n\n output_dir = config.programsettings.OUTPUT_DIR\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\n \n\nclass MultiClassificationProcessor(DataProcessor):\n \"\"\"Processor for binary classification dataset.\"\"\"\n \n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_test_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"test.tsv\")), \"test\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n \n return ['Reason-Drug', 'Route-Drug', 'Strength-Drug', 'Frequency-Drug',\n 'Duration-Drug', 'Form-Drug', 'Dosage-Drug', 'ADE-Drug',\n 'no relation']\n\n# return ['no_relation' , 'org:subsidiaries' , 'org:city_of_headquarters' , 'per:title',\n# 'per:origin' , 'per:employee_of' , 'org:top_members/employees',\n# 'org:alternate_names' , 'org:shareholders' , 'org:country_of_headquarters',\n# 'per:countries_of_residence' , 'per:date_of_death',\n# 'per:cities_of_residence' , 'per:city_of_death' , 'per:age' , 'org:founded_by',\n# 'org:parents' , 'org:member_of' , 'per:stateorprovinces_of_residence',\n# 'per:religion' , 'org:founded' , 'org:stateorprovince_of_headquarters',\n# 'per:alternate_names' , 'per:siblings' , 'per:charges',\n# 'org:number_of_employees/members' , 'per:stateorprovince_of_death',\n# 'org:members' , 'per:cause_of_death' , 'per:parents' , 'per:other_family',\n# 'per:schools_attended' , 'per:children' , 'per:spouse' , 
'per:country_of_birth',\n# 'org:political/religious_affiliation' , 'per:country_of_death',\n# 'per:date_of_birth' , 'per:city_of_birth' , 'org:website' , 'org:dissolved',\n# 'per:stateorprovince_of_birth']\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n \n \n def get_data_loader(self, config, source=\"train\"):\n\n logging.basicConfig(level=logging.INFO)\n \n self.config = config\n \n # Create output, report directories, if doesn't exist already\n self.createDirectories(config)\n # This is to read input data and process them \n\n if source == \"train\":\n data = self.get_train_examples(config.programsettings.DATA_DIR)\n elif source == \"dev\":\n data = self.get_dev_examples(config.programsettings.DATA_DIR)\n elif source == \"test\":\n data = self.get_test_examples(config.programsettings.DATA_DIR)\n\n data_len = len(data)\n\n label_list = self.get_labels() # [0, 1] for binary classification\n num_labels = len(label_list)\n num_train_optimization_steps = int(\n data_len / config.hyperparams.TRAIN_BATCH_SIZE / config.hyperparams.GRADIENT_ACCUMULATION_STEPS) * config.hyperparams.NUM_TRAIN_EPOCHS\n\n seq_length = str(config.hyperparams.MAX_SEQ_LENGTH)\n \n if source == \"train\":\n feature_pickle_file = config.programsettings.DATA_DIR + \"train_features_\" + seq_length + \".pkl\"\n elif source == \"dev\":\n feature_pickle_file = config.programsettings.DATA_DIR + \"dev_features_\" + seq_length + \".pkl\"\n elif source == \"test\":\n feature_pickle_file = config.programsettings.DATA_DIR + \"test_features_\" + seq_length + \".pkl\"\n \n print(\"Looking for cached feature pickle file\", feature_pickle_file)\n \n if not os.path.exists(feature_pickle_file):\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)\n\n examples_for_processing = [(example, config.hyperparams.MAX_SEQ_LENGTH, tokenizer) for example in data]\n\n process_count = cpu_count() - 1\n \n with Pool(process_count) as p:\n features = list(tqdm(p.imap(convert_example_to_feature, examples_for_processing), total=data_len))\n \n with open(feature_pickle_file, \"wb\") as f:\n pickle.dump(features, f) \n \n with open(feature_pickle_file, \"rb\") as f:\n features = pickle.load(f)\n\n\n logger.info(\" Num examples = %d\", data_len)\n logger.info(\" Batch size = %d\", config.hyperparams.TRAIN_BATCH_SIZE)\n logger.info(\" Num steps = %d\", num_train_optimization_steps)\n \n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([int(f.label_id) for f in features], dtype=torch.long) \n\n tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n t_sampler = RandomSampler(tensor_data)\n dataloader = DataLoader(tensor_data, sampler=t_sampler, batch_size=config.hyperparams.TRAIN_BATCH_SIZE)\n\n return dataloader, data_len, num_labels, num_train_optimization_steps, all_label_ids","sub_path":"util/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":11117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
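A note on the DataLoader.py record that ends here: convert_example_to_feature frames each tokenized example as [CLS] tokens [SEP], zero-pads the ids, mask, and segment ids out to max_seq_length, and marks real tokens with 1 in the attention mask. A minimal, dependency-free sketch of that framing convention follows; the toy vocabulary and whitespace tokenizer are illustrative stand-ins, not part of the original record:

```python
# Toy stand-in for BertTokenizer.tokenize: naive lowercased whitespace split.
def toy_tokenize(text):
    return text.lower().split()

def to_features(text, max_seq_length, vocab):
    # Reserve two slots for [CLS] and [SEP], as the record does with "- 2".
    tokens = ["[CLS]"] + toy_tokenize(text)[:max_seq_length - 2] + ["[SEP]"]
    input_ids = [vocab.get(t, vocab["[UNK]"]) for t in tokens]
    input_mask = [1] * len(input_ids)            # 1 marks real tokens
    padding = [0] * (max_seq_length - len(input_ids))
    return input_ids + padding, input_mask + padding

vocab = {"[PAD]": 0, "[UNK]": 1, "[CLS]": 2, "[SEP]": 3, "hello": 4, "world": 5}
ids, mask = to_features("Hello world", 8, vocab)
print(ids)   # [2, 4, 5, 3, 0, 0, 0, 0]
print(mask)  # [1, 1, 1, 1, 0, 0, 0, 0]
```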
+{"seq_id":"626902838","text":"import FWCore.ParameterSet.Config as cms\n\n\nprocess = cms.Process(\"Demo\")\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n ''\n )\n)\n\n\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n destinations = cms.untracked.vstring(\"cout\"),\n cout = cms.untracked.PSet(threshold = cms.untracked.string(\"INFO\")))\n\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.GlobalTag.globaltag = \"GR_R_53_V16A::All\"\n\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\nprocess.load(\"Configuration.StandardSequences.GeometryDB_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_38T_cff\")\n\nprocess.MuonTracksAnalyzer = cms.EDAnalyzer(\"MuonTracksAnalyzer\",\n tracksTag = cms.InputTag(\"globalMuons\"),\n muonsTag = cms.InputTag(\"muons\"),\n minTrackPt = cms.double(20.),\n minTrackEta = cms.double(-2.4),\n maxTrackEta = cms.double(2.4),\n minTrackerHits = cms.int32(10),\n minDTHits = cms.int32(1),\n minCSCHits = cms.int32(1),\n dtWheel = cms.int32(-3), # = -2,-1, 0, 1, 2; use \"-3\" to exclude DT\n dtStation = cms.int32(1),\n dtSector = cms.int32(1),\n cscEndcap = cms.int32(1), # = 1, 2; use \"0\" to exclude CSC\n cscStation = cms.int32(1),\n cscRing = cms.int32(3),\n cscChamber = cms.int32(17)\n)\n\n\n# process.output = cms.OutputModule(\"PoolOutputModule\",\n # fileName = cms.untracked.string(\"output.root\"), #SingleMu_Run2012A_MuAlCalIsolatedMu-13Jul2012-v1_MEp_1_3_1.root\"),\n # outputCommands = cms.untracked.vstring( \"drop *\" ) \n\n # #SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring(\"Path\"))\n# )\n\n\nprocess.p = cms.Path(process.MuonTracksAnalyzer)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"histo.root\")\n )\n\n# process.EndPath = cms.EndPath(process.output)\n","sub_path":"analyzer/Analyzers/MuonTracksAnalyzer/hitanalyzersingle_cfg.py","file_name":"hitanalyzersingle_cfg.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57859466","text":"import time\nimport paho.mqtt.client as paho\n\nbroker=\"bm.dvbrazil.com.br\"\n\ndef on_message(client, userdata, message):\n time.sleep(1)\n print(\"received message = \",str(message.payload.decode(\"utf-16le\")))\n\nclient= paho.Client(\"mqtt-test\")\n\nclient.on_message=on_message\n\nprint(\"connecting to broker \",broker)\nclient.connect(broker)#connect\nclient.loop_start() #start loop to process received messages\nprint(\"subscribing \")\nclient.subscribe(\"Master/7242/+/Message/#\")#subscribe\ntime.sleep(2)\nprint(\"publishing \")\nclient.publish(\"Master/7242/Outgoing/Message/724990/7240021\",\"Test\".encode(\"utf-16le\"))#publish\ntime.sleep(4)\nclient.disconnect() #disconnect\nclient.loop_stop() #stop loop","sub_path":"mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"618464569","text":"from math import ceil, floor\n\nwith open('Q14.txt', 'r') as f:\n prompt = f.read()\n\nprompt = prompt.strip()\nprompt = prompt.split('\\n')\n\nrecs = {}\nfor k in prompt:\n rec = k.split('=>')\n rec[0] = rec[0].strip().split(',')\n rec[1] = rec[1].strip().split(' ')\n assert rec[1][1] not in recs\n recs[rec[1][1]] = {'num': int(rec[1][0]), 'ing': {}}\n for p in 
rec[0]:\n p = p.strip().split(' ')\n recs[rec[1][1]]['ing'][p[1]] = int(p[0])\n\n\ndef get_level(r, recs):\n if 'ORE' in recs[r]['ing']:\n return 1\n else:\n ret = 0\n for i in recs[r]['ing']:\n ret = max(ret, get_level(i, recs)) + 1\n return ret\n\n\nfor r in recs:\n recs[r]['lvl'] = get_level(r, recs)\n\n\ndef combine_ings(ingsa, ingsb):\n for k, v in ingsb.items():\n if k in ingsa:\n ingsa[k] += v\n else:\n ingsa[k] = v\n return ingsa\n\n\nMAX_LVL = recs['FUEL']['lvl']\nMAX_ORE = 1000000000000\n\nnum_fuel = floor(MAX_ORE/720484) #from part a\nlast = None\n\nwhile True:\n ings = recs['FUEL']['ing'].copy()\n\n for k in ings:\n ings[k] = ings[k] * ceil(num_fuel/recs['FUEL']['num'])\n for i in range(MAX_LVL, 0, -1):\n dec = {}\n rem = set()\n for k in ings:\n if recs[k]['lvl'] == i:\n brk = recs[k]['ing'].copy()\n for p in brk:\n brk[p] = brk[p] * ceil(ings[k]/recs[k]['num'])\n dec = combine_ings(dec, brk)\n rem.add(k)\n for k in rem:\n ings.pop(k)\n ings = combine_ings(ings, dec)\n if ings['ORE'] > MAX_ORE:\n num_fuel -= 1\n break\n else:\n num_fuel += 1\n\nprint(num_fuel)\n","sub_path":"2019/Day 14/Q14b.py","file_name":"Q14b.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616438669","text":"'''\ncsfa\nCreator:\n\tAustin \"The Man\" Talbot\nCreation Date:\n\t1/1/19\nVersion history\n---------------\nVersion 1.1-1.7\n\tOnly God Himself knows\nVersion 1.8, 2/20/19:\n\tAdded hierarchical structure with CSFA_base. Also fixed typos and added\n\tflexible input shape for data\nVersion 1.9, 2/20/19:\n\tAdded multi-layer perceptron supervision\nVersion 1.10, 2/20/19:\n\tSplit off multi-layer perceptron, modified CSFA_base\nVersion 1.11, 3/4/19\n\tDocumentation as well as clearer options for encoder\nVersion 1.12, 3/8/19\n\tBugs fixed as well as issues with purely MLP\nVersion 1.13, 3/19/19\n\tModified the learning rate defaults to better values. Normally\n\tnot worthy of a new version but this one actually matters.\nVersion 1.14, 3/24/19\n\tNow we need distinct learning rates depending on global features. \n\tAlso added different iteration amouts for encoder vs global. \n\tFinally added saving abilities\nVersion 1.15 3/25/19\n\tAdded the weighting for the supervision as well as an ability to \n\tscale down the GPU usage.\nVersion 1.16 4/10/19\n\tAdded the proper batchnorm because Tensorflow is f***** stupid\nVersion 1.17 4/11/19\n\tAdded the generative weights \nVersion 1.18 4/24/19\n\tMade ability to supervise on group\nVersion 1.19 5/10/19\n\tAdded another stupid thing to batchnorm because Tensorflow is f******\n\tstupid. Also made a UKUnorm method\nObjects\n-------\nCSFA_base\n\tThe base encoded CSFA model. This has basic operations such as get \n\tparameters, set parameters, create tensors to compute log-likelihood\n\tand optimizer definitions. Used for inheritance so all the boilerplate\n\tcode goes in this object.\nCSFA_encoded_dense\n\tBasic CSFA model with the scores replaced by an encoder. No supervision\n\tincluded. Various options for type of activation function number\n\tof iterations to optimize learning rate etc. Literally most of the \n\tcode to implement this put in CSFA_base.\ndCSFA_encoded_dense\n\tInital supervised CSFA model where the first n factors are blessed to\n\tbe predictive. Cross-entropy loss with a reverse annealed supervision\n\tweight. Can choose start strength increment and max sup strength. 
The\n\tloss is (1-alpha)NLL + alpha*pred to avoid instability.\ndCSFA_L1_encoded_dense\n\tRather than choosing a number of factors to bless we instead make all\n\tfactors predictive with a strong L1 penalty on the supervision \n\tcoefficients to ensure sparsity.\ndCSFA_L1adaptive_encoded_dense\n\tForces sparsity and adapts regularization\ndCSFA_L1adaptive_encoded_dense_double\n\tAllows for supervision on group as well\n'''\n\nimport numpy as np\nimport numpy.random as rand\nfrom numpy.random import normal,multinomial,binomial\nfrom numpy.random import gamma as gg\nimport tensorflow as tf\nimport pickle as pp\nfrom base import SpectralGaussian,Kernels,MatComplex,Mats\nfrom utils import shape,fully_connected_layer,variables_from_scope,toeplitz\nfrom lmc import LMC_DFT,LMC\nimport os,sys,time\nfrom datetime import datetime as dt\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.log_device_placement = True\n\nversion = '1.19'\n\nclass CSFA_base(object):\n\tr'''Defines basic methods for encoded CSFA models\n\t\n\tParameters\n\t----------\n\tL : int\n\t\tNumber of components\n\n\treg : float, default=.01\n\t\tRegularization for both scores and global parameters\n\n\teta : float, default=5.0\n\t\tFixed noise stength\n\n\tQ : int,default=3\n\t\tNumber of spectral gaussians\n\n\tnIter : int,default=1000\n\t\tNumber of training iterations\n\n\tlr_encoder : float,default=1e-4\n\t\tThe learning rate for our neural network A(X)\n\n\tlr_features : float,default=1e-2\n\t\tLearning rate for the means and coregionalization matrices\n\n\tencoderIter : int,default=25 \n\t\tNumber of optimization steps for the encoder \n\n\tfeatureIter : int,default=2\n\t\tNumber of optimization steps for the features\n\n\tR : int, default=2\n\t\tRank of coregionalization matrix\n\n\tname: str, default=\"Default\"\n\t\tName of the object. Will be used later when I add saving \n\t\tcapabilites\n\t\n\tdirName : str, default=\"./tmp\"\n\t\tName of the directory where the model should be saved to. Will be \n\t\tadded later\n\t\n\tdevice : int, default=0\n\t\tWhich GPU to place the jobs on\n\n\tpercGPU : float, default=.49\n\t\tThe percent of GPU memory that should be allocated for the object\n\n\ttrainingMethod : str, default=\"GradientDescent\"\n\t\tThe gradient descent method to use for optimization. 
Options are \n\t\t'GradientDescent', 'Momentum', or 'Adam'\n\n\tmonitorIter : int,default=100\n\t\tWhen I want to see how the parameters change over time this will\n\t\tbe how often to print them out\n\n\tmomentum : float, default=.9\n\t\tIf trainingMethod='Momentum' this is the momentum to use for the\n\t\toptimizer\n \n\tbeta1 : float, default=.9\n\t\tIf trainingMethod='Adam' this is one of the parameters to use \n\t\tfor the optimizer\n\n\tprintIter : int, default=1000\n\t\tHow often to print out the loss\n\n\tbatchSize : int, default=100\n\t\tFor stochastic training the number of data points to include,\n\t\tsampled without replacement\n\n\tk1 : int, default=256\n\t\tNumber of nodes in the first layer\n\n\tk2 : int, default=64\n\t\tNumber of nodes in the second layer\n\n\tnlayer : int, default=1\n\t\tOption for choosing the number of layers (1 or 2)\n\n\tactivationFunction: str, default='sigmoid'\n\t\tThe activation function to use, options are 'sigmoid' or 'relu'\n\t\n\tinit_style : str,default='Uniform'\n\t\tInitialization of the means for the spectral Gaussians \n\n\tunif_bounds : array(2,),default=(1,55)\n\t\tBounds on the means with uniform initialization\n\n\tbatch_size : int,default=100\n\t\tThe number of datapoints used for gradient descent\n\n\tbatch_s :\n\t\tNumber of frequencies to use for stochastic by frequency. Ignored\n\t\tfor now\n\n\tlearn_var : bool,default=False\n\t\tLearn the variances of the spectral gaussian\n\n\tvarLam : float,default=1.0\n\t\tPut a penatly on the side of the variances per David's suggestion\n\t\n\tpercGPU : floatin [0,1],default=0.5\n\t\tThe amount of the GPU to use, can run multiple jobs on single GPU\n\t\n\tAttributes\n\t----------\n\tcreationDate\n\t\tDate and time the object was created\n\n\tversion\n\t\tWhat version the model as being created as\n\n\tMethods\n\t-------\n\tdef getParams(self,session):\n\t\tGets the parameters and takes them out of the graph\n\t\n\tdef setParams(self,session,params):\n\t\tTakes a session and dictionary and sets the graph\n\t\n\tdef clip(self,session):\n\t\tClips the parameters to be within their bounds\n\n\tdef bNorm(self):\n\t\tReturns the normalization parameter on the coregionalization \n\t\tmatricies to ensure identifiability\n\t\n\tdef _batch(self,Nw):\n\t\tOur batching method for the data\n\n\tdef _definePlaceholdersEncoderLikelihood(self,s,Ns,Nc,Nw):\n\t\tDefines all the placeholders and operations to compute the \n\t\tnegative log-likelihood\n\n\tdef _defineOptimization(self):\n\t\tDefines the optimizer given specification\n\n\tdef _defineInitialization(self):\n\t\tJust defines the tensorflow init method\n\n\tdef _defineMonitor(self,monitor):\t\t\t\n\t\tCreates the dictionary needed to monitor parameter learning\n\n\tdef _updateMonitor(self,sess,myDict,count,monitor):\n\t\tUpdates the monitor variable with the current variable values\n\n\tdef _saveGraph(self,saver,session):\n\t\tSaves the graph with all the trained parameters\n\t\n\tdef transform(self,data_real):\n\t\tTransforms the data and returns a numpy array\n\t\n\tdef save_transform(self,data_real,fileName=None):\n\t\tTransforms the data and saves it to a file\n\n\tExamples\n\t--------\n\n\tReferences\n\t----------\n\tGallagher, Neil, et al. \"Cross-spectral factor analysis.\" \n\t\t\tAdvances in Neural Information Processing Systems. 
2017.\n\t'''\n\tdef __init__(self,L,reg=.01,eta=5.0,Q=3,nIter=2000,lr_encoder=1e-4,\n\t\t\t\t\tlr_features=1e-2,encoderIter=25,featureIter=2,R=2,\n\t\t\t\t\tname='Default',dirName='./tmp',device=0,percGPU=0.49,\n\t\t\t\t\ttrainingMethod='GradientDescent',monitorIter=100,\n\t\t\t\t\tmomentum=.9,beta1=.9,printIter=1000,k1=256,k2=64,\n\t\t\t\t\tnlayer=1,activationFunction='sigmoid',\n\t\t\t\t\tinit_style='Uniform',unif_bounds=(1,55),\n\t\t\t\t\tbatch_size=100,learnVar=False,varLam=1.0):\n\t\tself.L = int(L) \n\t\tself.Q = int(Q) \n\t\tself.eta = float(eta) \n\t\tself.nIter = int(nIter) \n\t\tself.lr_encoder = float(lr_encoder) \n\t\tself.lr_features = float(lr_features) \n\t\tself.encoderIter = int(encoderIter)\n\t\tself.featureIter = int(featureIter)\n\t\tself.R = int(R)\n\t\tself.reg = float(reg)\n\t\t\n\t\tself.name = str(name) \n\t\tself.dirName = str(dirName) \n\t\tself.device = int(device) \n\t\tself.version = version \n\t\t\n\t\tself.creationDate = dt.now() \n\t\tself.trainingMethod = str(trainingMethod) \n\t\tself.monitorIter = int(monitorIter) \n\t\tself.momentum = float(momentum) \n\t\tself.beta1 = float(beta1) \n\t\tself.printIter = int(printIter) \n\n\t\tself.k1 = int(k1) \n\t\tself.k2 = int(k2) \n\t\tself.batch_size = int(batch_size) \n\n\t\tself.gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=float(percGPU))\n\n\t\tself.learnVar = learnVar \n\t\tself.varLam = float(varLam) \n\n\t\tself.init_style = init_style\n\t\tself.unif_bounds = unif_bounds\n\t\tself.activationFunction = str(activationFunction)\n\t\tself.nlayer = int(nlayer)\n\n\tdef getParams(self,session):\n\t\t'''get the model parameters out of graph\n\t\tParameters\n\t\t----------\n\t\tsession: tf.Session\n\t\t\tsession containing all the variables of the model\n\t\tReturns \n\t\t-------\n\t\tparams: dict\n\t\t\tcontains the global parameters as 'LMC'\n\t\t'''\n\t\tparams = {}\n\t\tparams['LMC'] = [self.LMCkernels[l].getParams(session) for l in range(self.L)]\n\t\treturn params\n\t\n\tdef setParams(self,session,params):\n\t\t'''\n\t\tParameters\n\t\t----------\n\t\tsession: tf.Session\n\t\t\tsession containing all the variables of the model\n\t\tparams: dict\n\t\t\tDictionary containing the parameters from getParams\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t'''\n\t\tif 'LMC' in params:\n\t\t\tfor l in range(self.L):\n\t\t\t\tself.LMCkernels[l].setParams(session,params['LMC'][l])\n\t\tif 'encoder' in params:\n\t\t\tpass\n\t\n\tdef clip(self,session):\n\t\t\"\"\" Forces the parameters to be within preset bounds\n\t\tParameters\n\t\t----------\n\t\tsession : tf.Session\n\t\t\tSession that we are currently running tensorflow in \n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t\"\"\"\n\t\tfor l in range(self.L):\n\t\t\tself.LMCkernels[l].clip(session)\n\n\tdef bNorm(self):\n\t\t'''\n\t\tParameters\n\t\t----------\n\t\tNone\n\t\tReturns\n\t\t-------\n\t\tBdiagstack : tf.tensor,like=(self.L,self.Q,self.C)\n\t\t\tGets the diagonals of the coregionalization matrices which we\n\t\t\tuse for identifiability due to regularization\n\t\t'''\n\t\tfor l in range(self.L):\n\t\t\tBdiag = [self.LMCkernels[l].getBdiag() for l in range(self.L)]\n\t\tBdiagStack = tf.stack(Bdiag) # L x Q x C\n\t\treturn BdiagStack\n\t\n\tdef _batch(self,Nw):\n\t\t'''This ramdomly selects data indices. 
Potentially will be modified\n\t\t\tto cycle through all the data\n\t\tParameters\n\t\t----------\n\t\tNw : int\n\t\t\tNumber of windows\n\t\tReturns\n\t\t-------\n\t\tidx : bool-like (Nw,)\n\t\t\tSelects batch_size windows used for gradient descent\n\t\t'''\n\t\tbatch = rand.choice(Nw,size=self.batch_size,replace=False)\n\t\tidx = np.zeros(Nw)\n\t\tidx[batch] = 1\n\t\treturn (idx==1)\n\n\tdef _definePlaceholdersEncoderLikelihood(self,s,Ns,Nc,Nw,Np):\n\t\t'''\n\t\tParameters\n\t\t----------\n\t\ts : tensor-like (Ns)\n\t\t\tThe frequencies we wish to evaluate the likelihood not used yet\n\t\tNs : int\n\t\t\tNumber of frequencies\n\t\tNc : int\n\t\t\tNumber of channels\n\t\tNw : int\n\t\t\tNumber of windows\n\t\tReturns\n\t\t-------\n\t\tNone:\n\t\t\tCreates a bunch of object attributes related to computing NLL\n\t\t'''\n\t\t########\n\t\t# Data #\n\t\t########\n\n\t\t# Frequencies are a placeholder in case we decide to do stochastic \n\t\tself.s = tf.placeholder(tf.float32,shape=[Ns],name='s_')\n\t\t#The fourier transformed data\n\t\tself.y_fft = tf.placeholder(tf.complex64,\n\t\t\t\t\tshape=[Ns,Nc,None],name='y_fft')\n\t\t#Data used for the encoder. Reals\n\t\tself.Y_flat = tf.placeholder(tf.float32,\n\t\t\t\t\t\t\t\tshape=[None,Np],name='Y_flat')\n\n\t\t###########\n\t\t# Kernels #\n\t\t###########\n\t\twith tf.variable_scope('global'):\n\t\t\tself.LMCkernels = [LMC(Nc,self.Q,self.R,learnVar=self.learnVar,\n\t\t\t\t\tinit_style=self.init_style,\n\t\t\t\t\tunif_bounds=self.unif_bounds) for i in range(self.L)]\n\n\t\t###########\n\t\t# Encoder #\n\t\t###########\n\t\twith tf.variable_scope('encoder'):\n\t\t\tself.A_enc = tf.Variable(1/self.L*rand.randn(Np,self.L).astype(np.float32),name='A_enc')\n\t\t\tself.B_enc = tf.Variable(1/self.L*rand.randn(self.L).astype(np.float32),name='B_enc')\n\t\t\tself.phi = tf.Variable(1/self.L*rand.randn(3,1).astype(np.float32),name='phi')\n\t\t\tself.bias = tf.Variable(0.1*rand.randn(1).astype(np.float32),\n\t\t\t\t\t\t\t\t\tname='bias')\n\t\t\tself.out_mul = tf.matmul(self.Y_flat,self.A_enc)\n\t\t\tself.out = tf.add(self.out_mul,self.B_enc,name='out')\n\t\t\tself.scores = tf.nn.softplus(self.out,name='scores')\n\t\t\tself.logits = tf.matmul(self.scores[:,:3],self.phi) + self.bias\n\n\t\t###########################\n\t\t# Evaluate log-likelihood #\n\t\t###########################\n\t\t#Combine the factor UKU matrices\n\t\tself.UKUL = [self.LMCkernels[l].UKU(self.s) for l in range(self.L)]\n\t\tself.UKUstore = tf.stack(self.UKUL)\n\t\tself.UKUstorep = tf.transpose(self.UKUstore,perm=[2,3,1,0])\n\n\t\t# Make scores proper dimension\n\t\tself.scores_c = tf.cast(tf.transpose(self.scores),\n\t\t\t\t\t\t\t\t\tdtype=tf.complex64)\n\t\tself.scores_c1 = tf.expand_dims(self.scores_c,axis=0)\n\t\tself.scores_c2 = tf.expand_dims(self.scores_c1,axis=0)\n\t\tself.scores_c3 = tf.expand_dims(self.scores_c2,axis=0)\n\t\tself.UKUe = tf.expand_dims(self.UKUstorep,axis=-1)\n\n\t\t#Multiply scores\n\t\tself.prod_uku = tf.multiply(self.scores_c3,self.UKUe)\n\t\tself.prod_ukuT = tf.transpose(self.prod_uku,perm=[4,3,2,0,1])\n\t\tself.UKUscores = tf.reduce_sum(self.prod_ukuT,axis=1)\n\n\t\t#Add in the noise \n\t\tself.noise = tf.cast(1/self.eta*tf.eye(self.C),tf.complex64)\n\t\tself.UKUnoise_half = tf.add(self.UKUscores,self.noise)\n\t\tself.UKUnoise = 2*self.UKUnoise_half\n\n\t\t#Transform Y into the proper shape\n\t\tself.Yp = tf.transpose(self.y_fft,perm=[2,0,1])\n\t\tself.Yp1 = tf.expand_dims(self.Yp,axis=-1)\n\t\tself.Yc = tf.squeeze(tf.conj(self.Yp))\n\n\t\t#Get the quadratic 
form\n\t\tself.SLV = tf.linalg.solve(self.UKUnoise,self.Yp1)\n\t\tself.SLVs = tf.squeeze(self.SLV)\n\t\tself.Quad = tf.multiply(self.SLVs,self.Yc)\n\t\tself.QL = tf.reduce_sum(self.Quad,axis=-1) #Nw x Ns\n\n\t\t#This is where we do the proper weighting \n\t\tself.llk = tf.reduce_mean(self.QL) \n\n\t\t#Get log determinant\n\t\tself.LD = tf.linalg.logdet(self.UKUnoise)\n\n\t\t#Normalization constant\n\t\tself.const = tf.cast(Nc*np.log(np.pi)*-1,tf.complex64)\n\n\t\t#Add together for final likelihood\n\t\tself.logDet = tf.cast(tf.reduce_mean(self.LD),tf.complex64)\n\t\tself.LogLikelihood = self.const - self.logDet - self.llk\n\t\tself.eval = tf.real(self.LogLikelihood)\n\n\t\t#################################\n\t\t# Final negative log-likelihood #\n\t\t#################################\n\t\tself.NLL = -1.0*self.eval\n\t\tself.logLikelihood = tf.identity(self.NLL,name='LL_')\n\n\t\t# Regularization of scores\n\t\tself.reg_scores = 0.01*tf.nn.l2_loss(self.scores)\n\n\t\t#Regularization of factors\n\t\tself.reg_features = tf.nn.l2_loss(tf.real(self.bNorm()))\n\n\t\t#################################################\n\t\t# Define the final loss with classification etc #\n\t\t# somewhere else #\n\t\t#################################################\n\n\t\t#####################\n\t\t# Stuff for UKUnorm #\n\t\t#####################\n\t\t#self.sf = tf.placeholder(tf.float32,shape=[None],name='sf_')\n\t\ts_fine = np.arange(1000)/1000*55\n\t\tself.sf = tf.constant(s_fine.astype(np.float32))\n\t\tself.UKUL2_norm = [self.LMCkernels[l].UKU(self.sf) for l in range(self.L)]\n\t\tself.UKUstore_norm = tf.stack(self.UKUL2_norm)\n\t\tself.UKUstorep_norm = tf.transpose(self.UKUstore_norm,perm=[2,3,1,0])\n\t\tself.UKUndiv = tf.reduce_sum(tf.abs(self.UKUstorep_norm),axis=3,keepdims=True)\n\t\tself.UKUnorm = tf.divide(self.UKUstorep_norm,tf.cast(self.UKUndiv,tf.complex64))\n\n\tdef _defineOptimization(self):\n\t\t'''Defines our optimization algorithm and sets it as a \n\t\tobject attribute.\n\t\tParameters\n\t\t----------\n\t\tNone\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t'''\n\t\tle = self.lr_encoder\n\t\tlf = self.lr_features\n\t\tif self.trainingMethod == \"GradientDescent\":\n\t\t\tself.optimstep_l = tf.train.GradientDescentOptimizer(learning_rate=lf).minimize(self.loss,var_list=variables_from_scope('global'))\n\t\t\tself.optimstep_e = tf.train.GradientDescentOptimizer(learning_rate=le).minimize(self.loss,var_list=variables_from_scope('encoder'))\n\t\telif self.trainingMethod == \"Momentum\":\n\t\t\tself.optimstep_l = tf.train.MomentumOptimizer(learning_rate=lf,\n\t\t\t\t\tmomentum=self.momentum).minimize(self.loss,\n\t\t\t\t\tvar_list=variables_from_scope('global'))\n\t\t\tself.optimstep_e = tf.train.MomentumOptimizer(learning_rate=le,\n\t\t\t\t\tmomentum=self.momentum).minimize(self.loss,\n\t\t\t\t\tvar_list=variables_from_scope('encoder'))\n\t\telif self.trainingMethod == \"Adam\":\n\t\t\tself.optimstep_l = tf.train.AdamOptimizer(learning_rate=lf,\n\t\t\t\t\tbeta1=self.beta1).minimize(self.loss,\n\t\t\t\t\tvar_list=variables_from_scope('global'))\n\t\t\tself.optimstep_e = tf.train.AdamOptimizer(learning_rate=le,\n\t\t\t\t\tbeta1=self.beta1).minimize(self.loss,\n\t\t\t\t\tvar_list=variables_from_scope('encoder'))\n\t\t\tself.optimstep_p = tf.train.AdamOptimizer(learning_rate=le,\n\t\t\t\t\tbeta1=self.beta1).minimize(self.loss_pred,\n\t\t\t\t\tvar_list=variables_from_scope('encoder'))\n\t\telse:\n\t\t\tprint('Unrecognized training method')\n\n\tdef _defineInitialization(self):\n\t\t''' Makes the initialization 
method and sets it as an attribute\n\t\tParameters\n\t\t----------\n\t\tNone\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t'''\n\t\tinit_global = tf.global_variables_initializer()\n\t\tinit_local = tf.local_variables_initializer()\n\t\tself.init = tf.group(init_global,init_local)\n\t\n\tdef _saveGraph(self,saver,session):\n\t\t'''This saves the tensorflow graph and values to self.dirName\n\t\tParamters\n\t\t---------\n\t\tsaver : tf.Saver\n\t\t\tShould be defined in the fit method\n\t\tsession : tf.Session\n\t\t\tThe session where the model was trained\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t'''\n\t\tpn = self.dirName + '/' + self.name + '.ckpt'\n\t\tsave_path = saver.save(session,pn)\n\t\tself.chkpt = save_path\n\t\tself.meta = self.dirName + '/' + self.name + '.ckpt.meta'\n\t\n\tdef transform(self,data_real):\n\t\tr'''This takes a pretrained model and computes the network \n\t\tstregnths on new data.\n\t\tParameters\n\t\t----------\n\t\tdata_real : np.array-like, (n_samples,n_features)\n\t\t\tThe features we use to estimate the scores\n\t\tReturns\n\t\t-------\n\t\tS : np.array-like, (n_samples,n_components)\n\t\t\tTransformed scores of the latent features\n\t\t'''\n\t\tdata_real = data_real.astype(np.float32)\n\t\tcheckpoint = tf.train.latest_checkpoint(self.dirName)\n\t\twith tf.Session(config=tf.ConfigProto(gpu_options=self.gpu_options)) as sess:\n\t\t\tnew_saver = tf.train.import_meta_graph(self.meta)\n\n\t\t\t#This restores the graphs along with all the parameter values\n\t\t\tgraph = tf.get_default_graph()\n\t\t\tnew_saver.restore(sess,checkpoint)\n\n\t\t\t#Select the two variables we care about\n\t\t\tY_flat = graph.get_tensor_by_name('Y_flat:0')\n\t\t\tscores = graph.get_tensor_by_name('encoder/scores:0')\n\n\t\t\tS = sess.run(scores,feed_dict={Y_flat:data_real})\n\n\t\treturn S\n\n\tdef transform_withLikelihood(self,s,data_real,data_complex):\n\t\tbs = self.batch_size\n\t\tdata_real = data_real.astype(np.float32)\n\n\t\tcheckpoint = tf.train.latest_checkpoint(self.dirName)\n\t\twith tf.Session(config=tf.ConfigProto(gpu_options=self.gpu_options)) as sess:\n\t\t\tnew_saver = tf.train.import_meta_graph(self.meta)\n\n\t\t\t#This restores the graphs along with all the parameter values\n\t\t\tgraph = tf.get_default_graph()\n\t\t\tnew_saver.restore(sess,checkpoint)\n\n\t\t\t#Select the two variables we care about\n\t\t\tY_flat_ = graph.get_tensor_by_name('Y_flat:0')\n\t\t\ts_ = graph.get_tensor_by_name('s_:0')\n\t\t\ty_fft_ = graph.get_tensor_by_name('y_fft:0')\n\t\t\tscores = graph.get_tensor_by_name('encoder/scores:0')\n\t\t\tlogLikelihood = graph.get_tensor_by_name('LL_:0')\n\n\t\t\tN = data_real.shape[0]\n\t\t\tS = np.zeros((N,self.L))\n\n\t\t\tnChunk = int(np.floor(N/self.batch_size))\n\t\t\tnEnd = N-bs*nChunk\n\t\t\tLLs = np.zeros(nChunk)\n\t\t\tfor i in range(nChunk):\n\t\t\t\tprint('<<<<<<<<<,',i,nChunk)\n\t\t\t\tdr_b = data_real[i*bs:(i+1)*bs]\n\t\t\t\tdc_b = data_complex[:,:,i*bs:(i+1)*bs]\n\t\t\t\tprint('Transform ',i)\n\t\t\t\t[S_b,LL_b] = sess.run([scores,logLikelihood],\n\t\t\t\tfeed_dict={Y_flat_:dr_b,s_:s,y_fft_:dc_b})\n\t\t\t\tS[i*bs:(i+1)*bs] = S_b\n\t\t\t\tLLs[i] = LL_b \n\n\t\t\t#Get the last ones at the end\n\t\t\tdr_b = data_real[-bs:]\n\t\t\tdc_b = data_complex[:,:,-bs:]\n\t\t\t[S_b,LL_b] = sess.run([scores,logLikelihood],\n\t\t\tfeed_dict={Y_flat_:dr_b,s_:s,\n\t\t\ty_fft_:dc_b})\n\n\t\t\tS[bs*nChunk:] = S_b[:nEnd]\n\n\t\t\tLL = (bs*np.sum(LLs)+bs*LL_b)/N\n\n\t\treturn S,LL \n\n\t\n\tdef getUKUnorm(self,sess):\n\t\tr'''Returns 
ukunorm\n\t\tParamters\n\t\t---------\n\t\tsess : tf.Session\n\t\t\tThe session where we train the model\n\t\tReturns\n\t\t-------\n\t\tmyUKUnorm : np.array-like (n_channels,n_channels,Ns,self.L)\n\t\t'''\n\t\tmyUKUstore_norm = sess.run(self.UKUstore_norm)\n\t\tmyUKUstorep_norm = sess.run(self.UKUstorep_norm)\n\t\tmyUKUndiv = sess.run(self.UKUndiv)\n\t\tmyUKUnorm = sess.run(self.UKUnorm)\n\t\tmyDict = {'myUKUstore_norm':myUKUstore_norm,\n\t\t\t\t\t'myUKUstorep_norm':myUKUstorep_norm,\n\t\t\t\t\t'myUKUndiv':myUKUndiv,\n\t\t\t\t\t'myUKUnorm':myUKUnorm}\n\t\treturn myDict\n\n\tdef transform_inSession(self,data_real,sess):\n\t\tr'''This transforms the data without having to load the graph\n\t\tParameters\n\t\t----------\n\t\tdata_real : np.array-like, (n_samples,n_features)\n\t\t\tThe features we use to estimate the scores\n\t\tsess : tf.Session \n\t\t\tThe session where we train the model\n\t\tReturns\n\t\t-------\n\t\tS : np.array-like, (n_samples,n_components)\n\t\t\tTransformed scores of the latent features\n\t\t'''\n\t\tS = sess.run(self.scores,feed_dict={self.Y_flat:data_real})\n\t\treturn S\n\t\n\tdef save_transform(self,data_real,fileName=None):\n\t\tr'''This transforms the data and saves as csv. This saves all \n\t\tnetworks.\n\t\tParameters \n\t\t----------\n\t\tdata_real : np.array-like, (n_samples,n_features)\n\t\t\tThe features we use to estimate the scores\n\t\tfileName : string,optional\n\t\t\tThe file we ant to save the model to.\n\t\tReturns\n\t\t-------\n\t\tNone\n\t\t'''\n\t\tS = self.transform(data_real)\n\t\tif fileName is None:\n\t\t\tfName = self.name + '_scores.csv'\n\t\telse:\n\t\t\tfName = fileName\n\n\t\tnp.savetxt(fName,S,fmt='%0.8f',delimiter=',')\n\n\tdef _defineMonitor(self,monitor):\t\t\t\n\t\t'''This defines our dictionary we use to monitor parameter learning\n\t\tParameters\n\t\t----------\n\t\tReturns\n\t\t-------\n\t\t'''\n\n\t\tlosses = np.zeros(self.nIter)\n\t\tnSave = int(np.ceil(self.nIter/self.monitorIter))\n\t\tmyDict = {'losses':losses}\n\t\tif monitor is None:\n\t\t\tpass\n\t\telse:\n\t\t\tif 'means' in monitor:\n\t\t\t\t#Save spectral gausssian means\n\t\t\t\tmyDict['means'] = np.zeros((nSave,self.L,self.Q))\n\t\t\tif 'vars' in monitor:\n\t\t\t\t#Save spectral gaussian variances\n\t\t\t\tmyDict['vars'] = np.zeros((nSave,self.L,self.Q))\n\t\t\tif 'logWeights' in monitor:\n\t\t\t\t#Coregionalization matrices\n\t\t\t\tmyDict['coregs']= np.zeros((nSave,self.L,self.Q,\n\t\t\t\t\t\t\t\t\t\t\t\tself.C,self.R))\n\t\t\tif 'shifts' in monitor:\n\t\t\t\t#Shift matrices\n\t\t\t\tmyDict['shifts'] = np.zeros((nSave,self.L,self.Q,\n\t\t\t\t\t\t\t\t\t\t\t\tself.C,self.R))\n\n\t\treturn myDict\n\n\tdef _updateMonitor(self,session,myDict,count,monitor):\n\t\t'''This automatically updates our tracking\n\t\tParameters\n\t\t----------\n\t\tReturns\n\t\t-------\n\t\t'''\n\t\t#Added the losses earlier\n\t\tparams = self.getParams(session)\n\t\tLMC = params['LMC']\n\t\tfor l in range(self.L):\n\t\t\tfor q in range(self.Q):\n\t\t\t\tkernel = LMC[l]['kernels'][q]\n\t\t\t\tcoreg = LMC[l]['coregs'][q]\n\t\t\t\tif 'means' in monitor:\n\t\t\t\t\t#Save spectral gausssian means\n\t\t\t\t\tmyDict['means'][count,l,q] = kernel['mu']\n\t\t\t\tif 'vars' in monitor:\n\t\t\t\t\t#Save spectral gaussian variances\n\t\t\t\t\tmyDict['vars'][count,l,q] = kernel['var']\n\t\t\t\tif 'logWeights' in monitor:\n\t\t\t\t\t#Coregionalization matrices\n\t\t\t\t\tmyDict['coregs'][count,l,q,:,:] = coreg['logWeights']\n\t\t\t\tif 'shifts' in monitor:\n\t\t\t\t\t#Shift 
matrices\n\t\t\t\t\tmyDict['shifts'][count,l,q,:,:] = coreg['shifts']\n\n\tdef evaluate(self,s,data_real,data_complex,sess):\n\t\t# note: needs an active tf.Session (e.g. the one returned by fit)\n\t\tbs = self.batch_size\n\t\tNtimes = int(np.floor(data_real.shape[0]/bs))\n\t\tNw = data_real.shape[0]\n\t\tremainder = Nw - Ntimes*bs\n\n\t\tN_tot = 0\n\n\t\tfor i in range(Ntimes):\n\t\t\taa2 = data_complex[:,:,bs*i:(i+1)*bs]\n\t\t\tbb = data_real[bs*i:bs*(i+1)]\n\t\t\tloglike = sess.run(self.NLL,feed_dict={self.s:s,\n\t\t\t\t\t\t\t\tself.y_fft:aa2,\n\t\t\t\t\t\t\t\tself.Y_flat:bb})\n\t\t\tN_tot += loglike\n\n\t\treturn N_tot/Ntimes\n\nclass dCSFA_L1adaptive_encoded_dense(CSFA_base):\n\tdef __init__(self,L,reg=.01,eta=5.0,Q=3,nIter=1000,lr_encoder=1e-4,\n\t\t\t\t\tlr_features=1e-2,encoderIter=25,featureIter=2,R=2,\n\t\t\t\t\tname='Default',dirName='./tmp',device=0,percGPU=0.49,\n\t\t\t\t\ttrainingMethod='GradientDescent',monitorIter=100,\n\t\t\t\t\tmomentum=.9,beta1=.9,printIter=1000,\n\t\t\t\t\tinit_style='Uniform',unif_bounds=(1,55),n_feat=3,\n\t\t\t\t\tnlayer=1,activationFunction='sigmoid',phi_init=1.0,\n\t\t\t\t\tphi_increase=1.1,phi_decrease=.9,phi_monitor_iter=1,\n\t\t\t\t\tbatch_size=100,learnVar=False,varLam=1.0,\n\t\t\t\t\tmu_max=1.0,mu_start=.01,mu_increase=1.001):\n\t\tsuper(dCSFA_L1adaptive_encoded_dense,self).__init__(L,reg=reg,\n\t\t\t\teta=eta,Q=Q,nIter=nIter,R=R,name=name,dirName=dirName,\n\t\t\t\tdevice=device,percGPU=percGPU,\n\t\t\t\tlr_encoder=lr_encoder,lr_features=lr_features,\n\t\t\t\tencoderIter=encoderIter,featureIter=featureIter,\n\t\t\t\ttrainingMethod=trainingMethod,monitorIter=monitorIter,\n\t\t\t\tmomentum=momentum,beta1=beta1,printIter=printIter,\n\t\t\t\tinit_style=init_style,unif_bounds=unif_bounds,\n\t\t\t\tbatch_size=batch_size,learnVar=learnVar,\n\t\t\t\tvarLam=varLam,nlayer=nlayer,\n\t\t\t\tactivationFunction=activationFunction)\n\n\t\tself.mu_max = float(mu_max)\n\t\tself.mu_start = float(mu_start)\n\t\tself.mu_increase = float(mu_increase)\n\n\t\tself.init_style = init_style\n\t\tself.unif_bounds = unif_bounds\n\n\t\tself.phi_init = float(phi_init)\n\t\tself.phi_increase = float(phi_increase)\n\t\tself.phi_decrease = float(phi_decrease)\n\t\tself.phi_monitor_iter = int(phi_monitor_iter)\n\t\tself.n_feat = int(n_feat)\n\n\tdef __repr__(self):\n\t\treturn 'dCSFA_L1adaptive_encoded_dense\\nL=%d\\nQ=%d\\neta=%0.3f\\nnIter=%d\\nlr_encoder=%0.8f\\nlr_features=%0.8f\\nR=%d\\nname=%s\\ndirName=%s\\ndevice=%d\\nversion=%s\\ntrainingMethod=%s\\nk1=%d\\nk2=%d\\nbatch_size=%d\\nn_feat=%d\\nmu_start=%0.3f\\nmu_max=%0.3f'%(self.L,self.Q,self.eta,self.nIter,self.lr_encoder,self.lr_features,self.R,self.name,self.dirName,self.device,self.version,self.trainingMethod,self.k1,self.k2,self.batch_size,self.n_feat,self.mu_start,self.mu_max)\n\n\tdef getParams(self,session):\n\t\tparams = {}\n\t\tparams['LMC'] = [self.LMCkernels[l].getParams(session) for l in range(self.L)]\n\t\tparams['phi'] = session.run(self.phi)\n\t\treturn params\n\n\tdef setParams(self,session,params):\n\t\tif 'LMC' in params:\n\t\t\tfor l in range(self.L):\n\t\t\t\tself.LMCkernels[l].setParams(session,params['LMC'][l])\n\t\tif 'encoder' in params:\n\t\t\tpass\n\n\tdef _initialize(self,s,Ns,Nc,Nw,Np):\n\t\t#Limit ourselves to a particular gpu\n\t\tos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n\t\tdev = str(int(self.device))\n\t\tos.environ[\"CUDA_VISIBLE_DEVICES\"] = dev\n\n\t\t##########################\n\t\t##########################\n\t\t### ###\n\t\t### DEFINING THE GRAPH ###\n\t\t### ###\n\t\t##########################\n\t\t##########################\n\t\ttf.reset_default_graph()\n\n\t\t#########################\n\t\t# Placeholders and data
#\n\t\t#########################\n\t\tself._definePlaceholdersEncoderLikelihood(s,Ns,Nc,Nw,Np)\n\t\tself.z_ = tf.placeholder(tf.float32,[self.batch_size])\n\t\tself.mu = tf.placeholder(tf.float32,[])\n\t\tself.alpha = self.mu/(1. + self.mu)\n\n\t\t#######################\n\t\t# Reconstruction Loss #\n\t\t#######################\n\t\tself.ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.z_,\n\t\t\t\t\t\t\t\t\tlogits=tf.squeeze(self.logits))\n\t\tself.dloss = tf.reduce_sum(self.ce)\n\n\t\tself.loss_pred = self.dloss + 1/Np*tf.nn.l2_loss(self.A_enc)\n\n\t\t############\n\t\t# Sparsity #\n\t\t############\n\t\tself.phi_loss = tf.reduce_mean(tf.abs(self.phi))\n\n\t\t###########################\n\t\t# Final optimization loss #\n\t\t###########################\n\t\tself.loss = self.alpha*self.NLL + 2*(1-self.alpha)*self.dloss + 0.5*self.reg*self.reg_scores + 0.01*self.reg*self.reg_features + 1*self.phi_loss\n\n\t\t###############################################\n\t\t# Define the optimizer based on learning type #\n\t\t###############################################\n\t\tself._defineOptimization()\n\t\t\t\n\t\t########################\n\t\t# Initialize the graph #\n\t\t########################\n\t\tself._defineInitialization()\n\n\tdef fit(self,s,data_real,data_complex,labels,monitor=None):\n\t\tNs,Nc,Nw = data_complex.shape\n\t\tself.C = Nc\n\t\tC,Q,R = self.C,self.Q,self.R\n\t\tNp = data_real.shape[1]\n\n\t\t####################\n\t\t# Create the Graph #\n\t\t####################\n\t\tself._initialize(s,Ns,Nc,Nw,Np)\n\n\t\t############################################################\n\t\t# Here we create the variables for monitoring the training #\n\t\t############################################################\n\t\tnSave = int(np.ceil(self.nIter/self.monitorIter))\n\t\ttraining = self._defineMonitor(monitor)\n\t\ttraining['dlosses'] = np.zeros(self.nIter)\n\t\ttraining['NLL'] = np.zeros(self.nIter)\n\t\ttraining['pr'] = np.zeros(self.nIter)\n\n\t\tif monitor is None:\n\t\t\tpass\n\t\telse:\n\t\t\tif 'phis' in monitor:\n\t\t\t\tmyPhi = np.zeros((nSave,self.L))\n\n\t\tcount = 0\n\t\tstartTime = time.time()\n\t\tsess = tf.Session(config=tf.ConfigProto(gpu_options=self.gpu_options))\n\n\t\tmu = self.mu_start\n\n\t\tsaver = tf.train.Saver()\n\n\t\tif 1 == 1:\n\t\t\tsess.run(self.init)\n\n\t\t\tfor i in range(self.nIter):\n\t\t\t\tprint('>>',i)\n\t\t\t\tmu = np.minimum(mu*self.mu_increase,self.mu_max)\n\t\t\t\tidx = self._batch(Nw)\n\t\t\t\talpha = mu/(1. 
+ mu)\n\n\t\t\t\tbb = data_real[idx]\n\t\t\t\taa2 = data_complex[:,:,idx]\n\t\t\t\tzz = labels[idx]\n\n\t\t\t\tfor j in range(self.featureIter):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsess.run(self.optimstep_l,feed_dict={self.s:s,\n\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\tself.y_fft:aa2,\n\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\tself.z_:zz})\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint('1')\n\t\t\t\t\t\ttraining['scores'] = sess.run(self.scores,\n\t\t\t\t\t\t\t\t\tfeed_dict={self.s:s,self.y_fft:aa2,\n\t\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\t\tself.z_:zz})\n\t\t\t\t\t\tself._updateMonitor(sess,training,count,monitor)\n\t\t\t\t\t\tparams = self.getParams(sess)\n\t\t\t\t\t\ttraining['idxs'] = idx\n\t\t\t\t\t\treturn params,training,sess\n\n\t\t\t\tfor iii in range(self.encoderIter):\n\t\t\t\t\ttry:\n\t\t\t\t\t\t_,ll,dl,loglike = sess.run([self.optimstep_e,\n\t\t\t\t\t\t\t\tself.loss,self.dloss,self.NLL],\n\t\t\t\t\t\t\t\tfeed_dict={self.s:s,\n\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\tself.y_fft:aa2,\n\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\tself.z_:zz})\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint('2')\n\t\t\t\t\t\ttraining['scores'] = sess.run(self.scores,\n\t\t\t\t\t\t\t\t\t\tfeed_dict={self.s:s,self.y_fft:aa2,\n\t\t\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\t\t\tself.z_:zz})\n\t\t\t\t\t\tself._updateMonitor(sess,training,count,monitor)\n\t\t\t\t\t\tparams = self.getParams(sess)\n\t\t\t\t\t\ttraining['idxs'] = idx\n\t\t\t\t\t\treturn params,training,sess\n\n\t\t\t\tfor jj in range(20):\n\t\t\t\t\t_ = sess.run(self.optimstep_p,\n\t\t\t\t\t\t\t\tfeed_dict={self.s:s,\n\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\tself.y_fft:aa2,\n\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\tself.z_:zz})\n\n\t\t\t\t#########################################################\n\t\t\t\t# Here is where we monitor the variables we've selected #\n\t\t\t\t#########################################################\n\t\t\t\ttraining['dlosses'][i] = dl\n\t\t\t\ttraining['losses'][i] = ll\n\t\t\t\ttraining['NLL'][i] = loglike\n\t\t\t\t#if i%self.monitorIter==0:\n\t\t\t\t#\tself._updateMonitor(sess,training,count,monitor)\n\t\t\t\t#\tif 'phis' in monitor:\n\t\t\t\t#\t\tmyPhi[count,:] = np.squeeze(sess.run(self.phi))\n\t\t\t\t#\tcount = count + 1\n\n\t\t\t\t#This is our adaptive sparsity\n\n\t\t\t\t#This prints the training progress\n\t\t\t\tif i%self.printIter == 0:\n\t\t\t\t\tel = time.time() - startTime\n\t\t\t\t\tsl = self.reg*sess.run(self.reg_scores,\n\t\t\t\t\t\t\t\t\t\t\tfeed_dict={self.s:s,\n\t\t\t\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\t\t\t\tself.y_fft:aa2,\n\t\t\t\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\t\t\t\tself.z_:zz})\n\t\t\t\t\tfl = self.reg*sess.run(self.reg_features,\n\t\t\t\t\t\t\t\t\t\t\tfeed_dict={self.s:s,\n\t\t\t\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\t\t\t\tself.y_fft:aa2,\n\t\t\t\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\t\t\t\tself.z_:zz})\n\t\t\t\t\tnl = self.reg*sess.run(self.NLL,\n\t\t\t\t\t\t\t\t\t\t\tfeed_dict={self.s:s,\n\t\t\t\t\t\t\t\t\t\t\tself.mu:mu,\n\t\t\t\t\t\t\t\t\t\t\tself.y_fft:aa2,\n\t\t\t\t\t\t\t\t\t\t\tself.Y_flat:bb,\n\t\t\t\t\t\t\t\t\t\t\tself.z_:zz})\n\t\t\t\t\tprint('Iteration %d,Time = %0.1f,Loss = %0.3f,dLoss=%0.3f,scores=%0.3f,feature=%0.3f,NLL=%0.3f'%(int(i),el,ll,dl,sl,fl,nl))\n\t\t\t\t\tprint(sess.run(self.phi))\n\n\t\t\tself._saveGraph(saver,sess)\n\n\t\tif 'phis' in monitor:\n\t\t\ttraining['phis'] = myPhi\n\t\tparams = self.getParams(sess) \n\t\treturn 
params,training,sess\n\t\n\ndef main():\n\ttf.reset_default_graph()\n\t#Load data\n\t#data = some complex data\n\ts = np.arange(1,120)\n\tN,C,W = data.shape\n\n\tmodel = CSFA_gd(N,C,W)\n\tmodel.fit_transform(s,data)\n\nif __name__ == \"__main__\":\n\tmain()\n\n\n","sub_path":"Code/CSFA/csfa_encoded.py","file_name":"csfa_encoded.py","file_ext":"py","file_size_in_byte":31739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"235549554","text":"import importlib\nimport json\nimport pathlib\n\nimport httpx\nimport pytest\n\nfrom jobbergate_cli.exceptions import Abort\nfrom jobbergate_cli.schemas import ApplicationResponse, JobScriptResponse\nfrom jobbergate_cli.subapps.job_scripts.tools import (\n create_job_script,\n fetch_job_script_data,\n flatten_param_dict,\n remove_prefix_suffix,\n save_job_script_files,\n validate_parameter_file,\n)\n\n\ndef test_validate_parameter_file__success(tmp_path):\n parameter_path = tmp_path / \"dummy.json\"\n dummy_data = dict(\n foo=\"one\",\n bar=2,\n baz=False,\n )\n parameter_path.write_text(json.dumps(dummy_data))\n assert validate_parameter_file(parameter_path) == dummy_data\n\n\ndef test_validate_parameter_file__fails_if_file_does_not_exist():\n with pytest.raises(Abort, match=\"does not exist\"):\n validate_parameter_file(pathlib.Path(\"some/fake/path\"))\n\n\ndef test_validate_parameter_file__fails_if_file_is_not_valid_json(tmp_path):\n parameter_path = tmp_path / \"dummy.json\"\n parameter_path.write_text(\"clearly not json\")\n with pytest.raises(Abort, match=\"is not valid JSON\"):\n validate_parameter_file(parameter_path)\n\n\ndef test_fetch_job_script_data__success(\n respx_mock,\n dummy_context,\n dummy_job_script_data,\n dummy_domain,\n):\n respx_mock.get(f\"{dummy_domain}/jobbergate/job-scripts/1\").mock(\n return_value=httpx.Response(\n httpx.codes.OK,\n json=dummy_job_script_data[0],\n ),\n )\n job_script = fetch_job_script_data(dummy_context, 1)\n assert job_script == JobScriptResponse.parse_obj(dummy_job_script_data[0])\n\n\ndef test_create_job_script__providing_a_name(\n dummy_application_data,\n dummy_job_script_data,\n dummy_module_source,\n dummy_domain,\n dummy_context,\n dummy_render_class,\n attach_persona,\n respx_mock,\n mocker,\n):\n \"\"\"\n Test that we can create a job script with the desired name.\n \"\"\"\n attach_persona(\"dummy@dummy.com\")\n\n application_response = ApplicationResponse(**dummy_application_data[0])\n mocked_fetch_application_data = mocker.patch(\n \"jobbergate_cli.subapps.job_scripts.tools.fetch_application_data\",\n return_value=application_response,\n )\n assert application_response.workflow_files is not None\n get_workflow_route = respx_mock.get(f\"{dummy_domain}{application_response.workflow_files[0].path}\")\n get_workflow_route.mock(\n return_value=httpx.Response(\n httpx.codes.OK,\n content=dummy_module_source.encode(),\n ),\n )\n\n dummy_render_class.prepared_input = dict(\n foo=\"FOO\",\n bar=\"BAR\",\n baz=\"BAZ\",\n )\n\n desired_job_script_data = dummy_job_script_data[0]\n\n mocker.patch.object(\n importlib.import_module(\"inquirer.prompt\"),\n \"ConsoleRender\",\n new=dummy_render_class,\n )\n create_route = respx_mock.post(\n f\"{dummy_domain}/jobbergate/job-scripts/render-from-template/{application_response.id}\"\n )\n create_route.mock(\n return_value=httpx.Response(\n httpx.codes.CREATED,\n json=desired_job_script_data,\n ),\n )\n\n actual_job_script_data = create_job_script(\n dummy_context,\n name=desired_job_script_data[\"name\"],\n 
application_id=1,\n fast=True,\n )\n\n mocked_fetch_application_data.assert_called_once_with(\n dummy_context,\n id=1,\n identifier=None,\n )\n\n assert actual_job_script_data == JobScriptResponse.parse_obj(desired_job_script_data)\n\n\ndef test_create_job_script__without_a_name(\n dummy_application_data,\n dummy_job_script_data,\n dummy_module_source,\n dummy_domain,\n dummy_context,\n dummy_render_class,\n attach_persona,\n respx_mock,\n mocker,\n):\n \"\"\"\n Test that we can create a job script without providing a name.\n\n In this case, it should be created with the name of the base application.\n \"\"\"\n attach_persona(\"dummy@dummy.com\")\n\n application_response = ApplicationResponse(**dummy_application_data[0])\n mocked_fetch_application_data = mocker.patch(\n \"jobbergate_cli.subapps.job_scripts.tools.fetch_application_data\",\n return_value=application_response,\n )\n assert application_response.workflow_files is not None\n get_workflow_route = respx_mock.get(f\"{dummy_domain}{application_response.workflow_files[0].path}\")\n get_workflow_route.mock(\n return_value=httpx.Response(\n httpx.codes.OK,\n content=dummy_module_source.encode(),\n ),\n )\n\n dummy_render_class.prepared_input = dict(\n foo=\"FOO\",\n bar=\"BAR\",\n baz=\"BAZ\",\n )\n\n desired_job_script_data = dummy_job_script_data[0]\n desired_job_script_data[\"name\"] = application_response.name\n\n mocker.patch.object(\n importlib.import_module(\"inquirer.prompt\"),\n \"ConsoleRender\",\n new=dummy_render_class,\n )\n create_route = respx_mock.post(\n f\"{dummy_domain}/jobbergate/job-scripts/render-from-template/{application_response.id}\"\n )\n create_route.mock(\n return_value=httpx.Response(\n httpx.codes.CREATED,\n json=desired_job_script_data,\n ),\n )\n\n actual_job_script_data = create_job_script(\n dummy_context,\n name=None,\n application_id=application_response.id,\n fast=True,\n )\n\n mocked_fetch_application_data.assert_called_once_with(\n dummy_context,\n id=application_response.id,\n identifier=None,\n )\n\n assert actual_job_script_data == JobScriptResponse.parse_obj(desired_job_script_data)\n\n\nclass TestSaveJobScriptFiles:\n \"\"\"\n Test the save_job_script_files function.\n \"\"\"\n\n def test_save_job_scripts_files__all_files(\n self,\n tmp_path,\n respx_mock,\n dummy_context,\n dummy_domain,\n dummy_job_script_data,\n dummy_template_source,\n ):\n \"\"\"\n Test that we can download all the files from a job script.\n \"\"\"\n job_script_data = JobScriptResponse.parse_obj(dummy_job_script_data[0])\n\n get_file_routes = [respx_mock.get(f\"{dummy_domain}{f.path}\") for f in job_script_data.files]\n for route in get_file_routes:\n route.mock(\n return_value=httpx.Response(\n httpx.codes.OK,\n content=dummy_template_source.encode(),\n ),\n )\n desired_list_of_files = [tmp_path / f.filename for f in job_script_data.files]\n\n assert len(desired_list_of_files) >= 1\n\n actual_list_of_files = save_job_script_files(dummy_context, job_script_data, tmp_path)\n\n assert actual_list_of_files == desired_list_of_files\n assert set(tmp_path.rglob(\"*\")) == set(desired_list_of_files)\n assert all(r.called for r in get_file_routes)\n\n assert all(p.read_text() == dummy_template_source for p in actual_list_of_files)\n\n\ndef test_flatten_param_dict__success():\n param_dict = {\n \"application_config\": {\"job_name\": \"rats\", \"partitions\": [\"foo\", \"bar\"]},\n \"jobbergate_config\": {\n \"default_template\": \"test_job_script.sh.j2\",\n \"supporting_files\": [\"support-1.j2\", \"support-2.j2\"],\n 
\"supporting_files_output_name\": {\"support-1.j2\": \"support-10\", \"support-2.j2\": \"support-20\"},\n \"template_files\": [\"test_job_script.sh.j2\", \"support-1.j2\", \"support-2.j2\"],\n \"job_script_name\": None,\n \"output_directory\": \".\",\n \"partition\": \"debug\",\n \"job_name\": \"rats\",\n },\n }\n actual_result = flatten_param_dict(param_dict)\n expected_result = {\n \"default_template\": \"test_job_script.sh.j2\",\n \"job_name\": \"rats\",\n \"job_script_name\": None,\n \"output_directory\": \".\",\n \"partition\": \"debug\",\n \"partitions\": [\"foo\", \"bar\"],\n \"supporting_files\": [\"support-1.j2\", \"support-2.j2\"],\n \"supporting_files_output_name\": {\"support-1.j2\": \"support-10\", \"support-2.j2\": \"support-20\"},\n \"template_files\": [\"test_job_script.sh.j2\", \"support-1.j2\", \"support-2.j2\"],\n }\n\n assert actual_result == expected_result\n\n\n@pytest.mark.parametrize(\n \"input_string, expected_output\",\n [\n (\"templates/test.sh.j2\", \"test.sh\"),\n (\"path/to/file.py.j2\", \"path/to/file.py\"),\n (\"templates/file.py.jinja2\", \"file.py\"),\n (\"templates/test.sh.jinja2\", \"test.sh\"),\n (\"other/path/test.py\", \"other/path/test.py\"),\n ],\n)\ndef test_remove_prefix_suffix(input_string, expected_output):\n assert remove_prefix_suffix(input_string) == expected_output\n","sub_path":"jobbergate-cli/tests/subapps/job_scripts/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":8643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"632010360","text":"# method_1\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def findMode(self, root: TreeNode) -> List[int]:\n if not root:return []\n def helper(node):\n if not node: return \n count[node.val] = count.setdefault(node.val,0) + 1\n helper(node.left)\n helper(node.right)\n \n count = dict()\n helper(root)\n count = sorted(count.items(),key=lambda x:x[1],reverse=True)\n res = []\n res.append(count[0][0])\n for i in range(1,len(count)):\n if count[i][1] == count[0][1]:\n res.append(count[i][0])\n return res\n \n\n# method_2\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def findMode(self, root: TreeNode) -> List[int]:\n \n def searchBST(cur):\n if not cur: return \n nonlocal pre,count,res,maxCount\n searchBST(cur.left)\n if pre == None:\n count += 1\n elif pre.val == cur.val:\n count += 1\n else:\n count = 1\n pre = cur\n if count == maxCount:\n res.append(cur.val)\n if count > maxCount:\n maxCount = count\n res = []\n res.append(cur.val)\n searchBST(cur.right)\n return\n \n count = 0\n maxCount = 0\n res = []\n pre = None\n searchBST(root)\n return res","sub_path":"Qustion Code/501. 二叉搜索树中的众数.py","file_name":"501. 
二叉搜索树中的众数.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"590282635","text":"# -*- coding: utf-8 -*-\r\nimport logging\r\nimport pdb\r\nimport re\r\nimport json\r\n\r\nfrom spiders.common.constantFields import TYPE_URL, TYPE_ITEM, TYPE_REQUEST\r\nfrom spiders.spiders.base import BaseSpider\r\n\r\n\r\nclass Nsfocuspider(BaseSpider):\r\n name = 'nsfocus'\r\n allowed_domains = ['nsfocus.net']\r\n start_urls = ['http://www.nsfocus.net/index.php?act=sec_bug']\r\n parsePage = 'getList'\r\n maxPageCount = 5\r\n\r\n custom_settings = {\r\n 'CONCURRENT_REQUESTS': 2,\r\n 'DOWNLOAD_DELAY': 1,\r\n }\r\n\r\n def getList(self, response):\r\n logging.info('start getList')\r\n metaInfo = response.meta.get('metaInfo')\r\n itemInfoList = []\r\n self.pageCount += 1 # count crawled pages\r\n\r\n cveItemInfoList = response.xpath('//div[@class=\"vulbar\"]/ul/li')\r\n for i, cveItemSel in enumerate(cveItemInfoList):\r\n detailUrl = response.urljoin(cveItemSel.xpath(\".//a/@href\").extract()[0].strip())\r\n pubTime = cveItemSel.xpath(\"./span/text()\").extract()[0].strip()\r\n if self.pageCount == 1 and i == 0: # record today's latest entry\r\n self.today_latest_item_data = {\r\n 'url': detailUrl,\r\n 'pubTime': pubTime\r\n }\r\n logging.info('latest data is %s' % json.dumps(self.today_latest_item_data))\r\n\r\n if detailUrl == self.latestDataInfo.get('url') and pubTime == self.latestDataInfo.get(\r\n 'pubTime'): # use time and url to decide whether this item is new data\r\n logging.info('find history data, stop spider')\r\n self.resInfo['endInfo'] = 'find history data, stop spider'\r\n break\r\n\r\n urlInfo = {\r\n 'itemType': TYPE_URL,\r\n 'parsePage': 'getCveItemInfo',\r\n 'metaInfo': metaInfo,\r\n 'item': detailUrl,\r\n }\r\n itemInfoList.append(urlInfo)\r\n else:\r\n # next page\r\n nextPageUrl = response.urljoin(response.xpath('//a[@title=\"Next\"]/@href').extract()[0])\r\n urlInfo = {\r\n 'itemType': TYPE_URL,\r\n 'parsePage': 'getList',\r\n 'metaInfo': metaInfo,\r\n 'item': nextPageUrl,\r\n }\r\n if self.pageCount < self.maxPageCount: # hard stop so an error cannot keep the spider running forever\r\n itemInfoList.append(urlInfo)\r\n else:\r\n logging.info('stop spider mandatory, spider page count is %d' % self.maxPageCount)\r\n return itemInfoList\r\n\r\n def getCveItemInfo(self, response):\r\n logging.info('start getCveItemInfo')\r\n metaInfo = response.meta.get('metaInfo')\r\n itemInfoList = []\r\n # parse title\r\n title = response.xpath('//div[@align=\"center\"]/b/text()').extract()[0].strip()\r\n # parse detailUrl\r\n sourceBulletinUrl = response.url\r\n\r\n dataInfoStr = response.xpath('//div[@class=\"vulbar\"]').extract()[0]\r\n # parse release time\r\n releaseTime = re.findall('发布日期:(\d+-\d+-\d+?)<br/>', dataInfoStr, re.DOTALL)[0].strip()\r\n updateTime = re.findall('更新日期:(\d+-\d+-\d+?)<br/>', dataInfoStr, re.DOTALL)[0].strip()\r\n # data source\r\n dataSource = 'NSFOCUS'\r\n\r\n # parse affected products\r\n affect_system_list = re.findall('受影响系统:<br/>(.*?)<br/>', dataInfoStr, re.DOTALL)[0].replace(\r\n '<br/>', '').split('\n')\r\n affect_system = ','.join(affect_system_list).replace('&lt;', '<')\r\n # description\r\n desc = re.findall('描述:.*建议:', dataInfoStr, re.DOTALL)[0].strip()\r\n desc = self.delHtmlScript(desc) + '受影响系统如下:' + affect_system\r\n cveCode = re.findall('CVE\(CAN\) ID: (CVE-\d+-\d+)', dataInfoStr, re.DOTALL)[0].strip()\r\n\r\n item = {}\r\n item['cveDesc'] = desc\r\n # item['cveCode'] = title.split('(')[1].strip(')').strip()\r\n item['cveCode'] = cveCode\r\n item['cveItemTitle'] = title\r\n item['pubTime'] = releaseTime\r\n item['cveSource'] = dataSource\r\n item['cveItemUrl'] = sourceBulletinUrl\r\n item['cveUpdateTime'] = updateTime\r\n # item['affectedProduct'] = affect_system\r\n urlInfo = {\r\n 'itemType': TYPE_ITEM,\r\n 'item': item,\r\n }\r\n itemInfoList.append(urlInfo)\r\n return itemInfoList\r\n\r\n def delHtmlScript(self, orgStr):\r\n pattern = re.compile(r'<[^>]+>', re.S)\r\n result = pattern.sub('', orgStr)\r\n return result\r\n\r\n\r\n# Run command: scrapy crawl nsfocus -a taskType=spider -a taskId=1\r\n# Debug or partial crawl: scrapy crawl nsfocus -a taskType=update -a taskId=1 -a spiderType=test -a sourceUrls=[\\\"http://www.nsfocus.net/vulndb/50158\\\"]\r\n\r\n","sub_path":"spiders/spiders/nsfocus.py","file_name":"nsfocus.py","file_ext":"py","file_size_in_byte":4817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"295940266","text":"\"\"\"\r\nAuthor: Ho Trong Son\r\nDate: 05/09/2021\r\nProgram: Project_05_page203.py\r\nProblem:\r\n 5. A list is sorted in ascending order if it is empty or each item except the last one is less than or equal\r\n to its successor. Define a predicate isSorted that expects a list as an argument and returns True if the\r\n list is sorted, or returns False otherwise. (Hint: For a list of length 2 or greater, loop through the\r\n list and compare pairs of items, from left to right, and return False if the first item in a pair is greater.)\r\n\r\nSolution:\r\n Display result\r\n List is not sorted!\r\n\r\n\"\"\"\r\n#Code here\r\ndef isSorted(list):\r\n result = True\r\n for i in range(1, len(list)):\r\n if list[i] < list[i - 1]: # unsorted only if an item is strictly greater than its successor\r\n result = False\r\n return result\r\n\r\n\r\ndef main():\r\n list = [2, 4, 37, 12]\r\n if isSorted(list):\r\n print(\"List is sorted!\")\r\n else:\r\n print(\"List is not sorted!\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","sub_path":"HoTrongSon_50150_chapter6/Chapter6/Project_page_203-204/Project_05_page_203.py","file_name":"Project_05_page_203.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"559967582","text":"import random\nfrom typing import List\n\nimport numpy as np\n\nfrom game.lilys_garden_env import LilysGardenEnv\n\n\nclass MCTSNode:\n\n def __init__(self, actions: List[int], seed: int, env: LilysGardenEnv, parent: 'MCTSNode'):\n self.ressq = 0\n self.weight = None\n self.actions = actions\n self.seed = seed\n self.env = env\n self.visits = 0\n self.children: List[MCTSNode] = []\n self.sp_utc = 0.0\n self.trajectory = None\n self.parent = parent\n\n def get_state(self):\n self.restore()\n return self.trajectory[0]\n\n def restore(self):\n\n if self.actions == self.env.current_actions and self.trajectory is not None:\n return\n\n self.trajectory = self.env.restore_state(self.actions, self.seed)\n\n def get_action(self):\n return self.actions[-1]\n\n def is_terminal(self):\n if self.trajectory is None:\n self.restore()\n return self.is_terminal()\n return
self.trajectory[2]\n\n def set_weight(self, weight):\n self.weight = weight\n\n def is_expanded(self):\n return len(self.children) > 0\n\n def add_child(self, child: 'MCTSNode'):\n self.children.append(child)\n child.parent = self\n\n def expand(self):\n obs = self.get_state()\n mask = self.env.create_action_mask(obs)\n\n if mask is not None:\n actions = np.where(mask == 1)[0]\n actions = actions.tolist()\n else:\n actions = [i for i in range(self.env.action_space.n+1)]\n\n for action in actions:\n all_actions = [] + self.actions\n all_actions.append(action)\n\n child = MCTSNode(all_actions, self.seed, self.env, parent=self)\n self.add_child(child)\n return self\n\n def _select_random_action(self, obs):\n mask = self.env.create_action_mask(obs)\n\n if mask is not None:\n actions = np.where(mask == 1)[0]\n actions = actions.tolist()\n else:\n actions = [i for i in range(self.env.action_space.n+1)]\n\n return random.choice(actions)\n\n def simulate(self):\n total_acc_reward = 0\n self.restore()\n observation = self.get_state()\n while True:\n if self.is_terminal():\n total_acc_reward += self.trajectory[1] # get reward\n return total_acc_reward\n\n observation, reward, done, info_dict = self.env.step(self._select_random_action(observation))\n\n total_acc_reward += reward\n\n if done:\n return total_acc_reward\n\n def __eq__(self, other):\n\n if isinstance(other, MCTSNode):\n return other.actions == self.actions\n else:\n return False\n","sub_path":"MCTS/MCTSNode.py","file_name":"MCTSNode.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"446641662","text":"import os\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nimport pickle as pkl\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport pandas as pd\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom model import AttentionLSTMClassifier\nfrom torch.utils.data import Dataset, DataLoader\nfrom early_stop import EarlyStop\nfrom measurement import CalculateFM\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import *\nimport itertools\n\nNUM_CLASS = 7\n\ndef isear_data():\n from py_isear.isear_loader import IsearLoader\n attributes = ['SIT']\n target = ['EMOT']\n loader = IsearLoader(attributes, target, True)\n data = loader.load_isear('data/isear.csv')\n txt = data.get_freetext_content() # returns attributes\n emo = data.get_target() # returns target\n return txt, emo\n\n\nclass DataSet(Dataset):\n def __init__(self, __X, __y, __pad_len, __word2id, __num_labels, max_size=None, use_unk=True):\n\n self.pad_len = __pad_len\n self.word2id = __word2id\n self.pad_int = __word2id['']\n if max_size is not None:\n self.source = self.source[:max_size]\n self.target = self.target[:max_size]\n self.tag = self.tag[:max_size]\n self.data = []\n self.label = []\n self.num_label = __num_labels\n self.seq_len = []\n self.only_single = True\n self.use_unk = use_unk\n\n self.read_data(__X, __y) # process data\n assert len(self.seq_len) == len(self.data) == len(self.label)\n\n def read_data(self, __X, __y):\n assert len(__X) == len(__y)\n num_empty_lines = 0\n for X, y in zip(__X, __y):\n tokens = X.split()\n if self.use_unk:\n tmp = [self.word2id[x] if x in self.word2id else self.word2id[''] for x in tokens]\n else:\n tmp = [self.word2id[x] for x in tokens if x in self.word2id]\n if len(tmp) == 0:\n num_empty_lines += 1\n continue\n self.seq_len.append(len(tmp) if len(tmp) < self.pad_len else 
self.pad_len)\n if len(tmp) > self.pad_len:\n tmp = tmp[: self.pad_len]\n self.data.append(tmp + [self.pad_int] * (self.pad_len - len(tmp)))\n\n a_label = [0] * self.num_label\n a_label[int(y)-1] = 1\n\n self.label.append(a_label)\n print(num_empty_lines, 'empty lines found')\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return torch.LongTensor(self.data[idx]), torch.LongTensor([self.seq_len[idx]]), torch.FloatTensor(self.label[idx])\n\n\ndef build_vocab(X_train, vocab_size, use_unk=True):\n word_count = {}\n word2id = {}\n id2word = {}\n for line in X_train:\n tokens = line.split()\n for word in tokens:\n if word in word_count:\n word_count[word] += 1\n else:\n word_count[word] = 1\n\n word_list = [x for x, _ in sorted(word_count.items(), key=lambda v: v[1], reverse=True)]\n if len(word_count) < vocab_size:\n raise Exception('Vocab less than requested!!!')\n\n # add first\n word2id[''] = 0\n id2word[0] = ''\n if use_unk:\n word2id[''] = 1\n id2word[1] = ''\n n = len(word2id)\n word_list = word_list[:vocab_size - n]\n\n for word in word_list:\n word2id[word] = n\n id2word[n] = word\n n += 1\n return word2id, id2word\n\n\n\ndef sort_batch(batch, ys, lengths):\n seq_lengths, perm_idx = lengths.sort(0, descending=True)\n seq_tensor = batch[perm_idx]\n targ_tensor = ys[perm_idx]\n return seq_tensor, targ_tensor, seq_lengths\n\n\ndef one_fold(X_train, y_train, X_test, y_test):\n num_labels = NUM_CLASS\n vocab_size = 10000\n pad_len = 30\n batch_size = 64\n hidden_dim = 200\n __use_unk = False\n\n es = EarlyStop(5)\n word2id, id2word = build_vocab(X_train, vocab_size, use_unk=__use_unk)\n embedding_dim = len(word2id)\n train_data = DataSet(X_train, y_train, pad_len, word2id, num_labels, use_unk=__use_unk)\n train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)\n\n test_data = DataSet(X_test, y_test, pad_len, word2id, num_labels, use_unk=__use_unk)\n test_loader = DataLoader(test_data, batch_size=batch_size)\n model = AttentionLSTMClassifier(embedding_dim, hidden_dim, vocab_size, word2id,\n num_labels, batch_size, use_att=True)\n model.load_bog_embedding(word2id)\n model.cuda()\n\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()))\n loss_criterion = nn.BCELoss()\n for epoch in range(4):\n print('Epoch:', epoch, '===================================')\n train_loss = 0\n for i, (data, seq_len, label) in enumerate(train_loader):\n data, label, seq_len = sort_batch(data, label, seq_len.view(-1))\n y_pred = model(Variable(data).cuda(), seq_len)\n optimizer.zero_grad()\n loss = loss_criterion(y_pred, Variable(label).cuda())\n loss.backward()\n optimizer.step()\n train_loss += loss.data[0]\n pred_list = []\n gold_list = []\n test_loss = 0\n for i, (data, seq_len, label) in enumerate(test_loader):\n data, label, seq_len = sort_batch(data, label, seq_len.view(-1))\n y_pred = model(Variable(data, volatile=True).cuda(), seq_len)\n loss = loss_criterion(y_pred, Variable(label, volatile=True).cuda())\n test_loss += loss.data[0]\n pred_list.append(y_pred.data.cpu().numpy())\n gold_list.append(label.numpy())\n\n print(\"Train Loss: \", train_loss, \" Evaluation: \", test_loss)\n es.new_loss(test_loss)\n if es.if_stop():\n print('Start over fitting')\n break\n return np.concatenate(pred_list, axis=0), np.concatenate(gold_list, axis=0)\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by 
setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n cm = cm.astype('float')\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n # plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else '.1f'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n\ndef confusion_matrix(pred_list, gold_list):\n assert gold_list.shape == pred_list.shape\n # m, n = pred_list.shape\n\n m = len(pred_list)\n cm = np.zeros([len(emotions), len(emotions)])\n\n for i in range(m):\n j = gold_list[i]\n k = pred_list[i]\n cm[j][k] += 1\n return cm\n\n\ndef one_vs_all_measure(gold, pred):\n one_hot_gold = np.zeros([len(gold), NUM_CLASS])\n one_hot_pred = np.zeros([len(pred), NUM_CLASS])\n assert len(gold) == len(pred)\n for i in range(len(gold)):\n one_hot_gold[i, gold[i]] = 1\n one_hot_pred[i, pred[i]] = 1\n retval = np.zeros([NUM_CLASS, 3])\n for i in range(NUM_CLASS):\n per_gold = one_hot_gold[:, i]\n per_pred = one_hot_pred[:, i]\n p = precision_score(per_gold, per_pred, average='binary')\n r = recall_score(per_gold, per_pred, average='binary')\n f = f1_score(per_gold, per_pred, average='binary')\n retval[i, :] = np.asarray([p, r, f])\n return retval\n\n\nif __name__ == '__main__':\n p_avg = 0\n r_avg = 0\n f_avg = 0\n emotions = [\"joy\", \"fear\", \"anger\", \"sadness\", \"disgust\", \"shame\", \"guilt\"]\n from sklearn.model_selection import StratifiedKFold\n X, y = isear_data()\n y = np.asarray(y)\n cnf_matrix_list = []\n cm = np.zeros([len(emotions), len(emotions)])\n measure_9_emo = np.zeros([3])\n n_folds = 5\n one_vs_all = np.zeros([NUM_CLASS, 3])\n\n kf = StratifiedKFold(n_splits=n_folds)\n for train_index, test_index in kf.split(X, y):\n X_train = [X[tmp] for tmp in train_index]\n X_test = [X[tmp] for tmp in test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n pred_list, gold_list = one_fold(X_train, y_train, X_test, y_test)\n\n pred_list = np.argmax(pred_list, axis=1)\n gold_list = np.argmax(gold_list, axis=1)\n one_vs_all += one_vs_all_measure(gold_list, pred_list)\n\n measure_9_emo[0] += precision_score(gold_list, pred_list, average='macro')\n measure_9_emo[1] += recall_score(gold_list, pred_list, average='macro')\n measure_9_emo[2] += f1_score(gold_list, pred_list, average='macro')\n\n cnf_matrix = confusion_matrix(pred_list, gold_list)\n cnf_matrix_list.append(cnf_matrix)\n\n for cnf_tmp in cnf_matrix_list:\n cm += cnf_tmp\n one_vs_all /= 5\n print(one_vs_all)\n measure_9_emo /= 5\n print(measure_9_emo)\n\n cm /= 5\n plt.figure()\n plot_confusion_matrix(cm, classes=emotions, normalize=False)\n plt.show()\n\n\n\n\n","sub_path":"trainer_IJCLA_ISEAR_BOW.py","file_name":"trainer_IJCLA_ISEAR_BOW.py","file_ext":"py","file_size_in_byte":9606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"32815211","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport configparser\nfrom hermes_python.hermes import Hermes\nfrom hermes_python.ontology import *\nimport io\nimport 
mysql.connector\n\nCONFIGURATION_ENCODING_FORMAT = \"utf-8\"\nCONFIG_INI = \"config.ini\"\n\nclass SnipsConfigParser(configparser.SafeConfigParser):\n def to_dict(self):\n return {section : {option_name : option for option_name, option in self.items(section)} for section in self.sections()}\n\n\ndef read_configuration_file(configuration_file):\n try:\n with io.open(configuration_file, encoding=CONFIGURATION_ENCODING_FORMAT) as f:\n conf_parser = SnipsConfigParser()\n conf_parser.readfp(f)\n return conf_parser.to_dict()\n except (IOError, configparser.Error) as e:\n return dict()\n\n\ndef subscribe_intent_callback(hermes, intentMessage):\n conf = read_configuration_file(CONFIG_INI)\n action_wrapper(hermes, intentMessage, conf)\n\ndef decryption(message):\n decrypted = \"\"\n if message.isdigit():\n decrypted = int(message) - 13\n else:\n for i in message:\n letter = ord(i)\n if(letter > 90 and letter < 97) or letter < 65 or letter > 122:\n decrypted += i\n else:\n if i.isupper():\n max = 91\n min = 65\n else:\n max = 123\n min = 97\n temp = (letter - 13) % max\n if temp < min:\n temp += min\n decrypted += chr(temp)\n return str(decrypted)\n\ndef action_wrapper(hermes, intentMessage, conf):\n\n mydb = mysql.connector.connect(\n\thost=\"localhost\",\n\tuser=\"pi\",\n\tpasswd=\"1234\",\n\tdatabase=\"home\")\n my_cursor = mydb.cursor()\n house_room = intentMessage.slots.room.first().value\n room = str(house_room)\n try:\n if room == \"outside\":\n my_cursor.execute(\"select temperature from outside where id = (select max(id) from outside)\")\n result = my_cursor.fetchone()\n decryptedMessage = decryption(str(result[0]))\n result_sentence = (\"There are \" + decryptedMessage + \" degrees outside\")\n elif room == \"living room\":\n my_cursor.execute(\"select temperature from livingroom where id = (select max(id) from livingroom)\")\n result = my_cursor.fetchone()\n decryptedMessage = decryption(str(result[0]))\n result_sentence = (\"There are \" + decryptedMessage + \" degrees in the living room\")\n else:\n my_cursor.execute(\"select temperature from \" + room + \" where id = (select max(id) from \" + room + \")\")\n result = my_cursor.fetchone()\n decryptedMessage = decryption(str(result[0]))\n result_sentence = (\"There are \" + decryptedMessage + \" degrees in the \" + room)\n except:\n result_sentence = \"Invalid place or room\"\n\n current_session_id = intentMessage.session_id\n hermes.publish_end_session(current_session_id, result_sentence)\n\n\n\nif __name__ == \"__main__\":\n with Hermes(\"localhost:1883\") as h:\n h.subscribe_intent(\"Faakka:askTemp\", subscribe_intent_callback) \\\n .start()\n","sub_path":"action-Faakka-askTemp.py","file_name":"action-Faakka-askTemp.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"590897737","text":"import sys\n\ndef fix(n, k):\n return (n % k + k) % k\n\nmemo = {}\ndef try_all(nums, i, k, total_mod_k = 0):\n if i == len(nums):\n if total_mod_k == 0:\n return True\n return False\n\n # memoize on (index, running total mod k); keying on the index alone would conflate different states\n if (i, total_mod_k) not in memo:\n memo[(i, total_mod_k)] = try_all(nums, i + 1, k, fix(total_mod_k + nums[i], k)) or try_all(nums, i + 1, k, fix(total_mod_k - nums[i], k))\n\n return memo[(i, total_mod_k)]\n\nif __name__ == \"__main__\":\n # try all possible sums of a sequence to get a number n | n % k = 0\n n, k = map(int, sys.stdin.readline().rstrip().split())\n nums = list(map(int, sys.stdin.readline().rstrip().split()))\n\n result = try_all(nums, 0, k)\n\n print(result)\n","sub_path":"Mostafa Sa'ad/Dynamic Programming Problems/try_all.py","file_name":"try_all.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"312125523","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Meeting', '0010_meeting_type_of_meeting'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='meeting',\n name='chairman',\n ),\n migrations.RemoveField(\n model_name='meeting',\n name='speecher',\n ),\n migrations.AlterField(\n model_name='meeting',\n name='rmm_of_meeting',\n field=models.CharField(default=b'', max_length=100, verbose_name=b'\\xe4\\xbc\\x9a\\xe8\\xae\\xae\\xe8\\xb4\\x9f\\xe8\\xb4\\xa3\\xe4\\xba\\xba\\xe5\\x9c\\xb0\\xe5\\x8c\\xba\\xe7\\xbb\\x8f\\xe7\\x90\\x86'),\n ),\n ]\n","sub_path":"MeetingInfoManage/Meeting/migrations/0011_auto_20161126_1434.py","file_name":"0011_auto_20161126_1434.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"65432793","text":"import random\nimport subprocess\nimport sys\nimport time\n\n\n\nx = 0\n\n\n\nprint(\"Choose:\")\nprint(\"1. Multithreaded mode\")\nprint(\"2. Single-threaded mode\")\ni = input() # read the user's choice; the original hard-coded int could never equal the string '1' below\n\nif i == '1':\n print(\"MULTITHREAD ACTIVATED\")\n times = time.time()\n while True:\n x += 1\n process = subprocess.Popen([sys.executable, \"RunBots.py\"])\n process.wait()\n print(f\"Resting for 60 seconds\")\n time.sleep(60)\n process = subprocess.Popen([sys.executable, \"TestMessagingWithBots.py\"])\n process.wait()\n if x % 5 == 0:\n print(f\"Resting for 60 seconds before the channel-join task\")\n time.sleep(60)\n process = subprocess.Popen([sys.executable, \"JoinChannelsTests.py\"])\n process.wait()\n\n\n print(f\"Finished round: {x}\")\n print(f\"Time: {time.time() - times}\")\n if x % 10 == 0:\n free_time = random.randrange(300, 1200)\n print(f\"Resting for {600 + free_time} seconds\")\n time.sleep(free_time + 600)\n\n if time.time() - times > 6000:\n print('Checking balance')\n process = subprocess.Popen([sys.executable, \"Checkbalance.py\"])\n process.wait()\n times = time.time()\nelse:\n times = time.time()\n print(\"SOLO THREAD ACTIVATED\")\n while True:\n x += 1\n process = subprocess.Popen([sys.executable, \"RunBotsSoloThread.py\"])\n process.wait()\n print(f\"Resting for 15 seconds\")\n time.sleep(15)\n process = subprocess.Popen([sys.executable, \"MessagingWithBotsSoloThread.py\"])\n process.wait()\n if x % 5 == 0:\n print(f\"Resting for 120 seconds before the channel-join task\")\n time.sleep(120)\n process = subprocess.Popen([sys.executable, \"JoinChannelsSoloThread.py\"])\n process.wait()\n\n\n print(f\"Finished round: {x}\")\n print(f\"Time: {time.time() - times}\")\n if x % 10 == 0:\n free_time = random.randrange(120, 300)\n print(f\"Resting for {200 + free_time} seconds\")\n time.sleep(free_time + 200)\n\n if time.time() - times > 6000:\n print('Checking balance')\n process = subprocess.Popen([sys.executable, \"Checkbalance.py\"])\n process.wait()\n times = time.time()\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"433137708","text":"SUCCESS = 0\nERROR = -1\n\n\nclass ConfigXML:\n \"\"\"Contains variables for xml config files that we use.\n apps : apps.xml\n build : build.xml\n \"\"\"\n apps = None\n build = None\n\n\nclass BuildStates:\n BUILD_STATE_ERRORED = -1 # build errored, no command is allowed after this\n BUILD_STATE_QUEUED = 0 # in the web's queue\n BUILD_STATE_PENDING = 1 # this is the first build in the queue, we do some stuff before we kick it off\n BUILD_STATE_SVN_UPDATE = 2 # build is doing the SVN update\n BUILD_STATE_BUILDING = 3 # compiling\n BUILD_STATE_RESTARTING = 4 # servers restarting\n BUILD_STATE_SYNCING = 5 # syncing in progress\n BUILD_STATE_CANCELED = 6 # build was canceled\n BUILD_STATE_WAITING = 7 # id and appname submitted to the build server\n\n\nclass DeployController:\n # define here the name of the script\n _name = \"deploy2.py\"\n _ssh = \"ssh -t -t\"\n\n # definition of the calls we make\n FORCE_RESTART = \"%(ssh)s %%s %%s/%(name)s --restart %%s\" % {\n 'ssh': _ssh,\n 'name': _name\n }\n SYNC = \"%(ssh)s %%s %%s/%(name)s %%s %%s norestart\" % {\n 'ssh': _ssh,\n 'name': _name\n }\n RESTART = \"%(ssh)s %%s %%s/%(name)s %%s %%s\" % {'ssh': _ssh, 'name': _name}\n","sub_path":"web-contents/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"462682952","text":"def rev_chain(s):\n a = ''\n s = ''.join(list(reversed(s)))\n D = {'A':'T','C':'G','G':'C','T':'A'}\n for i in s:\n a += D[i]\n return a\n\ndef t2s(t):\n a = list(map(str,t))\n return ' '.join(a)\n\nSeq = open(\"D:/Download/rosalind_revp.txt\",mode='r')\ns = ''\nfor line in Seq.readlines():\n line = line.strip('\\n')\n if '>' in line :\n continue\n else:\n s += line\nSeq.close()\nres = []\nout = open(\"D:/Download/output.txt\",mode='w')\nfor i in range(len(s)):\n for j in range(4,13):\n if i + j > len(s):\n break\n if s[i:i+j] == rev_chain(s[i:i+j]):\n res.append((i+1,j))\nfor t in res:\n out.write(t2s(t)+'\\n')\nout.close()\n","sub_path":"Basic/Locating Restriction Sites.py","file_name":"Locating Restriction Sites.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"551582458","text":"import unittest\n\nfrom pynet import Encryptor, Node, PeerDefinition, NodeHooks\nfrom pynet.util import to_bytes, _run_node, send_data\n\n\naddr_1 = (\"localhost\", 54320)\n# addr_1 = \"/tmp/pynet_test_node.sock\"\naddr_2 = (\"localhost\", 54321)\nnode_pk = Encryptor.new_key()\nmaster_encryptor = Encryptor(Encryptor.new_key(), node_pk.publickey())\nmaster_def = PeerDefinition(\"master\", None, master_encryptor.private_key.publickey())\n\n\ndef handle_message(sender, body):\n return (True, body.upper(), None)\n\n\nclass HooksOverride(NodeHooks):\n def handle_raw_message(self, data):\n if data == b\"register\":\n self.node.write(\"Registered!\", encrypt=False)\n return (True, None)\n else:\n return super().handle_raw_message(data)\n\n\nclass TestNode(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.addr = addr_1\n cls.node = Node(handle_message, cls.addr, \"slave-1\", node_pk, [master_def])\n cls.thread = _run_node(cls.node)\n\n @classmethod\n def tearDownClass(cls):\n cls.node.stop()\n cls.thread.join()\n\n def test_ping(self):\n response = send_data(self.addr, b\"ping\")\n self.assertEqual(response, b\"pong\")\n\n def test_identify(self):\n response = send_data(self.addr, b\"identify\")\n expected_pk = self.node.name.encode() + b\"\\n\" + node_pk.publickey().exportKey(\"PEM\")\n self.assertEqual(response, expected_pk)\n\n def log_bytes_stats(self, data):\n import
logging\n print()\n logging.warning(\"Data length: {0}\".format(len(data)))\n logging.warning(\"First 10 bytes: {0}\".format(data[:10]))\n logging.warning(\"Last 10 bytes: {0}\".format(data[-10:]))\n\n byted = {}\n for i, b in enumerate(data):\n if b not in byted:\n byted[b] = dict(count=0, indices=[])\n byted[b][\"count\"] += 1\n byted[b][\"indices\"].append(i)\n\n # for byte, stats in sorted(byted.items()):\n # print(\"{0:4}: {1:3}; {2}\".format(byte, stats[\"count\"], \", \".join(str(i) for i in stats[\"indices\"])))\n\n def test_message(self):\n send_message = b\"hello there to you my good friends how is it that you are doing today I really would like to know oh yes I would\"\n signed_cipher_text = Node.construct_message(master_encryptor, master_def.name, send_message)\n # self.log_bytes_stats(signed_cipher_text)\n response = send_data(self.addr, signed_cipher_text)\n message, signature = master_encryptor.decrypt(response)\n sender, status, body = message.split(b\"\\n\", 2)\n self.assertEqual(sender, self.node.name.encode())\n self.assertEqual(status, b\"ok\")\n self.assertEqual(body, send_message.upper())\n self.assertTrue(master_encryptor.verify_message(message, signature))\n\n def test_get_config(self):\n cfg = self.node.get_config()\n self.assertEqual(set(cfg.keys()), {\"address\", \"name\", \"private_key\", \"known_peers\"})\n self.assertEqual(len(cfg[\"known_peers\"]), 1)\n kp1 = cfg[\"known_peers\"][0]\n self.assertEqual(set(kp1.keys()), {\"name\", \"address\", \"public_key\"})\n\n def test_from_config(self):\n cfg = self.node.get_config()\n cfg[\"address\"] = \":\".join(str(o) for o in addr_2)\n node = Node.from_config(handle_message, cfg)\n\n def test_special_handling(self):\n self.node.hooks = HooksOverride()\n response = send_data(self.addr, b\"register\")\n self.assertEqual(response, b\"Registered!\")\n","sub_path":"tests/test_node.py","file_name":"test_node.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41133924","text":"import sys\nsys.stdin = open('sort.txt','r')\n\n# 카운팅소\n# 시간복잡도 O(N)\n# 속도는 빠르나 최대값을 구해 카운팅 리스트를 만들어야하므로 숫자가 매우크면 메모리면에서 비효율적이다.\n\nCounting=[0]*10001\nN=int(input())\nfor _ in range(N):\n Counting[int(input())]+=1\nfor i in range(10001):\n sys.stdout.write('%s\\n'%i*Counting[i])","sub_path":"algoritm/20하반기 코딩테스트/.Algorithm정리/.Sort/카운팅정렬.py","file_name":"카운팅정렬.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"100927765","text":"from selenium import webdriver\nimport time\n\ntry:\n link = 'http://suninjuly.github.io/registration1.html'\n link ='http://suninjuly.github.io/registration2.html'\n browser = webdriver.Chrome()\n browser.get(link)\n\n #Fill required field\n first_name = browser.find_element_by_css_selector('input.form-control.first')\n first_name.send_keys('Ivan')\n time.sleep(1)\n last_name = browser.find_element_by_css_selector('input.form-control.second')\n last_name.send_keys('Last name')\n time.sleep(1)\n email = browser.find_element_by_css_selector('input.form-control.third')\n email.send_keys('Email')\n time.sleep(1)\n \n #Send filled field \n button = browser.find_element_by_css_selector('button.btn')\n button.click()\n\n #Check, we passed registration\n #Wait page loading\n time.sleep(1)\n\n #Find element, contains text\n welcome_text_elt = browser.find_element_by_tag_name('h1')\n # Write in variable welcome_text text from element 
welcome_text_elt\n welcome_text = welcome_text_elt.text\n\n # Use assert to check that registration succeeded\n assert \"Congratulations! You have successfully registered!\" == welcome_text\n\nfinally:\n # Wait so the result can be checked visually before the script ends\n time.sleep(5)\n # Terminate browser\n browser.quit()\n\n","sub_path":"Scripts/selenium_course/python lesson6_step10.py","file_name":"python lesson6_step10.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"625686762","text":"import os\nfrom dataclasses import dataclass\nfrom typing import List, Optional\n\nfrom keras import Sequential\nfrom keras import optimizers\nfrom keras.callbacks import History, EarlyStopping, TensorBoard\nfrom keras.layers import LSTM, Dense, RepeatVector, TimeDistributed, np\nfrom keras.utils import plot_model\n\nfrom ikfs_anomaly_detector.core.custom_types import Signals\nfrom ikfs_anomaly_detector.core.utils import fill_zeros_with_previous, PROJECT_PATH\nfrom ikfs_anomaly_detector.intellectual.utils import z_normalization, squared_error, ewma\n\n\n@dataclass\nclass SignalsGroup:\n name: str\n signals: List[str]\n signals_data: Optional[Signals] = None\n\n\n@dataclass\nclass AutoencoderResult:\n signals: np.ndarray\n decoded_signals: np.ndarray\n mse: np.ndarray # Mean squared error\n ewma_mse: np.ndarray\n\n\nclass LSTMAutoencoder:\n EPOCHS = 100\n MIN_EPOCHS = 1\n BATCH_SIZE = 64\n VALIDATION_SPLIT = 0.05\n\n EWMA_WINDOW_SIZE = 120\n EWMA_ALPHA = 1 - np.exp(-np.log(2) / EWMA_WINDOW_SIZE)\n\n def __init__(self, signals_count: int, models_dir: str = '', tensorboard_dir: str = '') -> None:\n self._signals_count = signals_count\n self._models_dir = models_dir or os.path.join(PROJECT_PATH, 'models')\n self._tensorboard_dir = tensorboard_dir\n\n self._model = Sequential([\n LSTM(units=64, activation='relu', input_shape=(signals_count, 1)),\n RepeatVector(signals_count),\n LSTM(units=64, activation='relu', return_sequences=True),\n TimeDistributed(Dense(1)),\n ])\n\n def train(self, signals_group: SignalsGroup, clipnorm: float = 0.)
-> History:\n if len(signals_group.signals_data) != self._signals_count:\n raise ValueError(f'Модель может обработать строго {self._signals_count} сигналов')\n\n print(f'Обучение LSTM-автокодировщика для группы сигналов '\n f'\"{signals_group.name}\" {signals_group.signals}...')\n\n x_train = np.column_stack([\n self._preprocess(signal)\n for signal in signals_group.signals_data.values()\n ])\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1)) # samples, sample_len, features\n\n optimizer = optimizers.Adam(clipnorm=clipnorm)\n self._model.compile(optimizer=optimizer, loss='mse')\n\n callbacks = [\n EarlyStopping('val_loss', patience=self.MIN_EPOCHS, min_delta=0.05),\n ]\n if self._tensorboard_dir:\n os.makedirs(os.path.dirname(self._tensorboard_dir), exist_ok=True)\n callbacks.append(\n TensorBoard(\n log_dir=self._get_tensorboard_logs_dir(signals_group.name),\n batch_size=self.BATCH_SIZE,\n histogram_freq=0,\n write_graph=True,\n write_grads=True,\n write_images=True,\n ))\n\n history = self._model.fit(\n x_train, x_train,\n batch_size=self.BATCH_SIZE,\n epochs=self.EPOCHS,\n validation_split=self.VALIDATION_SPLIT,\n shuffle=True,\n callbacks=callbacks,\n )\n\n models_path = self._get_model_path(signals_group.name)\n os.makedirs(os.path.dirname(models_path), exist_ok=True)\n self._model.save_weights(models_path)\n print(f'Модель сохранена в \"{models_path}\"')\n\n return history\n\n def analyze(self, signals_group: SignalsGroup) -> AutoencoderResult:\n if len(signals_group.signals) != self._signals_count:\n raise ValueError(f'Модель может обработать строго {self._signals_count} сигналов')\n\n model_path = self._get_model_path(signals_group.name)\n if not os.path.exists(model_path):\n raise FileNotFoundError(f'Модель для группы сигналов {signals_group.name} не найдена. 
Выполните обучение')\n self._model.load_weights(model_path)\n\n data = np.array([self._preprocess(signal) for signal in signals_group.signals_data.values()])\n data_stacked = np.column_stack(data)\n # samples, sample_len, features\n data_reshaped = data_stacked.reshape(data_stacked.shape[0], data_stacked.shape[1], 1)\n\n decoded_data_reshaped = self._model.predict(data_reshaped, batch_size=self.BATCH_SIZE)\n decoded_data = decoded_data_reshaped.reshape(data_stacked.shape)\n decoded_data = np.column_stack(decoded_data)\n\n mse = np.sum([\n squared_error(predictions, targets)\n for predictions, targets in zip(decoded_data, data)\n ], axis=0)\n ewma_mse = ewma(mse, window=self.EWMA_WINDOW_SIZE, alpha=self.EWMA_ALPHA)\n\n return AutoencoderResult(\n signals=data,\n decoded_signals=decoded_data,\n mse=mse,\n ewma_mse=ewma_mse,\n )\n\n def plot_model(self, img_path: str, show_shapes=True, show_layer_names=True) -> None:\n plot_model(\n self._model,\n to_file=img_path,\n show_shapes=show_shapes,\n show_layer_names=show_layer_names,\n )\n\n @staticmethod\n def _preprocess(data: np.ndarray) -> np.ndarray:\n return z_normalization(fill_zeros_with_previous(data))\n\n def _get_model_path(self, group_name: str) -> str:\n return os.path.join(self._models_dir, 'autoencoder', f'{group_name}.h5')\n\n def _get_tensorboard_logs_dir(self, group_name: str) -> str:\n return os.path.join(self._tensorboard_dir, 'autoencoder', group_name)\n","sub_path":"ikfs_anomaly_detector/intellectual/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"634792876","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('entry', '0006_auto_20150522_1445'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='category',\n name='content_template',\n field=models.CharField(default=b'module/default_category_list.html', help_text='\\u5c55\\u73b0\\u5217\\u8868\\u9875\\u7684\\u6a21\\u7248', max_length=250, verbose_name='\\u5217\\u8868\\u9875\\u6a21\\u7248', choices=[(b'module/default_category_list.html', '\\u9ed8\\u8ba4\\u6a21\\u7248')]),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='category',\n name='detail_template',\n field=models.CharField(default=b'module/default_category_detail.html', help_text='\\u5c55\\u73b0\\u5185\\u5bb9\\u9875\\u7684\\u6a21\\u7248', max_length=250, verbose_name='\\u5185\\u5bb9\\u9875\\u6a21\\u7248', choices=[(b'module/default_category_detail.html', '\\u9ed8\\u8ba4\\u6a21\\u7248')]),\n preserve_default=True,\n ),\n ]\n","sub_path":"entry/migrations/0007_auto_20150525_1502.py","file_name":"0007_auto_20150525_1502.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"233174874","text":"import os\nimport torch\nimport torch.nn as nn\nimport torchvision \nfrom torchvision.transforms import Compose\nimport numpy as np\nfrom PIL import Image\n\nclass SimpleCNN(nn.Module):\n def __init__(self, input_channels, output_channels):\n super(SimpleCNN, self).__init__()\n self.input_channels = input_channels\n self.output_channels = output_channels\n self.conv1 = nn.Conv2d(input_channels, 32, 1, 1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 64, 1, 1)\n self.bn2 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU()\n self.flat = 
nn.Flatten()\n self.fc = nn.Linear(65536, self.output_channels)\n\n def forward(self, x):\n y = self.conv1(x)\n y = self.bn1(y)\n y = self.relu(y)\n y = self.conv2(y)\n y = self.bn2(y)\n y = self.relu(y)\n y = self.flat(y)\n y = self.fc(y)\n # print(y.shape)\n return y\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91416175","text":"from .....cnab240.libs.paymentType import NonBarcodeTaxPayment, BarcodePayment\nfrom .occurrences import occurrences\n\n\nclass PaymentResponseStatus:\n\n success = \"success\"\n failed = \"failed\"\n scheduled = \"scheduled\"\n unknown = \"unknown\"\n\n\nclass PaymentType:\n\n transfer = \"transfer\"\n chargePayment = \"charge-payment\"\n nonBarcodeTaxPayment = \"tax-payment\"\n barcodePayment = \"barcode-payment\"\n\n\nclass PaymentResponse:\n\n def __init__(self, identifier=None, occurrences=None, content=None, authentication=None, amountInCents=None, paymentType=None, nonBarcodeTax=None):\n self.identifier = identifier\n self.occurrences = occurrences\n self.content = content or []\n self.authentication = authentication\n self.amountInCents = amountInCents\n self.type = paymentType\n self.nonBarcodeTax = nonBarcodeTax\n\n def occurrencesText(self):\n return [occurrences[occurrenceId] for occurrenceId in self.occurrences]\n\n def occurrencesTextAtIndex(self, index):\n occurrenceId = self.occurrences[index]\n return occurrences[occurrenceId]\n\n def status(self):\n if \"00\" in self.occurrences:\n return PaymentResponseStatus.success\n if \"BD\" in self.occurrences:\n return PaymentResponseStatus.scheduled\n if [code in self.occurrences for code in [\"RJ\", \"DV\", \"SS\"]].count(True) > 0:\n return PaymentResponseStatus.failed\n return PaymentResponseStatus.unknown\n\n def contentText(self, breakLine=\"\\n\"):\n return breakLine.join(self.content)\n\n\nclass PaymentParser:\n\n @classmethod\n def parseFile(cls, file):\n lines = file.readlines()\n return cls.parseLines(lines)\n\n @classmethod\n def parseText(cls, text):\n lines = text.splitlines()[:-1]\n return cls.parseLines(lines)\n\n @classmethod\n def parseLines(cls, lines):\n result = []\n currentResponse = None\n for line in lines:\n if line[7] in [\"0\", \"1\", \"9\"]:\n continue\n\n if line[7] == \"3\" and line[13] in [\"A\", \"J\", \"O\", \"N\"]:\n if currentResponse is not None:\n result.append(currentResponse)\n currentResponse = PaymentResponse()\n\n if line[7] == \"3\" and line[13] == \"A\":\n currentResponse.content.append(line)\n currentResponse.identifier = cls._getIdentifierSegmentA(line)\n currentResponse.occurrences = cls._getOccurrences(line)\n currentResponse.amountInCents = cls._getAmountSegmentA(line)\n currentResponse.type = PaymentType.transfer\n elif line[7] == \"3\" and line[13] == \"J\":\n currentResponse.content.append(line)\n currentResponse.identifier = cls._getIdentifierSegmentJ(line)\n currentResponse.occurrences = cls._getOccurrences(line)\n currentResponse.amountInCents = cls._getAmountSegmentJ(line)\n currentResponse.type = PaymentType.chargePayment\n elif line[7] == \"3\" and line[13] == \"O\":\n currentResponse.content.append(line)\n currentResponse.identifier = cls._getIdentifierSegmentO(line)\n currentResponse.occurrences = cls._getOccurrences(line)\n currentResponse.amountInCents = cls._getAmountSegmentO(line)\n currentResponse.type = PaymentType.barcodePayment\n elif line[7] == \"3\" and line[13] == \"N\":\n 
currentResponse.content.append(line)\n currentResponse.identifier = cls._getIdentifierSegmentN(line)\n currentResponse.occurrences = cls._getOccurrences(line)\n currentResponse.nonBarcodeTax = cls._getNonBarcodeTaxSegmentN(line)\n currentResponse.type = (\n PaymentType.barcodePayment\n if currentResponse.nonBarcodeTax == BarcodePayment.fgts\n else PaymentType.nonBarcodeTaxPayment\n )\n elif line[7] == \"3\" and line[13] == \"Z\":\n currentResponse.content.append(line)\n currentResponse.authentication = cls._getAuthentication(line)\n\n if line[7] == \"5\" and currentResponse is not None:\n result.append(currentResponse)\n currentResponse = None\n\n return result\n\n @classmethod\n def _getOccurrences(cls, line):\n occurrencesString = line[230:240].strip()\n return cls._splitString(occurrencesString)\n\n @classmethod\n def _splitString(cls, string):\n return [string[i:i+2] for i in range(0, len(string), 2)]\n\n @classmethod\n def _getAmountSegmentA(cls, line):\n return int(line[119:134].strip())\n\n @classmethod\n def _getAmountSegmentJ(cls, line):\n return int(line[152:167].strip())\n\n @classmethod\n def _getAmountSegmentO(cls, line):\n return int(line[144:159].strip())\n\n @classmethod\n def _getIdentifierSegmentA(self, line):\n return line[73:93].strip()\n\n @classmethod\n def _getIdentifierSegmentJ(self, line):\n return line[182:202].strip()\n\n @classmethod\n def _getIdentifierSegmentO(self, line):\n return line[174:194].strip()\n\n @classmethod\n def _getIdentifierSegmentN(self, line):\n return line[195:215].strip()\n\n @classmethod\n def _getAuthentication(cls, line):\n return line[14:78].strip()\n\n @classmethod\n def _getNonBarcodeTaxSegmentN(self, line):\n taxTypeId = line[17:19].strip()\n return {\n \"01\": NonBarcodeTaxPayment.gps,\n \"02\": NonBarcodeTaxPayment.darf,\n \"11\": BarcodePayment.fgts,\n }[taxTypeId]\n","sub_path":"febraban/cnab240/itau/sispag/result/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614088307","text":"first_case = int(input())\n\nfirst_set = set(map(int, input().split()))\n\nsecond_case = int(input())\n\nsecond_set = set(map(int, input().split()))\n\n# Both Solutions Work(Returns a set of uncommon elements which belong to either sets but nt both sets)\n\nsymetric_diff = sorted(list(first_set.symmetric_difference(second_set)))\n\nprint(*symetric_diff, sep=\"\\n\")\n\n","sub_path":"Problem Solving Track/Symetric Difference.py","file_name":"Symetric Difference.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"586475170","text":"# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module implements the Blinn-Phong specular reflectance.\n\nFor a derivation of the normalization factor ensuring energy conservation, we\nrefer the interested reader to:\nFabian Giesen.\n\"Derivation of 
Phong and Blinn-Phong BRDF normalization factors\". 2009\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nimport tensorflow as tf\n\nfrom tensorflow_graphics.math import vector\nfrom tensorflow_graphics.util import asserts\nfrom tensorflow_graphics.util import export_api\nfrom tensorflow_graphics.util import safe_ops\nfrom tensorflow_graphics.util import shape\nfrom tensorflow_graphics.util import type_alias\n\n\ndef _brdf_normalization_factor(shininess: type_alias.TensorLike) -> tf.Tensor:\n \"\"\"Returns the normalization factor needed to ensure energy conservation.\"\"\"\n numerator = (shininess + 2.0) * (shininess + 4.0)\n denominator = 8.0 * math.pi * (\n tf.pow(tf.constant(2.0, dtype=shininess.dtype), -shininess / 2.0) +\n shininess)\n return safe_ops.safe_signed_div(numerator, denominator)\n\n\ndef brdf(direction_incoming_light: type_alias.TensorLike,\n direction_outgoing_light: type_alias.TensorLike,\n surface_normal: type_alias.TensorLike,\n shininess: type_alias.TensorLike,\n albedo: type_alias.TensorLike,\n brdf_normalization: bool = True,\n name: str = \"blinn_phong_brdf\") -> tf.Tensor:\n \"\"\"Evaluates the specular brdf of the Blinn-Phong model.\n\n Note:\n In the following, A1 to An are optional batch dimensions, which must be\n broadcast compatible.\n\n Note:\n The gradient of this function is not smooth when the dot product of the\n normal with any light is 0.0.\n\n Args:\n direction_incoming_light: A tensor of shape `[A1, ..., An, 3]`, where the\n last dimension represents a normalized incoming light vector.\n direction_outgoing_light: A tensor of shape `[A1, ..., An, 3]`, where the\n last dimension represents a normalized outgoing light vector.\n surface_normal: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents a normalized surface normal.\n shininess: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n represents a non-negative shininess coefficient.\n albedo: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n represents albedo with values in [0,1].\n brdf_normalization: A `bool` indicating whether normalization should be\n applied to enforce the energy conservation property of BRDFs. Note that\n `brdf_normalization` must be set to False in order to use the original\n Blinn-Phong specular model.\n name: A name for this op. 
Defaults to \"blinn_phong_brdf\".\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, where the last dimension represents\n the amount of light reflected in the outgoing light direction.\n\n Raises:\n ValueError: if the shape of `direction_incoming_light`,\n `direction_outgoing_light`, `surface_normal`, `shininess` or `albedo` is not\n supported.\n InvalidArgumentError: if not all of shininess values are non-negative, or if\n at least one element of `albedo` is outside of [0,1].\n \"\"\"\n with tf.name_scope(name):\n direction_incoming_light = tf.convert_to_tensor(\n value=direction_incoming_light)\n direction_outgoing_light = tf.convert_to_tensor(\n value=direction_outgoing_light)\n surface_normal = tf.convert_to_tensor(value=surface_normal)\n shininess = tf.convert_to_tensor(value=shininess)\n albedo = tf.convert_to_tensor(value=albedo)\n\n shape.check_static(\n tensor=direction_incoming_light,\n tensor_name=\"direction_incoming_light\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=direction_outgoing_light,\n tensor_name=\"direction_outgoing_light\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=surface_normal,\n tensor_name=\"surface_normal\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=shininess, tensor_name=\"shininess\", has_dim_equals=(-1, 1))\n shape.check_static(\n tensor=albedo, tensor_name=\"albedo\", has_dim_equals=(-1, 3))\n shape.compare_batch_dimensions(\n tensors=(direction_incoming_light, direction_outgoing_light,\n surface_normal, shininess, albedo),\n tensor_names=(\"direction_incoming_light\", \"direction_outgoing_light\",\n \"surface_normal\", \"shininess\", \"albedo\"),\n last_axes=-2,\n broadcast_compatible=True)\n direction_incoming_light = asserts.assert_normalized(\n direction_incoming_light)\n direction_outgoing_light = asserts.assert_normalized(\n direction_outgoing_light)\n surface_normal = asserts.assert_normalized(surface_normal)\n albedo = asserts.assert_all_in_range(albedo, 0.0, 1.0, open_bounds=False)\n shininess = asserts.assert_all_above(shininess, 0.0, open_bound=False)\n\n # Checks whether the incoming or outgoing light point behind the surface.\n dot_incoming_light_surface_normal = vector.dot(-direction_incoming_light,\n surface_normal)\n dot_outgoing_light_surface_normal = vector.dot(direction_outgoing_light,\n surface_normal)\n min_dot = tf.minimum(dot_incoming_light_surface_normal,\n dot_outgoing_light_surface_normal)\n difference_outgoing_incoming = (\n direction_outgoing_light - direction_incoming_light)\n difference_outgoing_incoming = tf.math.l2_normalize(\n difference_outgoing_incoming, axis=-1)\n cos_alpha = vector.dot(\n surface_normal, difference_outgoing_incoming, axis=-1)\n cos_alpha = tf.maximum(cos_alpha, tf.zeros_like(cos_alpha))\n blinn_phong_model = albedo * tf.pow(cos_alpha, shininess)\n if brdf_normalization:\n blinn_phong_model *= _brdf_normalization_factor(shininess)\n common_shape = shape.get_broadcasted_shape(min_dot.shape,\n blinn_phong_model.shape)\n d_val = lambda dim: 1 if dim is None else tf.compat.dimension_value(dim)\n common_shape = [d_val(dim) for dim in common_shape]\n condition = tf.broadcast_to(tf.greater_equal(min_dot, 0.0), common_shape)\n blinn_phong_model = tf.broadcast_to(blinn_phong_model, common_shape)\n return tf.where(condition, blinn_phong_model,\n tf.zeros_like(blinn_phong_model))\n\n\n# API contains all public functions and classes.\n__all__ = 
export_api.get_functions_and_classes()\n","sub_path":"tensorflow_graphics/rendering/reflectance/blinn_phong.py","file_name":"blinn_phong.py","file_ext":"py","file_size_in_byte":7156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"510721035","text":"import pytest\nfrom spytest import st\nfrom spytest.dicts import SpyTestDict\nfrom apis.system.boot_up import sonic_installer_list\nfrom apis.system.basic import check_sonic_branding\n\nbrand_data = SpyTestDict()\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef branding_module_hooks(request):\n    global vars\n    vars = st.ensure_min_topology(\"D1\")\n    initialize_variables()\n    yield\n\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef branding_func_hooks(request):\n    yield\n\n\ndef initialize_variables():\n    brand_data.clear()\n    brand_data.build_version_in_show_version = vars.version[vars.D1]\n    brand_data.sonic_installer_output = sonic_installer_list(vars.D1)\n    brand_data.build_version_in_sonic_installer = brand_data.sonic_installer_output['Current'][0]\n\n\n@pytest.mark.test_ft_version_branding\ndef test_ft_version_branding():\n    \"\"\"\n    This test function verifies the branding changes\n    Author: Jagadish Chatrasi\n    \"\"\"\n    st.banner('Verifying that the \"show version\" command displays the build version in the correct format')\n    if not check_sonic_branding(brand_data.build_version_in_show_version):\n        st.report_fail('invalid_build_version_format', 'show version')\n    st.log('Successfully verified that the \"show version\" command displays the build version in the correct format')\n\n    st.banner('Verifying that the \"sonic_installer list\" command displays the build version in the correct format')\n    if not check_sonic_branding(brand_data.build_version_in_sonic_installer, cli_type='click'): # Passing the cli_type because the sonic_installer output is supported only in click\n        st.report_fail('invalid_build_version_format', 'sonic installer list')\n    st.log('Successfully verified that the \"sonic_installer list\" command displays the build version in the correct format')\n\n    st.banner('Verifying that the version string does not contain the text \"dirty\"')\n    if 'dirty' in brand_data.build_version_in_show_version.lower():\n        st.report_fail('found_string_dirty', 'show version')\n    if 'dirty' in brand_data.build_version_in_sonic_installer.lower():\n        st.report_fail('found_string_dirty', 'sonic installer list')\n    st.log('Successfully verified that the version string does not contain the text \"dirty\"')\n\n    st.report_pass('test_case_passed')\n","sub_path":"tests/system/test_branding.py","file_name":"test_branding.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572701431","text":"\"\"\"\nApproach:\nstore all occurrences in a hashmap\ntake the complement at each index and check if it has occurred already; if yes, we have found the pair. 
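(e.g. nums = [2, 7, 11, 15], target = 9: at i = 1 the complement 9 - 7 = 2 is already stored at index 0, so [1, 0] is returned); 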
if not, keep looking\n\n\"\"\"\nclass Solution:\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        if not nums:\n            return nums\n        hash_map = {}\n\n        for i in range(len(nums)):\n            complement = target - nums[i]\n            if complement in hash_map:\n                return [i, hash_map[complement]]\n            else:\n                hash_map[nums[i]] = i\n        return []\n\n\"\"\"\nTC: O(n)\nSC: O(n)\n\"\"\"\n","sub_path":"Problem-1.py","file_name":"Problem-1.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522377911","text":"# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport time\nfrom typing import Callable, Tuple\n\nimport click\nimport torch\nfrom torch import Tensor\n\nlogging.basicConfig(level=logging.DEBUG)\n\ntry:\n    # pyre-ignore[21]\n    from fbgemm_gpu import open_source  # noqa: F401\nexcept Exception:\n    torch.ops.load_library(\"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops\")\n    torch.ops.load_library(\"//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu\")\n\ndef benchmark_torch_function(\n    func: Callable[[Tensor], Tensor],\n    input: Tensor,\n    flush_gpu_cache_size_mb: int,\n    ) -> Tuple[float, Tensor]:\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n        start_event = torch.cuda.Event(enable_timing=True)\n        end_event = torch.cuda.Event(enable_timing=True)\n        # Flush the cache\n        if flush_gpu_cache_size_mb:\n            _ = torch.rand(\n                flush_gpu_cache_size_mb * 1024 * 1024 // 4, dtype=torch.float\n            )\n            torch.cuda.synchronize()\n        start_event.record()\n        # Benchmark code\n        output = func(input)\n        # Accumulate the time for iters iteration\n        end_event.record()\n        torch.cuda.synchronize()\n        elapsed_time = start_event.elapsed_time(end_event) * 1.0e-3\n    else:\n        start_time = time.time()\n        output = func(input)\n        elapsed_time = time.time() - start_time\n    return float(elapsed_time), output\n\n@click.command()\n@click.option(\"--flush-gpu-cache-size-mb\", default=0)\n@click.option(\"--iters\", default=100)\n@click.option(\"--num-columns\", default=512)\n@click.option(\"--num-rows\", default=512)\n@click.option(\"--warmup-runs\", default=2)\ndef main(\n    flush_gpu_cache_size_mb: int,\n    iters: int,\n    num_columns: int,\n    num_rows: int,\n    warmup_runs: int,\n) -> None:\n\n    total_time = {\n        \"8bit_quant\": 0.0,\n        \"4bit_quant\": 0.0,\n        \"2bit_quant\": 0.0,\n        \"8bit_dequant\": 0.0,\n        \"4bit_dequant\": 0.0,\n        \"2bit_dequant\": 0.0,\n    }\n\n    input_data = torch.rand(num_rows, num_columns).float()\n    if torch.cuda.is_available():\n        input_data = input_data.cuda()\n    for step in range(iters + warmup_runs):\n        time, quant_data_8bit = benchmark_torch_function(\n            torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized,\n            input_data,\n            flush_gpu_cache_size_mb,\n        )\n        if step >= warmup_runs:\n            total_time[\"8bit_quant\"] += time\n\n        time, quant_data_4bit = benchmark_torch_function(\n            lambda input : torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(input, 4),\n            input_data,\n            flush_gpu_cache_size_mb,\n        )\n        if step >= warmup_runs:\n            total_time[\"4bit_quant\"] += time\n\n        time, quant_data_2bit = benchmark_torch_function(\n            lambda input : torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(input, 2),\n            input_data,\n            flush_gpu_cache_size_mb,\n        )\n        if step >= warmup_runs:\n            total_time[\"2bit_quant\"] += time\n\n        time, _ = benchmark_torch_function(\n            torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat,\n
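            # (added note) dequantization pass: converts the 8-bit quantized rows produced above back to float\n            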
quant_data_8bit,\n flush_gpu_cache_size_mb,\n )\n if step >= warmup_runs:\n total_time[\"8bit_dequant\"] += time\n\n time, _ = benchmark_torch_function(\n lambda input : torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(input, 4),\n quant_data_4bit,\n flush_gpu_cache_size_mb,\n )\n if step >= warmup_runs:\n total_time[\"4bit_dequant\"] += time\n\n time, _ = benchmark_torch_function(\n lambda input : torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(input, 2),\n quant_data_2bit,\n flush_gpu_cache_size_mb,\n )\n if step >= warmup_runs:\n total_time[\"2bit_dequant\"] += time\n\n logging.info(\n f\"-------------- ncols={num_columns}, nrows={num_rows}-------------\"\n )\n for k, t_time in total_time.items():\n logging.info(\n f\"{k} time per iter: {t_time / iters * 1.0e6:.0f}us\"\n )\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"fbgemm_gpu/bench/quantize_ops_benchmark.py","file_name":"quantize_ops_benchmark.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"495679673","text":"#! /usr/bin/python3\n# -*- coding: UTF-8 -*-\nimport time\n\nclass myMessage():\n def __init__(self, From, To, Text):\n self.From = From\n self.To = To\n self.Text = Text\n self.time_send = time.time()","sub_path":"ppchatV2/chatroom.py","file_name":"chatroom.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443783575","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport csv\n\ndriver = webdriver.PhantomJS(executable_path = '/home/guowei/bin/phantomjs/bin/phantomjs')\n\ndriver.get('http://item.m.jd.com/comments/1514794.html?sid=f78e65a1b56bb56e2b85135d980625c1')\n\npageSource = driver.page_source\nbsObj = BeautifulSoup(pageSource)\ncommentList = bsObj.findAll('span', {'class': 'name'})\n\ncsvFile = open('./name.csv', 'wt')\ntry:\n\twriter = csv.writer(csvFile)\n\twriter.writerow(('id', 'name'))\n\tnameId = 1\n\tfor cmt in commentList:\n\t\tcmtText = cmt.get_text().encode('utf-8')\n\t\twriter.writerow((nameId, cmtText))\n\t\tnameId += 1\nfinally:\n\tcsvFile.close()\n\ndriver.close()\n","sub_path":"old-python/mitchell/csvChinse.py","file_name":"csvChinse.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17196466","text":"import subprocess\nimport requests\n\n_mesh_api = 'http://0.0.0.0:5000/location'\n\nclass Locate:\n\tdef __init__(self, api=_mesh_api):\n\t\tself.api = api\n\n\t\t''' Open a pipe to `iwconfig` and read its output. '''\n\t\tiw = subprocess.Popen('iwconfig', stdout=subprocess.PIPE)\n\t\tiw_data = iw.communicate()[0] # .communicate() returns (stdout, stderr), we need stdout\n\n\t\t''' Parse `iwconfig` output to obtain ESSID of currently\n\t\t associated router. '''\n\t\tiw_data = iw_data.decode('utf-8') # Decode Python bytes into string\n\t\tiw_data = iw_data.split('\\n') # Split by newline character\n\n\t\t''' The ESSID needed is on the `wlan0` line of iwconfig's output,\n\t\t so find that line, split it by spaces, and get the ESSID out.\n\t\t Then expose it to the object by assigning it to a property\n\t\t of the current instance (self.ESSID). 
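Note (added): the value is stored in the self.essid attribute below; it stays None when no wlan0 ESSID is found. 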
'''\n\t\tself.essid = None # Default value\n\t\tfor line in iw_data:\n\t\t\tif line.startswith('wlan0'):\n\t\t\t\tline = line.split('ESSID:')[1]\n\t\t\t\tline = line.split(' ')[0]\n\t\t\t\tself.essid = line.replace('\"', '')\n\t\t\t\tbreak\n\n\t\t''' Ping mesh node database for physical location of the router. '''\n\t\tr = requests.get(self.api + '/' + self.essid)\n\t\tloc_data = r.json()\n\t\tself.spot = loc_data['spot']\n","sub_path":"python/poly/location/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329783347","text":"def f(x):\n\treturn x*((x**2 + 3*x - 3)/(x**2 + 1))\n\nx0 = float(input(\"a: \"))\nif abs(x0) > 10**4:\n\tprint(\"Invalid interval\")\n\tx0 = float(input(\"a: \"))\n\nx1 = float(input(\"b: \"))\nif abs(x1) > 10**4:\n\tprint(\"Invalid interval\")\n\tx1 = float(input(\"b: \"))\n\nxa, xb = x0, x1\nfa, fb = f(xa), f(xb)\n\nfor i in range (5):\n\tx_falsa = (fb * xa - fa * xb) / (fb - fa)\n\tf_falsa = f(x_falsa)\n\n\tif f_falsa < 0:\n\t\txa, fa = x_falsa, f_falsa\n\telse:\n\t\txb, fb = x_falsa, f_falsa\n\n\tprint(\"%.6f\" %(x_falsa))","sub_path":"functions_5.py","file_name":"functions_5.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53333103","text":"from django.core.management.base import BaseCommand, CommandError\nfrom utils.data_factory import DataFactory\n\nclass Command(BaseCommand):\n    help = \"Read Data from Mockup csv file and Populate DB\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\"-f\", \"--file_path\", default='./data/product_mockup_data.csv', type=str, help=\"Path for user data csv file.\")\n    \n    def handle(self, *args, **options):\n        try:\n            file_path = options['file_path']\n\n            self.stdout.write(self.style.NOTICE(\"Start Populating Product DB\"))\n\n            factory = DataFactory()\n            product_data = factory.load_from_csv(file_path)\n            print(product_data[0])\n            factory.populate_products(product_data)\n            \n            self.stdout.write(self.style.SUCCESS(\"Successfully Populated Product DB\"))\n            \n        except CommandError as e:\n            print(e)\n","sub_path":"products/management/commands/populate_product_db.py","file_name":"populate_product_db.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163233639","text":"#!/usr/bin/env python3\n\nimport argparse\nimport csv\nimport json\nimport os\nimport shutil\n\nfrom termcolor import colored, cprint\nfrom colorama import init\n\nfrom mrdpf.core import run_parsers\nfrom mrdpf.io.general import get_supported_files\nfrom mrdpf.io.general import get_matching_glob\nfrom mrdpf.parsers import Parsers\nfrom mrdpf.helpers import DataclassJSONEncoder\n\nheader = r\" _____ _____ _____ __ \" + '\\n'\\\n    r\" | __ \\| __ \\| __ \\ / _|\" + '\\n'\\\n    r\" _ __ ___ | |__) | | | | |__) | |_ \" + '\\n'\\\n    r\" | '_ ` _ \\| _ /| | | | ___/| _|\" + '\\n'\\\n    r\" | | | | | | | \\ \\| |__| | | | | \" + '\\n'\\\n    r\" |_| |_| |_|_| \\_\\_____/|_| |_| \" + '\\n'\n\n
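# NOTE (added comment): the helpers below implement collision-free output naming - an existing path or file name gets a numeric suffix (_1, _2, ...) until an unused name is found.\n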
def create_dir(path):\n    \"\"\"Create directory at path. Returns path to created directory.\n\n    If a directory already exists at the provided path, a number will\n    be appended until the directory does not exist.\n    \"\"\"\n    append = 0\n    base = path\n    while os.path.isdir(path):\n        append += 1\n        path = base + '_' + str(append)\n\n    cprint(f'=> Creating folder {path}', 'green')\n    os.mkdir(path)\n    return path\n\ndef create_file(path, name, extension):\n    \"\"\"Create file with name and extension at given base path.\n\n    If a file with the given name and extension already exists, a number\n    will be appended until a file does not exist.\n\n    :returns: Path to created file\n    \"\"\"\n    append = 0\n    file_name = os.path.join(path, name + '.' + extension)\n\n    while os.path.isfile(file_name):\n        append += 1\n        file_name = os.path.join(path, name + '_' + str(append) + '.' + extension)\n\n    return file_name\n\ndef write_data(path, name, extension, data):\n    \"\"\"Write a list of dataclasses to a CSV file at given path with name and extension.\n\n    :returns: Path to created file or None if data is empty\n    \"\"\"\n    if len(data) > 0:\n        file_name = create_file(path, name, extension)\n        cprint(f'=> Writing {len(data)} {name} to {file_name}', 'green')\n        write_dataclass_list_to_csv(file_name, data)\n        return file_name\n    else:\n        cprint(f'-- No {name} to write', 'cyan')\n        return None\n\ndef write_dataframe(path, name, extension, data):\n    \"\"\"Write a pandas dataframe to a CSV file at path with name and extension.\n\n    :returns: Path to created file or None if data is empty\n    \"\"\"\n    if len(data) > 0:\n        file_name = create_file(path, name, extension)\n        cprint(f'=> Writing {name} to {file_name}', 'green')\n        data.to_csv(file_name)\n        return file_name\n    else:\n        cprint(f'-- No {name} to write', 'cyan')\n        return None\n\ndef write_dataclass_list_to_csv(path: str, data: list):\n    \"\"\"Write a list of dataclasses to a CSV file at path\"\"\"\n    with open(path, 'w', newline='') as file:\n        writer = csv.writer(file, delimiter=',')\n        headers_written = False\n\n        for item in data:\n            d = item.to_dict()\n\n            if not headers_written:\n                writer.writerow(d.keys())\n                headers_written = True\n\n            writer.writerow(d.values())\n\n
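# Usage sketch (illustrative; assumes each item implements to_dict()): write_dataclass_list_to_csv('out.csv', items) writes a header row taken from the first item's keys, then one row of values per item.\n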
def get_parser():\n    \"\"\"Create argument parser\"\"\"\n    parser = argparse.ArgumentParser()\n\n    input_group = parser.add_mutually_exclusive_group(required=True)\n    input_group.add_argument('-d', '--dir', help='Directory to search for compatible files')\n    input_group.add_argument('-f', '--file', help='File to parse')\n\n    parser.add_argument('-o', '--out', help='Folder to write results to', required=True)\n\n    parser.add_argument('--no-recurse', help='Do not recurse. If input option is not --dir this option is ignored.', action='store_true')\n    parser.add_argument('--clear', help='Clear output directory before writing results (will delete all files below out directory)', action='store_true')\n    return parser\n\nif __name__ == '__main__':\n    \"\"\"Main Function\"\"\"\n    init()\n\n    cprint(header, 'cyan')\n    cprint('Author: Jonathan Holtmann\\n', 'cyan')\n\n    parser = get_parser()\n    args = parser.parse_args()\n\n    if not os.path.isdir(args.out):\n        parser.error(f'Output directory {args.out} does not exist')\n\n    if args.clear:\n        cprint(f'-- Deleting all files and folders in {args.out}', 'red')\n\n        for file_name in os.listdir(args.out):\n            file_path = os.path.join(args.out, file_name)\n            try:\n                if os.path.isfile(file_path) or os.path.islink(file_path):\n                    os.unlink(file_path)\n                elif os.path.isdir(file_path):\n                    shutil.rmtree(file_path)\n            except Exception as e:\n                cprint(f'Failed to delete {file_path} -- {e}', 'red')\n\n    if args.dir is not None:\n        cprint(f'-- Given directory \"{args.dir}\"', 'cyan')\n\n        if not os.path.isdir(args.dir):\n            parser.error(f'Directory {args.dir} does not exist')\n\n        files = get_supported_files(args.dir, not args.no_recurse)\n    elif args.file is not None:\n        cprint(f'Given file \"{args.file}\"', 'cyan')\n\n        if not os.path.isfile(args.file):\n            parser.error(f'File {args.file} does not exist')\n\n        matched_parser = get_matching_glob(args.file)\n\n        if matched_parser:\n            files = { matched_parser: [args.file] }\n        else:\n            parser.error(f'File {args.file} is not supported by mrdpf')\n\n    cprint(f'++ Got {sum([len(data) for data in files.values()])} files to parse using {len(files)} parsers', 'cyan')\n\n    results = run_parsers(files)\n\n    cprint(f'=> Got results from {len(results)} parsers', 'green')\n\n    write_log = list()\n\n    for parser in results.keys():\n        cprint(f'++ Processing results from parser {parser}', 'cyan')\n\n        if parser == Parsers.PREFERENCES_PLIST:\n            for result in results[parser]:\n                file_name = create_file(args.out, 'preferences_plist', 'json')\n\n                cprint(f'=> Writing file {file_name}', 'green')\n\n                with open(file_name, 'w') as out_file:\n                    json.dump(result.data.preferences, out_file, cls=DataclassJSONEncoder, indent=4)\n                write_log.append([str(result.path), parser, file_name, ''])\n        elif parser == Parsers.APP_SUPPORT_DB:\n            for result in results[parser]:\n                # dump tables\n                folder_name = create_dir(os.path.join(args.out, 'DB_DUMP'))\n                cprint(f'=> Dumping app support database to folder {folder_name}', 'green')\n                dump_paths = result.data.dump_tables(folder_name)\n                write_log.append([str(result.path), parser, folder_name, 'Database Dump Folder'])\n                [write_log.append([str(dump_path), parser, folder_name, 'Database Table']) for dump_path in dump_paths]\n\n                # dump no-wal tables\n                if result.data.wal:\n                    folder_name = create_dir(os.path.join(args.out, 'DB_DUMP_IGNORE_WAL'))\n                    cprint(f'=> Dumping app support database (ignoring wal) to folder {folder_name}', 'green')\n                    dump_paths = result.data.dump_tables_nw(folder_name)\n                    write_log.append([str(result.path), parser, folder_name, 'Database Dump Folder'])\n                    [write_log.append([str(dump_path), parser, folder_name, 'Database Table']) for dump_path in dump_paths]\n\n                path = write_data(args.out, 'bookmarks', 'csv', result.data.bookmarks)\n                if path: write_log.append([str(result.path), parser, path, 'Parsed Bookmarks table'])\n\n                path = write_data(args.out, 'metadata', 'csv', result.data.metadata)\n                if path: write_log.append([str(result.path), parser, path, 'Parsed Metadata table'])\n\n                path = write_data(args.out, 
'bookmark_order', 'csv', result.data.bookmark_order)\n if path: write_log.append([str(result.path), parser, path, 'Parsed Bookmark Order table'])\n elif parser == Parsers.OFFLINE_STORAGE:\n for result in results[parser]:\n path = write_dataframe(args.out, 'offline_storage', 'csv', result.data.parameters)\n write_log.append([str(result.path), parser, path, 'Parsed Offline Storage data'])\n \n \n headers = ['Input File', 'Parsed Using', 'Parsed To', 'Extra Information']\n\n with open(os.path.join(args.out, 'write_log.csv'), 'w', newline='') as out_file:\n writer = csv.writer(out_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(headers)\n for line in write_log:\n writer.writerow(line)\n \n cprint(f'=> Wrote \"write_log.csv\" to {args.out}', 'green')\n cprint(f'== Done', 'cyan')","sub_path":"mrdpf_cli.py","file_name":"mrdpf_cli.py","file_ext":"py","file_size_in_byte":8741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"636691","text":"#!/usr/bin/python3\n# Implementing a parenthesis balance parser using a stack\n# process a string of parentheses from left to right\n# if opening parenthsis push into stack indicating that closing p\n#will appear later if closing parenthesis, pop stack, the parentheses are balenced if its possible to pop the stack to match every closing parenthesis\n\nimport stack\n\ndef parens_parser(str_parens):\n s = stack.Stack()\n balanced = True\n index = 0\n while index < len(str_parens) and balanced:\n symbol = str_parens[index]\n if symbol == '(':\n s.push(symbol)\n else:\n if s.is_empty():\n balanced = False\n else:\n s.pop()\n\n index +=1\n if balanced and s.is_empty():\n return True\n else:\n return False\n\n#test the function\nprint(parens_parser('((()()(())))))))'))\nprint(parens_parser('()(())'))\n","sub_path":"ds/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494180690","text":"#!/usr/bin/python3\nfrom sys import stdin\n'''\ntest cases are bad - inefficient - not testing algorithm itself\nx is much too big, bigger than n, which always gives n as answer.\nSample test case is much too simple either to test algorithm \ndescribed in problem description.\nWithout good test cases it is no fun to develop the algorithm, \nwithout knowing if it is correct :(\n'''\ndef find_max (a, x, y, n):\n if y >= n: return n\n # following is just to handle sample test case\n # no good solution !\n for i in range (n):\n if not a [i]:\n y += 1\n if y >= n: return n\n elif x - a [i] >= 0:\n y += 1\n x -= a [i]\n if y >= n: return n\n return y\n\ndef main ():\n read = stdin.readline\n t = int (read ())\n for t_ in range (t):\n n, x, y = map (int, read ().split ())\n a = list (map (int, read ().split ()))\n print (find_max (a, x, y, n))\n\nif __name__ == \"__main__\": main ()","sub_path":"_zero_subarray.py","file_name":"_zero_subarray.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81942368","text":"from django.urls import re_path\n\nfrom vlog import views\n\nurlpatterns = [\n re_path('^$', views.IndexView.as_view(), name='index'),\n\n re_path(\n '^home/$', views.IndexView.as_view(), name='index'\n ),\n\n re_path(\n '^home/categories/popular/$',\n views.PopularCategoriesView.as_view(), name='popular_categories'\n ),\n\n re_path(\n '^home/tags/popular/$',\n 
views.PopulatedTagsView.as_view(), name='populated_tags'\n ),\n\n re_path(\n '^home/articles/popular/$',\n views.PopularArticlesView.as_view(), name='popular_articles'\n ),\n\n re_path(\n '^home/articles/', views.ArticlesView.as_view(), name='articles'\n ),\n\n re_path(\n '^home/categories/$', views.CategoriesView.as_view(), name='categories'\n ),\n\n re_path(\n \"^home/categories/(?P[\\w-]+[']*)\"\n \"/articles/(?P[\\w-]+[']*)/\",\n views.ArticleView.as_view(), name='article'\n ),\n\n re_path(\n \"^home/categories/(?P[\\w-]+[']*)/\",\n views.CategoryView.as_view(), name='category'\n ),\n\n re_path(\n '^home/tags/$', views.TagsView.as_view(), name='tags'\n ),\n\n re_path(\n \"^home/tags/(?P[\\w-]+[']*)/\", views.TagView.as_view(), name='tag'\n ),\n]","sub_path":"src/vlog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176777452","text":"import utils\nimport logging\nimport time\nimport datetime as dt\nimport pandas as pd\nimport argparse\nimport subprocess\nimport numpy as np\nimport requests as r\n\n \nif __name__ == '__main__':\n # Parse cl arguments\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--inserts', type=int, default=10, help='Amount of rows to insert per loop.')\n ap.add_argument('-p', '--passes', type=int, default=5, help='Amount of times to loop query and insert jobs.')\n ap.add_argument('-ql', '--querylimit', type=int, default=20, help='Max rows in query response.')\n ap.add_argument('--host', type=str, default='https://www.iltalehti.fi/robots.txt', help='Ping destination.')\n ap = ap.parse_args()\n \n # Init logging\n logging.basicConfig(level=logging.DEBUG)\n # Get BQ client\n client = utils.bigquery_service()\n # Dict for df conversion\n timing = {'time': [], 'mean insert time': [], 'query time': [], 'ping': []}\n for i in range(ap.passes):\n # Ping host\n pingtimes = []\n for _ in range(5):\n t0 = time.time()\n r.get(ap.host)\n pingtimes.append(time.time() - t0)\n pingtimes = np.array(pingtimes)\n timing['ping'].append(np.median(pingtimes))\n \n # Current time\n timing['time'].append(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))\n \n # Inserts\n t0 = time.time()\n utils.dummy_streaming_inserts(client, insert_number=ap.inserts)\n mu_inserts = (time.time() - t0)/ap.inserts\n timing['mean insert time'].append(mu_inserts)\n logging.debug('mean insert time: {}'.format(mu_inserts))\n \n # Querying\n t0 = time.time()\n utils.dummy_query(client, limit=ap.querylimit)\n query_time = time.time() - t0\n timing['query time'].append(query_time)\n logging.debug('query time: {}'.format(query_time))\n \n timing_df = pd.DataFrame.from_dict(timing)\n timing_df.to_csv('times.csv')\n\n # To benchmark table on BQ\n table_ref = client.dataset('logging').table('benchmark')\n table = client.get_table(table_ref) # API request\n \n for i in range(len(timing_df['time'])):\n # To BQ\n rows_to_insert = [\n {'time': timing_df.loc[i, 'time'],\n 'mean_insert_time': timing_df.loc[i, 'mean insert time'],\n 'ping': timing_df.loc[i, 'ping'],\n 'query_time': timing_df.loc[i, 'query time']}\n ]\n # This should be []\n errors = client.insert_rows(table, rows_to_insert) # API request","sub_path":"bqbenchmark/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36152102","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Вариант: 
((N+5)^2 mod 19) + 1 = 8\n# Task: Use a dictionary containing the following keys: destination\n# name; train number; departure time. Write a program that performs the\n# following actions: reads data from the keyboard into a list of dictionaries\n# of the given structure; the records must be ordered by train number;\n# prints information about the train whose number is entered from the keyboard;\n# if there is no such train, prints an appropriate message.\n\nimport sys\n\n\nif __name__ == '__main__':\n    # List of trains.\n    trains = []\n\n    # Run an infinite command-request loop.\n    while True:\n        # Request a command from the terminal.\n        command = input(\">>> \").lower()\n\n        # Perform the action corresponding to the command.\n        if command == 'exit':\n            break\n\n        elif command == 'add':\n            # Request the train data.\n            name = input(\"Destination? \")\n            number = input(\"Train number? \")\n            time = input(\"Departure time? \")\n\n            # Create a dictionary\n            train = {\n                'name': name,\n                'number': number,\n                'time': time,\n            }\n\n            # Add the dictionary to the list.\n            trains.append(train)\n            # Sort the list if necessary.\n            if len(trains) > 1:\n                trains.sort(key=lambda item: item.get('number', ''))\n\n        elif command == 'list':\n            # Table header.\n            line = '+-{}-+-{}-+-{}-+-{}-+'.format(\n                '-' * 4,\n                '-' * 30,\n                '-' * 20,\n                '-' * 17\n            )\n            print(line)\n            print(\n                '| {:^4} | {:^30} | {:^20} | {:^17} |'.format(\n                    \"No.\",\n                    \"Destination\",\n                    \"Train number\",\n                    \"Departure time\",\n                )\n            )\n            print(line)\n\n            # Print the data for all trains.\n            for idx, train in enumerate(trains, 1):\n                print(\n                    '| {:>4} | {:<30} | {:<20} | {:>17} |'.format(\n                        idx,\n                        train.get('name', ''),\n                        train.get('number', ''),\n                        train.get('time', '')\n                    )\n                )\n\n            print(line)\n\n        elif command.startswith('select '):\n\n            # Split the command into parts to extract the train number.\n            parts = command.split(' ', maxsplit=1)\n\n            # Initialize the counter.\n            count = 0\n            # Check the train records in the list.\n            for train in trains:\n                if train.get('number', '') == parts[1]:\n                    count += 1\n                    print(\n                        '{:>4}: {}, {}, {}'.format(\n                            count,\n                            train.get('name', ' '),\n                            train.get('number', ' '),\n                            train.get('time', ' '))\n                    )\n\n            # If the counter equals 0, no train was found.\n            if count == 0:\n                print(\"No train with this number was found.\")\n\n        elif command == 'help':\n            # Print help on using the program.\n            print(\"Available commands:\\n\")\n            print(\"add - add a train;\")\n            print(\"list - print the list of trains;\")\n            print(\"select <number> - look up a train by its number;\")\n            print(\"help - show this help;\")\n            print(\"exit - quit the program.\")\n\n        else:\n            print(f\"Unknown command {command}\", file=sys.stderr)\n","sub_path":"individual_1.py","file_name":"individual_1.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339210065","text":"import re\n#fname = input(\"Enter file name: \")\nfname=(\"mbox-short.txt\")\ncuenta=dict()\ncorreo=list() #inicializa diccionario\nif len(fname) < 1 : fname = \"mbox-short.txt\"\ntry:\n    bigcount=None\n    bigword=None\n    texto = open(fname)\n    for linea in texto:\n        palabras=linea.split() #Separa en palabras el texto.\n        palabra=linea.rstrip() #elimina espacios\n        #if not line.startswith('From:'):continue #selecciona solo los que comiencen con From\n        if re.search('^From:',palabra):\n            #print(palabra)\n            for palabra in palabras:\n                cuenta[palabra]=cuenta.get(palabra,0)+1 #llena el 
diccionario con las tuplas palabras, cantidad\n for palabra,cantidad in cuenta.items():\n if palabra=='From:':\n continue\n elif bigcount is None or cantidad > bigcount:\n bigword=palabra\n bigcount=cantidad\nexcept:\n print(\"Error\")\nprint(bigword,bigcount)\n","sub_path":"tarea10_3.py","file_name":"tarea10_3.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627093990","text":"import json\r\nimport requests\r\nimport pypyodbc\r\n\r\nconnection = pypyodbc.connect('Driver={SQL Server};'\r\n 'Server=localhost\\SQLEXPRESS;'\r\n 'Database=ffl;'\r\n 'Trusted_Connection=yes;')\r\n\r\nresponse = requests.get(\"https://fantasy.premierleague.com/drf/fixtures/\")\r\nbootstrap = json.loads(response.text)\r\ncursor = connection.cursor()\r\nfor element in bootstrap:\r\n if element[\"finished\"]: \r\n cursor.execute(\"insert into dbo.fixture values (?,?, ?, ?, ?, ?, ?) \", \r\n [element[\"id\"],\r\n element[\"team_h\"],\r\n element[\"team_a\"],\r\n element[\"team_h_score\"],\r\n element[\"team_a_score\"],\r\n element[\"kickoff_time\"],\r\n element[\"event\"]])\r\nconnection.commit() \r\nconnection.close()\r\n\r\n","sub_path":"2019_work/insertFixtures.py","file_name":"insertFixtures.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103916904","text":"\n# coding: utf-8\n\n########################################################################\n\"\"\"\nMise en place d'un premier réseau à deux couches, avec une convolution \nsuivie d'une phase de pooling où la taille de l'image d'entrée est divisée\npar deux.\n\nobjectif: transférer les codes de forward de caffe à tensorflow\nbase de donnée: mnist\nsimplification : on va considérer une convolution \"simple\" sans padding,\navec un stride de 1\n\n\"\"\"\n########################################################################\n\nimport tensorflow as tf\nimport time\nimport pdb\n######################## couche de convolution ########################\ndef dk_conv(x, f, f_corr , alpha):\n\t\"\"\"\n\t\t+ x est l'image (ou le feature map) d'entrée NHWC\n\t\t+ f est tensor contenant l'ensemble des filtres servant à faire la convolution\n\t\t+ f_corr est la matrice contenant le résultat du calcul k1(Z.T * Z)^(-1/2)\n\t\t+ x et f ont des dimensions permettant de faire un convolution à deux dimensions\n\t\"\"\"\n\t\n\tsess = tf.Session()\n\t\n\tstride = [1,1,1,1]\n\tconv = tf.nn.conv2d(input = x,filter = f, strides = stride, padding = \"VALID\", data_format = \"NHWC\") \n\t# récupération des données concernant les filtres \"fh,fw,ic,oc\"\n\tfh,fw,ic,oc = sess.run(tf.shape(f))\n\t#fh,fw,ic,oc = tf.shape(f)\n\t# récupération des données liées aux images \n\tn,h,w,c = sess.run(tf.shape(x))\n\t\n\t#print \"fh: {} \\t fw: {} \\t ic: {} \\t oc: {}\".format(fh,fw,ic,oc)\n\t\n\tmat_norm = []\n\tstart = time.time()\n\t# générer les patches \n\tpatches = tf.extract_image_patches(x, [1,fh,fw,1,],[1,1,1,1],[1,1,1,1], padding = \"VALID\")\n\t\n\t# prendre la norme de ces patches\n\tpatches_norm = tf.norm(patches, ord = 2, axis = 3, keep_dims = True )\n\t\n\t# remplacer les normes inférieurs à 10^-12 par 10^-12, la valeur 10^-12 est choisie puisque c'est la valeur par défaut dans tf.l2_normalize\n\tpatches_norm = tf.where( condition = (patches_norm<1e-15), x = 1e-15*tf.ones_like(patches_norm), y = patches_norm, name = 'patches_norm' ) \n\t# dupliquer ces normes de sorte à pouvoir 
faire un produit element-wise avec avec conv\n\t# en divisant chaque convolution par la norme du patch correspondant\n\t\n\n\tpatches_norm = tf.tile( input = patches_norm, multiples = [1,1,1,oc]) # dupliquer oc fois pour faire le produit element-wise\n\n\t\n\t# calcul de k1(Z.T * x/||x||) ou k1 (y) = exp(-alpha * y)\n\tkernel_conv = tf.exp(- alpha * conv / patches_norm)\n\t\n\n############\t\n#\tpdb.set_trace()\n############\t\n\t\n\n\t# mise en forme de kernel_conv pour le calcul du produit f_corr = k1(Z.T * Z)^(-1/2)\n\n\t# transposition de la matrice kernel_conv de la forme \"NHWC\" à la forme \"CNHW\"\n\tkernel_conv = tf.transpose(kernel_conv, perm = [3,0,1,2])\n\t\n\n\n\t# reshape de kernel_conv pour la mettre sous la forme \"C x N*H*W\" où les H*W premiers éléments\n\t# représentent la première image, les H*W suivant la deuxième et ainsi de suite\n\tkernel_conv = tf.reshape( kernel_conv, shape = [oc, -1] ) \n\t\n\t\n\t# calcul du produit f_corr * kernel_conv\n\tkernel_conv = tf.matmul(f_corr, kernel_conv)\n\t\n\t# reshape de kernel_conv sous la forme \" C x N x H x W \"\n\tkernel_conv = tf.reshape(kernel_conv, shape = [oc,n,h-fh+1,w-fw+1] ) # h-fh+1 est la dimension après la convolution sans padding\n\t\n\t# transposition pour obtenir la forme \"NHWC\" et multiplication pour garder l'homogénéité\n\tkernel_conv = tf.transpose(kernel_conv, perm = [1,2,3,0])\n\tkernel_conv = tf.multiply(patches_norm,kernel_conv)\n\t\t\n\tsess.close()\n\t#print \"temps total de calcul de kernel_conv {}\".format(time.time()-start)\n\treturn kernel_conv \n########################################################################\n\"\"\"\n# importation de la base de données MNIST\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot = True)\n\nsess = tf.Session()\n\nx = tf.placeholder(tf.float32, shape = [None, 28, 28, 1]) # NHWC\ny_ = tf.placeholder( tf.float32 , shape = [None, 10])\n\n################### initialisation des info concernant les filtres \n# cette partie random devra être remplacée par les filtres précalculé \nfh = 3 # filter_height \nfw = 3 # filter_width\nic = 1 # in_channels, nombre de canaux de l'entrée (1 pour cifar-10)\noc = 64 # out_channels, nombre de filtres\nfs = [fh, fw, ic, oc] # filter_shape\nrand_patches = tf.random_normal(shape = fs, mean = 0.0, stddev = 0.1)\n\n\nalpha = tf.constant(4.0)\n\n# récupération d'un batch MNIST\nbatch_size = 64\nx, y_ = mnist. 
train.next_batch(batch_size)\nf_corr_test = tf.random_normal(shape = [oc, oc] , mean = 1, stddev = 0.5)\n# reshape de x pour la mettre sous la forme d'une image\nx = tf.reshape(x,shape =[-1,28,28,1]) # NHWC\n\ntemp = dk_conv(x, rand_patches, f_corr_test, alpha)\n\nsess.run(temp)\n\nprint \"shape, temp : {} \".format( sess.run(tf.shape(temp))) \n\n\n\n\nsess.close()\n\"\"\"\n","sub_path":"dk_conv.py","file_name":"dk_conv.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580992653","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nsys.path.append('../../ican/tools')\nsys.path.append('../../ican/lib')\nsys.path.append('../../frcnn/tools')\nsys.path.append('../../frcnn/lib')\n\nimport _init_paths\nfrom PIL import Image\nfrom ult.config import cfg\nfrom utils.timer import Timer\nimport _init_paths\nimport pickle\nimport json\nimport numpy as np\nimport cv2\nimport multiprocessing as mp\n\nimport sys\nimport datetime\nimport mp3play\nclip = mp3play.load('../../put_down_knife.mp3')\n\nCLASSES = ('__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus','train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter','bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack','umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite','baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl','banana', 'apple', 'sandwich', 'orange', 'broccoli','carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table','toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven','toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier','toothbrush')\n\n\nfrom Demo import run_ican\nfrom Object_Detector import *\n\ndef end_pt(start_pt, text, font_size, count):\n y2 = start_pt[1] + 40\n x2 = start_pt[0] + 400\n \n return (int(x2), int(y2))\n\ndef create_text(img, text, shape, count, H_box):\n font_size = 1\n x1,y1, x2, y2 = H_box\n x1,y1, x2, y2 = int(x1),int(y1), int(x2), int(y2)\n start_pt = (x1+5, y1+5+30*(count-1))\n \n cv2.rectangle(img, start_pt, end_pt(start_pt, text, font_size, count), get_color(count),thickness = -1)\n cv2.putText(\n img, text, (x1 + 10, y1 -5 +(count*30)), cv2.FONT_HERSHEY_COMPLEX_SMALL, font_size, (255,255,255), 2, cv2.LINE_AA\n )\n \n \n \ndef get_color(number):\n# print('number:', number)\n num = str(int(number)%10)\n font_color = {\n '0': (100, 0, 0),\n '1': (0, 100, 0),\n '2': (0, 0, 100),\n '3': (100, 100, 0),\n '4': (100, 0, 100),\n '5': (0, 100, 100),\n '6': (100, 100, 100),\n '7': (200, 0, 0),\n '8': (0, 200, 0),\n '9': (0, 0, 200)\n }\n \n return font_color[num]\n\ndef create_bbox(img, box, count):\n x1,y1, x2, y2 = box\n x1,y1, x2, y2 = int(x1),int(y1), int(x2), int(y2)\n# print(x1,y1, x2, y2)\n \n cv2.line(img, (x1, y1), (x1, y2), get_color(count), 2)\n \n cv2.line(img, (x1, y2), (x2, y2), get_color(count), 2)\n \n cv2.line(img, (x2, y2), (x2, y1), get_color(count), 2)\n \n cv2.line(img, (x2, y1), (x1, y1), get_color(count), 2)\n \ndef print_image(Detection, im_data):\n img_shape = list(im_data.shape)\n new_shape = list(im_data.shape)\n# new_shape[1] = 
img_shape[1]+int(img_shape[1]*0.5)\n new_img = np.zeros(tuple(new_shape), np.uint8)\n new_img.fill(255)\n \n new_img[:img_shape[0],:img_shape[1]] = im_data\n\n HO_dic = {}\n HO_set = set()\n count = 0\n # print(Detection)\n action_count = -1\n had_knife, cut_obj_agent, hit_obj_agent = False, False, False\n for ele in Detection:\n \n H_box = ele['person_box'] \n\n if tuple(H_box) not in HO_set:\n HO_dic[tuple(H_box)] = count\n HO_set.add(tuple(H_box))\n count += 1 \n\n show_H_flag = 0\n\n if ele['smile'][4] > 0.5:\n action_count += 1 \n show_H_flag = 1\n text = 'smile, ' + \"%.2f\" % ele['smile'][4]\n create_text(new_img, text, img_shape, action_count, H_box)\n \n\n if ele['stand'][4] > 0.5:\n action_count += 1 \n show_H_flag = 1\n text = 'stand, ' + \"%.2f\" % ele['stand'][4]\n create_text(new_img, text, img_shape, action_count, H_box)\n\n if ele['run'][4] > 0.5:\n action_count += 1 \n show_H_flag = 1\n text = 'run, ' + \"%.2f\" % ele['run'][4]\n create_text(new_img, text, img_shape, action_count, H_box)\n\n if ele['walk'][4] > 0.5:\n action_count += 1 \n show_H_flag = 1\n text = 'walk, ' + \"%.2f\" % ele['walk'][4]\n create_text(new_img, text, img_shape, action_count, H_box)\n \n if show_H_flag == 1:\n create_bbox(new_img, H_box, action_count)\n\n for action_key, action_value in ele.items():\n if ele['cut_obj_agent'] >= 0.9:\n cut_obj_agent = True\n if ele['hit_obj_agent'] >= 0.9:\n hit_obj_agent = True\n \n if (action_key.split('_')[-1] != 'agent') and action_key != 'image_id' and action_key != 'person_box':\n if (not np.isnan(action_value[0])) and (action_value[5] > 0.05):\n# print('active: ', CLASSES[np.int(action_value[4])])\n O_box = action_value[:4]\n\n action_count += 1\n\n if tuple(O_box) not in HO_set:\n HO_dic[tuple(O_box)] = count\n HO_set.add(tuple(O_box))\n count += 1 \n\n create_bbox(new_img, H_box, action_count)\n if CLASSES[np.int(action_value[4])] == 'knife':\n had_knife = True\n \n text = action_key.split('_')[0] + ' ' + CLASSES[np.int(action_value[4])] + ', ' + \"%.2f\" % action_value[5]\n create_text(new_img, text, img_shape, action_count, H_box)\n create_bbox(new_img, O_box, action_count)\n if count >=2 and had_knife and cut_obj_agent and hit_obj_agent:\n print('放下屠刀立地成佛')\n clip.play()\n time.sleep(3) \n clip.stop()\n \n\n return new_img \n # cv2.imshow('frame', new_img[:,:,::-1])\n\n# 選擇第二隻攝影機\ncap = cv2.VideoCapture(0)\ncap.set(3,420)\ncap.set(4,320)\n\ndef main(frame, q):\n \n object_detection = run_frcnn(frame)\n print('after object_detection')\n tf.reset_default_graph()\n\n sess, hoi = run_ican(object_detection, frame)\n print('hoi: ', hoi)\n q.put(print_image(hoi, frame))\n\nwhile(cap.isOpened()):\n # 從攝影機擷取一張影像\n ret, frame = cap.read()\n '''\n q = mp.Queue()\n p = mp.Process(target=main,args=(frame, q))\n p.start()\n p.join()\n frame = q.get()\n '''\n # -----------------\n \n timer1 = Timer()\n timer1.tic()\n object_detection = run_frcnn(frame)\n print('object_detection: ', timer1.toc(average=False))\n print('-----------')\n #print('object_detection: ', object_detection)\n timer2 = Timer()\n timer2.tic()\n sess, hoi = run_ican(object_detection, frame)\n #print('hoi:', hoi)\n\n \n # -----------------\n timer3 = Timer()\n timer3.tic()\n frame = print_image(hoi, frame)\n print('print: ', timer3.toc(average=False))\n \n cv2.imshow('frame', frame)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # sleep(5)\n\n# 釋放攝影機\ncap.release()\n\n# 關閉所有 OpenCV 視窗\ncv2.destroyAllWindows()\n\nsess.close()\nfrcnn_sess.close()\n\n'''\n{'cut_instr': 2,\n 'snowboard_instr': 
21,\n 'cut_obj': 4,\n 'surf_instr': 0,\n 'skateboard_instr': 26,\n 'kick_obj': 7,\n 'eat_obj': 9,\n 'carry_obj': 14,\n 'throw_obj': 15,\n 'eat_instr': 16,\n 'smile': 17,\n 'look_obj': 18,\n 'hit_instr': 19,\n 'hit_obj': 20,\n 'ski_instr': 1,\n 'run': 22,\n 'sit_instr': 10,\n 'read_obj': 24,\n 'ride_instr': 5,\n 'walk': 3,\n 'point_instr': 23,\n 'jump_instr': 11,\n 'work_on_computer_instr': 8,\n 'hold_obj': 25,\n 'drink_instr': 13,\n 'lay_instr': 12,\n 'talk_on_phone_instr': 6,\n 'stand': 27,\n 'catch_obj': 28}\n'''","sub_path":"frcnn/tools/live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":7925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"581943795","text":"#!/usr/bin/env python\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# California Institute of Technology\n# (C) 2006 All Rights Reserved\n#\n# {LicenseText}\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\n\nfrom .BatchScheduler import BatchScheduler\nimport os\nimport sys\n\n\nclass SchedulerLSF(BatchScheduler):\n\n name = \"lsf\"\n\n import pythia.pyre.inventory\n\n command = pythia.pyre.inventory.str(\"command\", default=\"bsub\")\n bsubOptions = pythia.pyre.inventory.list(\"bsub-options\")\n\n def schedule(self, job):\n import pythia.pyre.util as util\n\n # Fix-up the job.\n if not job.task:\n # LSF scripts must have a job name; otherwise strange\n # \"/bin/sh: Event not found\" errors occur (tested on\n # TACC's Lonestar system).\n job.task = \"jobname\"\n job.walltime = util.hms(job.dwalltime.value)\n job.arguments = ' '.join(job.arguments)\n\n # Generate the main LSF batch script.\n script = self.retrieveTemplate('batch.sh', ['schedulers', 'scripts', self.name])\n if script is None:\n self._error.log(\"could not locate batch script template for '%s'\" % self.name)\n sys.exit(1)\n\n script.scheduler = self\n script.job = job\n\n if self.dry:\n print(script)\n return\n\n try:\n import subprocess\n\n cmd = [self.command]\n if self.wait:\n cmd.append(\"-K\")\n self._info.log(\"spawning: %s\" % ' '.join(cmd))\n child = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)\n self._info.log(\"spawned process %d\" % child.pid)\n\n child.stdin.write(script.encode(\"utf-8\"))\n child.stdin.close()\n\n if self.wait:\n self._info.log(\"Waiting for dispatch...\")\n\n for line in child.stdout:\n self._info.line(\" \" + line.rstrip())\n status = child.wait()\n self._info.log()\n\n exitStatus = None\n if (os.WIFSIGNALED(status)):\n statusStr = \"signal %d\" % os.WTERMSIG(status)\n elif (os.WIFEXITED(status)):\n exitStatus = os.WEXITSTATUS(status)\n statusStr = \"exit %d\" % exitStatus\n else:\n statusStr = \"status %d\" % status\n self._info.log(\"%s: %s\" % (cmd[0], statusStr))\n\n except IOError as e:\n self._error.log(\"%s: %s\" % (self.command, e))\n return\n\n # \"[When given the -K option], bsub will exit with the same\n # exit code as the job so that job scripts can take\n # appropriate actions based on the exit codes. 
bsub exits with\n        #   value 126 if the job was terminated while pending.\"\n        if exitStatus == 0:\n            pass\n        elif self.wait:\n            sys.exit(exitStatus)\n        else:\n            sys.exit(\"%s: %s: %s\" % (sys.argv[0], cmd[0], statusStr))\n\n        return\n\n    def jobId(cls):\n        return os.environ['LSB_JOBID']\n    jobId = classmethod(jobId)\n\n\n# end of file\n","sub_path":"pythia/pyre/schedulers/SchedulerLSF.py","file_name":"SchedulerLSF.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415732151","text":"import numpy as np\n\ndef graph_edge_positions(graph,r):\n    \n    \"\"\"\n    Function: Get line segments of graph with node positions r\n    \n    Arguments\n    ---------\n    \n    graph[n_points:n_points]: sparse matrix\n        adjacency matrix of graph\n    \n    \n    r[:,:]: float\n        array of point positions [n_points:n_dim]\n    \n    Result\n    ------\n    start[:,:],end[:,:]: float\n        arrays of start and end points\n    \"\"\"\n    \n    i,j=graph.nonzero()\n    start=r[i,:]\n    end=r[j,:]\n    \n    return start,end\n\ndef graph_edge_lengths(graph):\n    \n    \"\"\"\n    Function: Get line edge lengths of graph\n    \n    Arguments\n    ---------\n    \n    graph[n_points:n_points]: sparse matrix\n        adjacency matrix of graph\n    \n    Result\n    ------\n    lengths[:]: float\n        array edge lengths\n    \"\"\"\n    \n    i,j=graph.nonzero()\n    lengths=np.array(graph[i,j])[0,:]\n    \n    return lengths","sub_path":"maths/graphs/edges.py","file_name":"edges.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"278976318","text":"# This script creates a report of Actual v/s Predicted conversion_status\r\n\r\nimport datetime\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.metrics import recall_score as rc\r\nfrom sklearn.metrics import balanced_accuracy_score as bac\r\n\r\n\r\ndef evaluate_report(data_stream, prediction_date):\r\n    \"\"\"\r\n    Function to get the prediction report of a specific date.\r\n    :param data_stream: the object that lets us retrieve the input data, data type: module_dep.Datastream object\r\n    :param prediction_date: the date for which the prediction report was generated, data type: datetime.date object\r\n    :return: DataFrame consisting of the PvA report\r\n    \"\"\"\r\n    base_path = os.path.dirname(os.path.realpath(__file__))\r\n    \r\n    filename_prediction_report = 'prediction_report_' + datetime.datetime.strftime(prediction_date, '%Y%m%d') + '.csv'\r\n    # If the report for the day is not generated return this message\r\n    if not os.path.isfile(os.path.join(base_path, filename_prediction_report)):\r\n        return \"Prediction report for the selected date doesn't exist.\"\r\n    \r\n    # Getting the actual and predicted data\r\n    df_actuals = data_stream.get_data(prediction_date)\r\n    df_predicted = pd.read_csv(os.path.join(base_path, filename_prediction_report))\r\n    \r\n    # Merging the actual and predicted files on email\r\n    df_actuals_predicted = pd.merge(df_actuals, df_predicted, on='email', how='inner')\r\n    \r\n    # Creating a DataFrame to store the PvA report\r\n    df_display = pd.DataFrame()\r\n\r\n    # Converting the probabilities into binary choices based on the threshold 0.5\r\n    df_display['conversion_status_predicted'] = pd.Series(np.where(df_actuals_predicted['conversion_probability']>=0.5, 1, 0), dtype=int)\r\n    y_pred = df_display['conversion_status_predicted'].to_numpy()\r\n\r\n    # Extracting actual conversion_status from the merged frame\r\n    y_true = df_actuals_predicted['conversion_status'].to_numpy()\r\n
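    # (added note) y_pred and y_true are aligned row by row because both come from the same merged frame df_actuals_predicted\r\n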
    # Storing the extracted y's into the new DataFrame that we created\r\n    df_display['conversion_status_actual'] = pd.Series(y_true)\r\n\r\n    # Creating a confusion matrix for actual v/s predicted\r\n    df_confusion = pd.crosstab(y_true, y_pred, rownames=['Actual'], colnames=['Predicted'], margins=True)\r\n    \r\n    # Calculating the Balanced Accuracy and Recall Scores\r\n    bac_score = bac(y_true, y_pred)\r\n    rc_score = rc(y_true, y_pred)\r\n    \r\n    # Calculating the conversion_rate\r\n    conversion_predicted = df_display['conversion_status_predicted'].sum()\r\n    conversion_actual = df_display['conversion_status_actual'].sum()\r\n    conversion_rate = (conversion_predicted/conversion_actual)*100\r\n    \r\n    return rc_score, bac_score, conversion_rate\r\n\r\n###################################################################################\r\n","sub_path":"members_ownership/Rishav/module_PvA.py","file_name":"module_PvA.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478086134","text":"def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference):\n    if (distribution_id is None):\n        distribution_id = self.validate_distribution_id_from_caller_reference(caller_reference=caller_reference)\n    try:\n        waiter = client.get_waiter('distribution_deployed')\n        attempts = (1 + int((wait_timeout / 60)))\n        waiter.wait(Id=distribution_id, WaiterConfig={\n            'MaxAttempts': attempts,\n        })\n    except botocore.exceptions.WaiterError as e:\n        self.module.fail_json(msg='Timeout waiting for cloudfront action. Waited for {0} seconds before timeout. Error: {1}'.format(to_text(wait_timeout), to_native(e)))\n    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n        self.module.fail_json_aws(e, msg='Error getting distribution {0}'.format(distribution_id))","sub_path":"Data Set/bug-fixing-5/e5269c047cc811f8f51d4c152098dd9f5066109a--bug.py","file_name":"e5269c047cc811f8f51d4c152098dd9f5066109a--bug.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565651274","text":"# Electron Cash - lightweight Bitcoin client\n# Copyright (C) 2017 The Electron Cash Developers\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation files\n# (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge,\n# publish, distribute, sublicense, and/or sell copies of the Software,\n# and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Many of the functions in this file are copied from ElectrumX\n\nfrom collections import namedtuple\nimport hashlib\nimport struct\n\nfrom . 
import cashaddr, networks\nfrom enum import IntEnum\nfrom .bitcoin import EC_KEY, is_minikey, minikey_to_private_key, SCRIPT_TYPES\nfrom .util import cachedproperty, inv_dict\n\n_sha256 = hashlib.sha256\n_new_hash = hashlib.new\nhex_to_bytes = bytes.fromhex\n\n\nclass AddressError(Exception):\n '''Exception used for Address errors.'''\n\nclass ScriptError(Exception):\n '''Exception used for Script errors.'''\n\n\n# Derived from Bitcoin-ABC script.h\nclass OpCodes(IntEnum):\n # push value\n OP_0 = 0x00\n OP_FALSE = OP_0\n OP_PUSHDATA1 = 0x4c\n OP_PUSHDATA2 = 0x4d\n OP_PUSHDATA4 = 0x4e\n OP_1NEGATE = 0x4f\n OP_RESERVED = 0x50\n OP_1 = 0x51\n OP_TRUE = OP_1\n OP_2 = 0x52\n OP_3 = 0x53\n OP_4 = 0x54\n OP_5 = 0x55\n OP_6 = 0x56\n OP_7 = 0x57\n OP_8 = 0x58\n OP_9 = 0x59\n OP_10 = 0x5a\n OP_11 = 0x5b\n OP_12 = 0x5c\n OP_13 = 0x5d\n OP_14 = 0x5e\n OP_15 = 0x5f\n OP_16 = 0x60\n\n # control\n OP_NOP = 0x61\n OP_VER = 0x62\n OP_IF = 0x63\n OP_NOTIF = 0x64\n OP_VERIF = 0x65\n OP_VERNOTIF = 0x66\n OP_ELSE = 0x67\n OP_ENDIF = 0x68\n OP_VERIFY = 0x69\n OP_RETURN = 0x6a\n\n # stack ops\n OP_TOALTSTACK = 0x6b\n OP_FROMALTSTACK = 0x6c\n OP_2DROP = 0x6d\n OP_2DUP = 0x6e\n OP_3DUP = 0x6f\n OP_2OVER = 0x70\n OP_2ROT = 0x71\n OP_2SWAP = 0x72\n OP_IFDUP = 0x73\n OP_DEPTH = 0x74\n OP_DROP = 0x75\n OP_DUP = 0x76\n OP_NIP = 0x77\n OP_OVER = 0x78\n OP_PICK = 0x79\n OP_ROLL = 0x7a\n OP_ROT = 0x7b\n OP_SWAP = 0x7c\n OP_TUCK = 0x7d\n\n # splice ops\n OP_CAT = 0x7e\n OP_SPLIT = 0x7f # after monolith upgrade (May 2018)\n OP_NUM2BIN = 0x80 # after monolith upgrade (May 2018)\n OP_BIN2NUM = 0x81 # after monolith upgrade (May 2018)\n OP_SIZE = 0x82\n\n # bit logic\n OP_INVERT = 0x83\n OP_AND = 0x84\n OP_OR = 0x85\n OP_XOR = 0x86\n OP_EQUAL = 0x87\n OP_EQUALVERIFY = 0x88\n OP_RESERVED1 = 0x89\n OP_RESERVED2 = 0x8a\n\n # numeric\n OP_1ADD = 0x8b\n OP_1SUB = 0x8c\n OP_2MUL = 0x8d\n OP_2DIV = 0x8e\n OP_NEGATE = 0x8f\n OP_ABS = 0x90\n OP_NOT = 0x91\n OP_0NOTEQUAL = 0x92\n\n OP_ADD = 0x93\n OP_SUB = 0x94\n OP_MUL = 0x95\n OP_DIV = 0x96\n OP_MOD = 0x97\n OP_LSHIFT = 0x98\n OP_RSHIFT = 0x99\n\n OP_BOOLAND = 0x9a\n OP_BOOLOR = 0x9b\n OP_NUMEQUAL = 0x9c\n OP_NUMEQUALVERIFY = 0x9d\n OP_NUMNOTEQUAL = 0x9e\n OP_LESSTHAN = 0x9f\n OP_GREATERTHAN = 0xa0\n OP_LESSTHANOREQUAL = 0xa1\n OP_GREATERTHANOREQUAL = 0xa2\n OP_MIN = 0xa3\n OP_MAX = 0xa4\n\n OP_WITHIN = 0xa5\n\n # crypto\n OP_RIPEMD160 = 0xa6\n OP_SHA1 = 0xa7\n OP_SHA256 = 0xa8\n OP_HASH160 = 0xa9\n OP_HASH256 = 0xaa\n OP_CODESEPARATOR = 0xab\n OP_CHECKSIG = 0xac\n OP_CHECKSIGVERIFY = 0xad\n OP_CHECKMULTISIG = 0xae\n OP_CHECKMULTISIGVERIFY = 0xaf\n\n # expansion\n OP_NOP1 = 0xb0\n OP_CHECKLOCKTIMEVERIFY = 0xb1\n OP_NOP2 = OP_CHECKLOCKTIMEVERIFY\n OP_CHECKSEQUENCEVERIFY = 0xb2\n OP_NOP3 = OP_CHECKSEQUENCEVERIFY\n OP_NOP4 = 0xb3\n OP_NOP5 = 0xb4\n OP_NOP6 = 0xb5\n OP_NOP7 = 0xb6\n OP_NOP8 = 0xb7\n OP_NOP9 = 0xb8\n OP_NOP10 = 0xb9\n\n # More crypto\n OP_CHECKDATASIG = 0xba\n OP_CHECKDATASIGVERIFY = 0xbb\n\n # additional byte string operations\n OP_REVERSEBYTES = 0xbc\n\n\nP2PKH_prefix = bytes([OpCodes.OP_DUP, OpCodes.OP_HASH160, 20])\nP2PKH_suffix = bytes([OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG])\n\nP2SH_prefix = bytes([OpCodes.OP_HASH160, 20])\nP2SH_suffix = bytes([OpCodes.OP_EQUAL])\n\n# Utility functions\n\ndef to_bytes(x):\n '''Convert to bytes which is hashable.'''\n if isinstance(x, bytes):\n return x\n if isinstance(x, bytearray):\n return bytes(x)\n raise TypeError('{} is not bytes ({})'.format(x, type(x)))\n\ndef hash_to_hex_str(x):\n '''Convert a big-endian binary hash to 
displayed hex string.\n\n    Display form of a binary hash is reversed and converted to hex.\n    '''\n    return bytes(reversed(x)).hex()\n\ndef hex_str_to_hash(x):\n    '''Convert a displayed hex string to a binary hash.'''\n    return bytes(reversed(hex_to_bytes(x)))\n\ndef bytes_to_int(be_bytes):\n    '''Interprets a big-endian sequence of bytes as an integer'''\n    return int.from_bytes(be_bytes, 'big')\n\ndef int_to_bytes(value):\n    '''Converts an integer to a big-endian sequence of bytes'''\n    return value.to_bytes((value.bit_length() + 7) // 8, 'big')\n\ndef sha256(x):\n    '''Simple wrapper of hashlib sha256.'''\n    return _sha256(x).digest()\n\ndef double_sha256(x):\n    '''SHA-256 of SHA-256, as used extensively in bitcoin.'''\n    return sha256(sha256(x))\n\ndef ripemd160(x):\n    '''Simple wrapper of hashlib ripemd160.'''\n    h = _new_hash('ripemd160')\n    h.update(x)\n    return h.digest()\n\ndef hash160(x):\n    '''RIPEMD-160 of SHA-256.\n\n    Used to make bitcoin addresses from pubkeys.'''\n    return ripemd160(sha256(x))\n\nclass UnknownAddress(namedtuple(\"UnknownAddress\", \"meta\")):\n\n    def __new__(cls, meta=None):\n        return super(UnknownAddress, cls).__new__(cls, meta)\n\n    def to_ui_string(self):\n        if self.meta is not None:\n            meta = self.meta\n            meta = (isinstance(meta, (bytes, bytearray)) and meta.hex()) or meta\n            if isinstance(meta, str) and len(meta) > 10:\n                l = len(meta) // 2\n                meta = \"…\" + meta[l-4:l+4] + \"…\"\n            return f'<UnknownAddress {meta}>'\n        return '<UnknownAddress>'\n\n    def __str__(self):\n        return self.to_ui_string()\n\n    def __repr__(self):\n        return self.to_ui_string()\n\n\nclass PublicKey(namedtuple(\"PublicKeyTuple\", \"pubkey\")):\n\n    TO_ADDRESS_OPS = [OpCodes.OP_DUP, OpCodes.OP_HASH160, -1,\n                      OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG]\n\n    @classmethod\n    def from_pubkey(cls, pubkey):\n        '''Create from a public key expressed as binary bytes.'''\n        if isinstance(pubkey, str):\n            pubkey = hex_to_bytes(pubkey)\n        cls.validate(pubkey)\n        return cls(to_bytes(pubkey))\n\n    @classmethod\n    def privkey_from_WIF_privkey(cls, WIF_privkey, *, net=None):\n        '''Given a WIF private key (or minikey), return the private key as\n        binary and a boolean indicating whether it was encoded to\n        indicate a compressed public key or not.\n        '''\n        if net is None: net = networks.net\n        if is_minikey(WIF_privkey):\n            # The Casascius coins were uncompressed\n            return minikey_to_private_key(WIF_privkey), False\n        raw = Base58.decode_check(WIF_privkey)\n        if not raw:\n            raise ValueError('Private key WIF decode error; unable to decode.')\n        if raw[0] != net.WIF_PREFIX:\n            # try and generate a helpful error message as this propagates up to the UI if they are creating a new wallet.\n            extra = inv_dict(SCRIPT_TYPES).get(int(raw[0]-net.WIF_PREFIX), '')\n            if extra:\n                extra = \"; this corresponds to a key of type: '{}' which is unsupported for importing from WIF key.\".format(extra)\n            raise ValueError(\"Private key has invalid WIF version byte (expected: 0x{:x} got: 0x{:x}){}\".format(net.WIF_PREFIX, raw[0], extra))\n        if len(raw) == 34 and raw[-1] == 1:\n            return raw[1:33], True\n        if len(raw) == 33:\n            return raw[1:], False\n        raise ValueError('invalid private key')\n\n    @classmethod\n    def from_WIF_privkey(cls, WIF_privkey):\n        '''Create a compressed or uncompressed public key from a private\n        key.'''\n        privkey, compressed = cls.privkey_from_WIF_privkey(WIF_privkey)\n        ec_key = EC_KEY(privkey)\n        return cls.from_pubkey(ec_key.GetPubKey(compressed))\n\n    @classmethod\n    def from_string(cls, string):\n        '''Create from a hex string.'''\n        return cls.from_pubkey(hex_to_bytes(string))\n
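\n    # Illustrative sketch only (hypothetical key bytes, not from the wallet\n    # code itself): validate() below checks just the length and prefix byte,\n    # it does not verify that the encoded point lies on the curve.\n    #   pk = PublicKey.from_pubkey('02' + '11' * 32)\n    #   assert pk.is_compressed()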
\n    @classmethod\n    def validate(cls, pubkey):\n        if not isinstance(pubkey, (bytes, bytearray)):\n            raise TypeError('pubkey must be of bytes type, not {}'\n                            .format(type(pubkey)))\n        if len(pubkey) == 33 and pubkey[0] in (2, 3):\n            return    # Compressed\n        if len(pubkey) == 65 and pubkey[0] == 4:\n            return    # Uncompressed\n        raise AddressError('invalid pubkey {}'.format(pubkey))\n\n    @cachedproperty\n    def address(self):\n        '''Convert to an Address object.'''\n        return Address(hash160(self.pubkey), Address.ADDR_P2PKH)\n\n    def is_compressed(self):\n        '''Returns True if the pubkey is compressed.'''\n        return len(self.pubkey) == 33\n\n    def to_ui_string(self):\n        '''Convert to a hexadecimal string.'''\n        return self.pubkey.hex()\n\n    def to_storage_string(self):\n        '''Convert to a hexadecimal string for storage.'''\n        return self.pubkey.hex()\n\n    def to_script(self):\n        '''Note this returns the P2PK script.'''\n        return Script.P2PK_script(self.pubkey)\n\n    def to_script_hex(self):\n        '''Return a script to pay to the address as a hex string.'''\n        return self.to_script().hex()\n\n    def to_scripthash(self):\n        '''Returns the hash of the script in binary.'''\n        return sha256(self.to_script())\n\n    def to_scripthash_hex(self):\n        '''Like other bitcoin hashes this is reversed when written in hex.'''\n        return hash_to_hex_str(self.to_scripthash())\n\n    def to_P2PKH_script(self):\n        '''Return a P2PKH script.'''\n        return self.address.to_script()\n\n    def __str__(self):\n        return self.to_ui_string()\n\n    def __repr__(self):\n        return '<PubKey {}>'.format(self.__str__())\n\n\nclass ScriptOutput(namedtuple(\"ScriptAddressTuple\", \"script\")):\n\n    @classmethod\n    def from_string(self, string):\n        '''Instantiate from a mixture of opcodes and raw data.'''\n        script = bytearray()\n        for word in string.split():\n            if word.startswith('OP_'):\n                try:\n                    opcode = OpCodes[word]\n                except KeyError:\n                    raise AddressError('unknown opcode {}'.format(word))\n                script.append(opcode)\n            elif word.lower().startswith('<empty>'):\n                script.extend([ OpCodes.OP_PUSHDATA1, OpCodes.OP_0 ])\n            else:\n                import binascii\n                script.extend(Script.push_data(binascii.unhexlify(word)))\n        return ScriptOutput.protocol_factory(bytes(script))\n\n    def to_ui_string(self, hex_only = False):\n        '''Convert to user-readable OP-codes (plus text), eg OP_RETURN (12) \"Hello there!\"\n        Or, to a hexadecimal string if that fails.\n        Note that this function is the inverse of from_string() only if called with hex_only = True!'''\n        if self.script and not hex_only:\n            try:\n                ret = ''\n                ops = Script.get_ops(self.script)\n                def lookup(x):\n                    try:\n                        return OpCodes(x).name\n                    except ValueError:\n                        return '('+str(x)+')'\n                for op in ops:\n                    if ret: ret += \", \"\n                    if isinstance(op, tuple):\n                        if op[1] is None:\n                            ret += \"<EMPTY>\"\n                        else:\n                            if hex_only:\n                                friendlystring = None\n                            else:\n                                # Attempt to make a friendly string, or fail to hex\n                                try:\n                                    # Ascii only\n                                    friendlystring = op[1].decode('ascii') # raises UnicodeDecodeError with bytes > 127.\n\n                                    # Count ugly characters (that need escaping in python strings' repr())\n                                    uglies = 0\n                                    for b in op[1]:\n                                        if b < 0x20 or b == 0x7f:\n                                            uglies += 1\n                                    # Less than half of characters may be ugly.\n                                    if 2*uglies >= len(op[1]):\n                                        friendlystring = None\n                                except UnicodeDecodeError:\n                                    friendlystring = None\n\n                            if friendlystring is None:\n                                ret += lookup(op[0]) + \" \" + op[1].hex()\n                            else:\n                                ret += lookup(op[0]) + \" \" + repr(friendlystring)\n                    elif isinstance(op, int):\n                        ret += lookup(op)\n                    else:\n                        ret += '[' + (op.hex() if isinstance(op, bytes) else str(op)) + ']'\n                return ret\n            except ScriptError:\n                # Truncated script -- so just 
default to normal 'hex' encoding below.\n                pass\n        return self.script.hex()\n\n    def to_script(self):\n        return self.script\n\n    def is_opreturn(self):\n        ''' Returns True iff this script is an OP_RETURN script (starts with\n        the OP_RETURN byte)'''\n        return bool(self.script and self.script[0] == OpCodes.OP_RETURN)\n\n    def __str__(self):\n        return self.to_ui_string(True)\n\n    def __repr__(self):\n        return '<ScriptOutput {}>'.format(self.__str__())\n\n\n    ###########################################\n    # Protocol system methods and class attrs #\n    ###########################################\n\n    # subclasses of ScriptOutput that handle protocols. Currently this will\n    # contain a cashacct.ScriptOutput instance.\n    #\n    # NOTE: All subclasses of this class must be hashable. Please implement\n    # __hash__ for any subclasses. (This is because our is_mine cache in\n    # wallet.py assumes all possible types that pass through it are hashable).\n    #\n    protocol_classes = set()\n\n    def make_complete(self, block_height=None, block_hash=None, txid=None):\n        ''' Subclasses implement this, noop here. '''\n        pass\n\n    def is_complete(self):\n        ''' Subclasses implement this, noop here. '''\n        return True\n\n    @classmethod\n    def find_protocol_class(cls, script_bytes):\n        ''' Scans the protocol_classes set, and if the passed-in script matches\n        a known protocol, returns that class, otherwise returns our class. '''\n        for c in cls.protocol_classes:\n            if c.protocol_match(script_bytes):\n                return c\n        return __class__\n\n    @staticmethod\n    def protocol_factory(script):\n        ''' One shot -- find the right class and construct object based on script '''\n        return __class__.find_protocol_class(script)(script)\n\n\n# A namedtuple for easy comparison and unique hashing\nclass Address(namedtuple(\"AddressTuple\", \"hash160 kind\")):\n\n    # Address kinds\n    ADDR_P2PKH = 0\n    ADDR_P2SH = 1\n\n    # Address formats\n    FMT_CASHADDR = 0\n    FMT_LEGACY = 1\n    FMT_BITPAY = 2   # Supported temporarily only for compatibility\n    FMT_SLPADDR = 3\n\n    _NUM_FMTS = 4  # <-- Be sure to update this to be 1+ last format above!\n\n    # Default to CashAddr using 'simpleledger' or 'slptest' prefix\n    FMT_UI = FMT_SLPADDR\n\n    def __new__(cls, hash160, kind):\n        assert kind in (cls.ADDR_P2PKH, cls.ADDR_P2SH)\n        hash160 = to_bytes(hash160)\n        assert len(hash160) == 20, \"hash must be 20 bytes\"\n        ret = super().__new__(cls, hash160, kind)\n        ret._addr2str_cache = [None] * cls._NUM_FMTS\n        return ret\n\n    @classmethod\n    def show_cashaddr(cls, format):\n        cls.FMT_UI = format\n\n\n    @classmethod\n    def from_cashaddr_string(cls, string, *, net=None):\n        '''Construct from a cashaddress string.'''\n        if net is None: net = networks.net\n        prefix = net.CASHADDR_PREFIX\n        if string.upper() == string:\n            prefix = prefix.upper()\n        if ':' not in string:\n            string = ':'.join([prefix, string])\n        addr_prefix, kind, addr_hash = cashaddr.decode(string)\n        if addr_prefix != prefix:\n            raise AddressError('address has unexpected prefix {}'\n                               .format(addr_prefix))\n        if kind == cashaddr.PUBKEY_TYPE:\n            return cls(addr_hash, cls.ADDR_P2PKH)\n        elif kind == cashaddr.SCRIPT_TYPE:\n            return cls(addr_hash, cls.ADDR_P2SH)\n        else:\n            raise AddressError('address has unexpected kind {}'.format(kind))\n\n    @classmethod\n    def from_slpaddr_string(cls, string, *, net=None):\n        '''Construct from a slpaddress string.'''\n        if net is None: net = networks.net\n        prefix = net.SLPADDR_PREFIX\n        if string.upper() == string:\n            prefix = prefix.upper()\n        if ':' not in string:\n            string = ':'.join([prefix, string])\n        addr_prefix, kind, addr_hash = cashaddr.decode(string)\n        if 
addr_prefix != prefix:\n raise AddressError('address has unexpected prefix {}'\n .format(addr_prefix))\n if kind == cashaddr.PUBKEY_TYPE:\n return cls(addr_hash, cls.ADDR_P2PKH)\n elif kind == cashaddr.SCRIPT_TYPE:\n return cls(addr_hash, cls.ADDR_P2SH)\n else:\n raise AddressError('address has unexpected kind {}'.format(kind))\n\n @classmethod\n def from_string(cls, string, *, net=None):\n '''Construct from an address string.'''\n if net is None: net = networks.net\n if len(string) > 35:\n try:\n try:\n return cls.from_slpaddr_string(string, net=net)\n except:\n return cls.from_cashaddr_string(string, net=net)\n except ValueError as e:\n raise AddressError(str(e))\n\n try:\n raw = Base58.decode_check(string)\n except Base58Error as e:\n raise AddressError(str(e))\n\n # Require version byte(s) plus hash160.\n if len(raw) != 21:\n raise AddressError('invalid address: {}'.format(string))\n\n verbyte, hash160 = raw[0], raw[1:]\n if verbyte in [net.ADDRTYPE_P2PKH,\n net.ADDRTYPE_P2PKH_BITPAY]:\n kind = cls.ADDR_P2PKH\n elif verbyte in [net.ADDRTYPE_P2SH,\n net.ADDRTYPE_P2SH_BITPAY]:\n kind = cls.ADDR_P2SH\n else:\n raise AddressError('unknown version byte: {}'.format(verbyte))\n\n return cls(hash160, kind)\n\n @classmethod\n def prefix_from_address_string(cls, string, *, net=None):\n '''Get address prefix from address string which may be missing the prefix.'''\n if net is None: net = networks.net\n if len(string) > 35:\n try:\n cls.from_slpaddr_string(string, net=net)\n return net.SLPADDR_PREFIX\n except:\n pass\n try:\n cls.from_cashaddr_string(string, net=net)\n return net.CASHADDR_PREFIX\n except:\n pass\n return ''\n\n @classmethod\n def is_valid(cls, string, *, net=None):\n if net is None: net = networks.net\n try:\n cls.from_string(string, net=net)\n return True\n except Exception:\n return False\n\n @classmethod\n def from_strings(cls, strings, *, net=None):\n '''Construct a list from an iterable of strings.'''\n if net is None: net = networks.net\n return [cls.from_string(string, net=net) for string in strings]\n\n @classmethod\n def from_pubkey(cls, pubkey):\n '''Returns a P2PKH address from a public key. 
The public key can\n        be bytes or a hex string.'''\n        if isinstance(pubkey, str):\n            pubkey = hex_to_bytes(pubkey)\n        PublicKey.validate(pubkey)\n        return cls(hash160(pubkey), cls.ADDR_P2PKH)\n\n    @classmethod\n    def from_P2PKH_hash(cls, hash160):\n        '''Construct from a P2PKH hash160.'''\n        return cls(hash160, cls.ADDR_P2PKH)\n\n    @classmethod\n    def from_P2SH_hash(cls, hash160):\n        '''Construct from a P2SH hash160.'''\n        return cls(hash160, cls.ADDR_P2SH)\n\n    @classmethod\n    def from_multisig_script(cls, script):\n        return cls(hash160(script), cls.ADDR_P2SH)\n\n    @classmethod\n    def to_strings(cls, fmt, addrs, *, net=None):\n        '''Construct a list of strings from an iterable of Address objects.'''\n        if net is None: net = networks.net\n        return [addr.to_string(fmt, net=net) for addr in addrs]\n\n    @staticmethod\n    def is_legacy(address: str, net=None) -> bool:\n        \"\"\"Find if the string of the address is in legacy format\"\"\"\n        if net is None:\n            net = networks.net\n        try:\n            raw = Base58.decode_check(address)\n        except Base58Error:\n            return False\n\n        if len(raw) != 21:\n            return False\n\n        verbyte = raw[0]\n        legacy_formats = (\n            net.ADDRTYPE_P2PKH,\n            net.ADDRTYPE_P2PKH_BITPAY,\n            net.ADDRTYPE_P2SH,\n            net.ADDRTYPE_P2SH_BITPAY,\n        )\n        return verbyte in legacy_formats\n\n    def to_cashaddr(self, *, net=None):\n        if net is None: net = networks.net\n        if self.kind == self.ADDR_P2PKH:\n            kind  = cashaddr.PUBKEY_TYPE\n        else:\n            kind  = cashaddr.SCRIPT_TYPE\n        return cashaddr.encode(net.CASHADDR_PREFIX, kind, self.hash160)\n\n    def to_slpaddr(self, *, net=None):\n        if net is None: net = networks.net\n        if self.kind == self.ADDR_P2PKH:\n            kind  = cashaddr.PUBKEY_TYPE\n        else:\n            kind  = cashaddr.SCRIPT_TYPE\n        return cashaddr.encode(net.SLPADDR_PREFIX, kind, self.hash160)\n\n    def to_string(self, fmt, *, net=None):\n        '''Converts to a string of the given format.'''\n        if net is None: net = networks.net\n        if net is networks.net:\n            try:\n                cached = self._addr2str_cache[fmt]\n                if cached:\n                    return cached\n            except (IndexError, TypeError):\n                raise AddressError('unrecognised format')\n\n        try:\n            cached = None\n\n            if fmt == self.FMT_CASHADDR:\n                cached = self.to_cashaddr(net=net)\n                return cached\n\n            if fmt == self.FMT_SLPADDR:\n                cached = self.to_slpaddr(net=net)\n                return cached\n\n            if fmt == self.FMT_LEGACY:\n                if self.kind == self.ADDR_P2PKH:\n                    verbyte = net.ADDRTYPE_P2PKH\n                else:\n                    verbyte = net.ADDRTYPE_P2SH\n            elif fmt == self.FMT_BITPAY:\n                if self.kind == self.ADDR_P2PKH:\n                    verbyte = net.ADDRTYPE_P2PKH_BITPAY\n                else:\n                    verbyte = net.ADDRTYPE_P2SH_BITPAY\n            else:\n                # This should never be reached due to cache-lookup check above. 
But leaving it in as it's a harmless sanity check.\n raise AddressError('unrecognised format')\n\n cached = Base58.encode_check(bytes([verbyte]) + self.hash160)\n return cached\n finally:\n if cached and net is networks.net:\n self._addr2str_cache[fmt] = cached\n\n def to_full_string(self, fmt, *, net=None):\n '''Convert to text, with a URI prefix for cashaddr format.'''\n if net is None: net = networks.net\n text = self.to_string(fmt, net=net)\n if fmt == self.FMT_CASHADDR:\n text = ':'.join([net.CASHADDR_PREFIX, text])\n if fmt == self.FMT_SLPADDR:\n text = ':'.join([net.SLPADDR_PREFIX, text])\n return text\n\n def to_ui_string(self, *, net=None):\n '''Convert to text in the current UI format choice.'''\n if net is None: net = networks.net\n return self.to_string(self.FMT_UI, net=net)\n\n def to_full_ui_string(self, *, net=None):\n '''Convert to text, with a URI prefix if cashaddr.'''\n if net is None: net = networks.net\n return self.to_full_string(self.FMT_UI, net=net)\n\n def to_URI_components(self, *, net=None):\n '''Returns a (scheme, path) pair for building a URI.'''\n if net is None: net = networks.net\n scheme = net.CASHADDR_PREFIX\n scheme2 = net.SLPADDR_PREFIX\n path = self.to_ui_string(net=net)\n if self.FMT_UI == self.FMT_SLPADDR:\n scheme = scheme2\n return scheme, path\n\n def to_storage_string(self, *, net=None):\n '''Convert to text in the storage format.'''\n if net is None: net = networks.net\n return self.to_string(self.FMT_LEGACY, net=net)\n\n def to_script(self):\n '''Return a binary script to pay to the address.'''\n if self.kind == self.ADDR_P2PKH:\n return Script.P2PKH_script(self.hash160)\n else:\n return Script.P2SH_script(self.hash160)\n\n def to_script_hex(self):\n '''Return a script to pay to the address as a hex string.'''\n return self.to_script().hex()\n\n def to_scripthash(self):\n '''Returns the hash of the script in binary.'''\n return sha256(self.to_script())\n\n def to_scripthash_hex(self):\n '''Like other bitcoin hashes this is reversed when written in hex.'''\n return hash_to_hex_str(self.to_scripthash())\n\n def __str__(self):\n return self.to_ui_string()\n\n def __repr__(self):\n return '
<Address {}>'.format(self.__str__())\n\n\ndef _match_ops(ops, pattern):\n    if len(ops) != len(pattern):\n        return False\n    for op, pop in zip(ops, pattern):\n        if pop != op:\n            # -1 means 'data push', whose op is an (op, data) tuple\n            if pop == -1 and isinstance(op, tuple):\n                continue\n            return False\n\n    return True\n\n\nclass Script:\n\n    @classmethod\n    def P2SH_script(cls, hash160):\n        assert len(hash160) == 20\n        return P2SH_prefix + hash160 + P2SH_suffix\n\n    @classmethod\n    def P2PKH_script(cls, hash160):\n        assert len(hash160) == 20\n        return P2PKH_prefix + hash160 + P2PKH_suffix\n\n    @classmethod\n    def P2PK_script(cls, pubkey):\n        return cls.push_data(pubkey) + bytes([OpCodes.OP_CHECKSIG])\n\n    @classmethod\n    def multisig_script(cls, m, pubkeys):\n        '''Returns the script for a pay-to-multisig transaction.'''\n        n = len(pubkeys)\n        if not 1 <= m <= n <= 15:\n            raise ScriptError('{:d} of {:d} multisig script not possible'\n                              .format(m, n))\n        for pubkey in pubkeys:\n            PublicKey.validate(pubkey)   # Can be compressed or not\n        # See https://bitcoin.org/en/developer-guide\n        # 2 of 3 is: OP_2 pubkey1 pubkey2 pubkey3 OP_3 OP_CHECKMULTISIG\n        return (bytes([OpCodes.OP_1 + m - 1])\n                + b''.join(cls.push_data(pubkey) for pubkey in pubkeys)\n                + bytes([OpCodes.OP_1 + n - 1, OpCodes.OP_CHECKMULTISIG]))\n\n    @classmethod\n    def push_data(cls, data):\n        '''Returns the OpCodes to push the data on the stack.'''\n        assert isinstance(data, (bytes, bytearray))\n\n        n = len(data)\n        if n < OpCodes.OP_PUSHDATA1:\n            return bytes([n]) + data\n        if n < 256:\n            return bytes([OpCodes.OP_PUSHDATA1, n]) + data\n        if n < 65536:\n            return bytes([OpCodes.OP_PUSHDATA2]) + struct.pack('<H', n) + data\n        return bytes([OpCodes.OP_PUSHDATA4]) + struct.pack('<I', n) + data\n\n    @classmethod\n    def get_ops(cls, script):\n        ops = []\n\n        # The unpacks or script[n] below throw on truncated scripts\n        try:\n            n = 0\n            while n < len(script):\n                op = script[n]\n                n += 1\n\n                if op <= OpCodes.OP_PUSHDATA4:\n                    # Raw bytes follow\n                    if op < OpCodes.OP_PUSHDATA1:\n                        dlen = op\n                    elif op == OpCodes.OP_PUSHDATA1:\n                        dlen = script[n]\n                        n += 1\n                    elif op == OpCodes.OP_PUSHDATA2:\n                        dlen, = struct.unpack('<H', script[n: n + 2])\n                        n += 2\n                    else:\n                        dlen, = struct.unpack('<I', script[n: n + 4])\n                        n += 4\n                    if n + dlen > len(script):\n                        raise IndexError\n                    if dlen > 0:\n                        op = (op, script[n:n + dlen])\n                    else:\n                        op = (op, None)\n                    n += dlen\n\n                ops.append(op)\n        except Exception:\n            # Truncated script; e.g. tx_hash\n            # ebc9fa1196a59e192352d76c0f6e73167046b9d37b8302b6bb6968dfd279b767\n            raise ScriptError('truncated script')\n\n        return ops\n\n\nclass Base58Error(Exception):\n    '''Exception used for Base58 errors.'''\n\n\nclass Base58:\n    '''Class providing base 58 functionality.'''\n\n    chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n    assert len(chars) == 58\n    cmap = {c: n for n, c in enumerate(chars)}\n\n    @staticmethod\n    def char_value(c):\n        val = Base58.cmap.get(c)\n        if val is None:\n            raise Base58Error('invalid base 58 character \"{}\"'.format(c))\n        return val\n\n    @staticmethod\n    def decode(txt):\n        \"\"\"Decodes txt into a big-endian bytearray.\"\"\"\n        if not isinstance(txt, str):\n            raise TypeError('a string is required')\n\n        if not txt:\n            raise Base58Error('string cannot be empty')\n\n        value = 0\n        for c in txt:\n            value = value * 58 + Base58.char_value(c)\n\n        result = int_to_bytes(value)\n\n        # Prepend leading zero bytes if necessary\n        count = 0\n        for c in txt:\n            if c != '1':\n                break\n            count += 1\n        if count:\n            result = bytes(count) + result\n\n        return result\n\n    @staticmethod\n    def encode(be_bytes):\n        \"\"\"Converts a big-endian bytearray into a base58 string.\"\"\"\n        value = bytes_to_int(be_bytes)\n\n        txt = ''\n        while value:\n            value, mod = divmod(value, 58)\n            txt += Base58.chars[mod]\n\n        for byte in be_bytes:\n            if byte != 0:\n                break\n            txt += '1'\n\n        return txt[::-1]
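\n\n    # Round-trip sanity sketch for the check-encoding pair below (illustrative\n    # only; the payload is a hypothetical version byte plus a 20-byte hash):\n    #   payload = bytes([0]) + bytes(20)\n    #   assert Base58.decode_check(Base58.encode_check(payload)) == payload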
\n\n    @staticmethod\n    def decode_check(txt):\n        '''Decodes a Base58Check-encoded string to a payload.  The version\n        prefixes it.'''\n        be_bytes = Base58.decode(txt)\n        result, check = be_bytes[:-4], be_bytes[-4:]\n        if check != double_sha256(result)[:4]:\n            raise Base58Error('invalid base 58 checksum for {}'.format(txt))\n        return result\n\n    @staticmethod\n    def encode_check(payload):\n        \"\"\"Encodes a payload bytearray (which includes the version byte(s))\n        into a Base58Check string.\"\"\"\n        be_bytes = payload + double_sha256(payload)[:4]\n        return Base58.encode(be_bytes)\n","sub_path":"electroncash/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":32170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"537893129","text":"from flask_restful import Resource\nfrom flask import Response\nfrom Phone import Control\nimport json, requests\n\nclass v3_00_Config_Launch_Control(object):\n    controller = None\n\n    def __init__(self):\n        self.controller = Control.global_controller\n\n\n    def launch(self, app=None, json_string=None):\n        success = 'success'\n        status = '200'\n        message = 'App Launched'\n        data = None\n\n        try:\n            self.controller.log('Application launch detected.', screen=False)\n            if json_string == None\\\n            or json_string == ''\\\n            or app == None\\\n            or app == '':\n                raise KeyError('Badly formed request. There was no JSON!')\n\n            json_data = json.loads(json_string)\n            key = json_data['key']\n\n            if not key == '1234-5678-9012-3456':\n                raise ValueError('Control key incorrect.')\n\n            self.controller.log('Check if monitor app is configured.',\n                                screen=False)\n            monitor_app = self.controller.get_value('monitor_app')\n            if monitor_app == [] or monitor_app == None:\n                self.controller.log('No monitor app, so ignore.',\n                                    screen=False)\n                data = {'app':app, 'state':'launched'}\n            else:\n                self.controller.log('Monitor app is configured, so notify.',\n                                    screen=False)\n                return_status = self.notify_monitor(app, monitor_app)\n                if 'error' in return_status:\n                    status_code = return_status['status']\n                    if status_code == 500:\n                        raise requests.exceptions.ConnectionError()\n                    elif status_code == 400:\n                        raise KeyError()\n                    elif status_code == 403:\n                        raise ValueError(return_status['message'])\n                    else:\n                        raise Exception(return_status['message'])\n                else:\n                    data = return_status\n        except requests.exceptions.ConnectionError as rce:\n            return {'success':'error',\n                    'status':500,\n                    'message':'Phone cannot communicate with the monitor '+\\\n                              'app running at {0}'.format(monitor_app)+\\\n                              '; the response from the monitor app was'+\\\n                              ' a connection error: {0}'.format(str(rce))+'.'\n                   }\n        except KeyError as ke:\n            success = 'error'\n            status = '400'\n            message = 'Key Error: {0}'.format(str(ke))\n        except ValueError as ve:\n            success = 'error'\n            status = '403'\n            message = 'Value Error: {0}'.format(str(ve))\n        except Exception as e:\n            success = 'error'\n            status = '500'\n            message = 'Exception: {0}'.format(str(e))\n\n        return_value = self.controller.do_response(message=message,\n                                                   data=data,\n                                                   status=status,\n                                                   response=success)\n\n        return return_value\n\n\n    def notify_monitor(self, app=None, monitor_app=None):\n        try:\n            monitor_app_url = monitor_app + '/launched/' + app\n\n            payload = {\"key\":\"1234-5678-9012-3456\"}\n\n            self.controller.log('Connecting to {0} with payload {1}'\\\n                                .format(monitor_app_url, payload),\n                                screen=False)\n            request_response = requests.post(monitor_app_url,\n                                             json.dumps(payload))\n\n            status_code = request_response.status_code\n            self.controller.log('Monitor App {0} returned status {1}'\\\n                                .format(monitor_app_url, status_code),\n                                screen=False)\n\n            if status_code not 
in (200,201,404):\n raise ValueError('Unable to communicate with monitor app! '+\\\n 'Response code '+\\\n '{0}'.format(request_response.status_code)+\\\n ' with data payload '+\\\n '{0}.'.format(request_response.text)\n )\n else:\n if status_code == 404\\\n and not ('not being monitored' in str(request_response.text)):\n raise ValueError('Unable to communicate with monitor app.'+\\\n ' Response from request was {0} {1}.'\\\n .format(status_code, request_response.text)\n )\n json_response = request_response.json()\n if 'error' in json_response:\n raise ValueError(json_response['message'])\n\n return_value = {'app':app, 'state':'launched'}\n self.controller.log('Returning {0}'\\\n .format(return_value),\n screen=False)\n return return_value\n except requests.exceptions.ConnectionError as rce:\n return {'success':'error',\n 'status':500,\n 'message':'Phone cannot communicate with the monitor '+\\\n 'app running at {0}'.format(monitor_app)+\\\n '; the response from the monitor app was'+\\\n ' a connection error: {0}'.format(str(rce))+'.'\n }\n except KeyError as ke:\n return {'success':'error',\n 'status':400,\n 'message':'Badly formed request!'\n }\n except ValueError as ve:\n return {'success':'error',\n 'status':403,\n 'message':str(ve)\n }\n except Exception as e:\n return {'success':'error',\n 'status':500,\n 'message':repr(e)\n }\n\n\n","sub_path":"v3_00/Phone/Phone_Config_Control/v3_00_Config_Launch_Control.py","file_name":"v3_00_Config_Launch_Control.py","file_ext":"py","file_size_in_byte":6226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424381543","text":"from IPython.display import display\nimport pandas as pd\nimport os\nimport json\nimport warnings\nfrom .utils import prepare_vega_spec, prepare_vegalite_spec\n\ndef _jupyter_labextension_paths():\n return [{\n 'name': 'jupyterlab_vega',\n 'src': 'static',\n }]\n\ndef _jupyter_nbextension_paths():\n return [{\n 'section': 'notebook',\n 'src': 'static',\n 'dest': 'jupyterlab_vega',\n 'require': 'jupyterlab_vega/extension'\n }]\n\ndef _safe_exists(path):\n \"\"\"Check path, but don't let exceptions raise\"\"\"\n try:\n return os.path.exists(path)\n except Exception:\n return False\n \n\nclass Vega():\n \"\"\"A display class for displaying Vega visualizations in the Jupyter Notebook and IPython kernel.\n \n Vega expects a spec (a JSON-able dict) and data (dict) argument\n\n not already-serialized JSON strings.\n\n Scalar types (None, number, string) are not allowed, only dict containers.\n \"\"\"\n \n # wrap data in a property, which warns about passing already-serialized JSON\n _spec = None\n _data = None\n _read_flags = 'r'\n \n def __init__(self, spec=None, data=None, url=None, filename=None, metadata=None):\n \"\"\"Create a Vega display object given raw data.\n\n Parameters\n ----------\n spec : dict\n Vega spec. Not an already-serialized JSON string.\n data : dict\n A dict of Vega datasets where the key is the dataset name and the \n value is the data values. 
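For example, {'points': [{'x': 1, 'y': 2}]} (the dataset name 'points' here is purely illustrative). 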
Not an already-serialized JSON string.\n Scalar types (None, number, string) are not allowed, only dict\n or list containers.\n url : unicode\n A URL to download the data from.\n filename : unicode\n Path to a local file to load the data from.\n metadata: dict\n Specify extra metadata to attach to the json display object.\n \"\"\"\n \n if spec is not None and isinstance(spec, str):\n if spec.startswith('http') and url is None:\n url = spec\n filename = None\n spec = None\n elif _safe_exists(spec) and filename is None:\n url = None\n filename = spec\n spec = None\n \n self.spec = spec\n self.data = data\n self.metadata = metadata\n self.url = url\n self.filename = filename\n \n self.reload()\n self._check_data()\n\n def reload(self):\n \"\"\"Reload the raw spec from file or URL.\"\"\"\n if self.filename is not None:\n with open(self.filename, self._read_flags) as f:\n self.spec = json.loads(f.read())\n elif self.url is not None:\n try:\n # Deferred import\n from urllib.request import urlopen\n response = urlopen(self.url)\n self.spec = response.read()\n # extract encoding from header, if there is one:\n encoding = None\n for sub in response.headers['content-type'].split(';'):\n sub = sub.strip()\n if sub.startswith('charset'):\n encoding = sub.split('=')[-1].strip()\n break\n # decode spec, if an encoding was specified\n if encoding:\n self.spec = self.spec.decode(encoding, 'replace')\n except:\n self.spec = None\n \n def _check_data(self):\n if self.spec is not None and not isinstance(self.spec, dict):\n raise TypeError(\"%s expects a JSONable dict, not %r\" % (self.__class__.__name__, self.spec))\n if self.data is not None and not isinstance(self.data, dict):\n raise TypeError(\"%s expects a dict, not %r\" % (self.__class__.__name__, self.data))\n\n @property\n def spec(self):\n return self._spec\n \n @property\n def data(self):\n return self._data\n \n @spec.setter\n def spec(self, spec):\n if isinstance(spec, str):\n # warnings.warn(\"%s expects a JSONable dict, not %r\" % (self.__class__.__name__, spec))\n spec = json.loads(spec)\n self._spec = spec\n\n @data.setter\n def data(self, data):\n if isinstance(data, str):\n # warnings.warn(\"%s expects a dict, not %r\" % (self.__class__.__name__, data))\n data = json.loads(data)\n self._data = data\n \n def _ipython_display_(self):\n bundle = {\n 'application/vnd.vega.v2+json': prepare_vega_spec(self.spec, self.data),\n 'text/plain': ''\n }\n display(bundle, raw=True) \n \n\nclass VegaLite(Vega):\n \"\"\"VegaLite expects a spec (a JSON-able dict) and data (JSON-able list or pandas DataFrame) argument\n\n not already-serialized JSON strings.\n\n Scalar types (None, number, string) are not allowed, only dict containers.\n \"\"\"\n \n def __init__(self, spec=None, data=None, url=None, filename=None, metadata=None):\n \"\"\"Create a VegaLite display object given raw data.\n\n Parameters\n ----------\n spec : dict\n VegaLite spec. Not an already-serialized JSON string.\n data : dict or list\n VegaLite data. 
Not an already-serialized JSON string.\n            Scalar types (None, number, string) are not allowed, only dict\n            or list containers.\n        url : unicode\n            A URL to download the data from.\n        filename : unicode\n            Path to a local file to load the data from.\n        metadata: dict\n            Specify extra metadata to attach to the json display object.\n        \"\"\"\n\n        super(VegaLite, self).__init__(spec=spec, data=data, url=url, filename=filename, metadata=metadata)\n\n    def _check_data(self):\n        if self.spec is not None and not isinstance(self.spec, dict):\n            raise TypeError(\"%s expects a JSONable dict, not %r\" % (self.__class__.__name__, self.spec))\n        if self.data is not None and not isinstance(self.data, (list, pd.DataFrame)):\n            raise TypeError(\"%s expects a JSONable list or pandas DataFrame, not %r\" % (self.__class__.__name__, self.data))\n\n    def _ipython_display_(self):\n        bundle = {\n            'application/vnd.vegalite.v1+json': prepare_vegalite_spec(self.spec, self.data),\n            'text/plain': ''\n        }\n        metadata = {\n            'application/vnd.vegalite.v1+json': self.metadata\n        }\n        display(bundle, metadata=metadata, raw=True)\n\n\nclass VegaLite(Vega):\n    \"\"\"A display class for displaying Vega-lite visualizations in the Jupyter Notebook and IPython kernel.\n\n    not already-serialized JSON strings.\n\n    Scalar types (None, number, string) are not allowed, only dict containers.\n    \"\"\"\n\n    def __init__(self, spec=None, data=None, url=None, filename=None, metadata=None):\n        \"\"\"Create a VegaLite display object given raw data.\n\n        Parameters\n        ----------\n        spec : dict\n            VegaLite spec. Not an already-serialized JSON string.\n        data : dict or list\n            VegaLite data. Not an already-serialized JSON string.\n            Scalar types (None, number, string) are not allowed, only dict\n            or list containers.\n        url : unicode\n            A URL to download the data from.\n        filename : unicode\n            Path to a local file to load the data from.\n        metadata: dict\n            Specify extra metadata to attach to the json display object.\n        \"\"\"\n\n        super(VegaLite, self).__init__(spec=spec, data=data, url=url, filename=filename, metadata=metadata)\n\n    def _check_data(self):\n        if self.spec is not None and not isinstance(self.spec, dict):\n            raise TypeError(\"%s expects a JSONable dict, not %r\" % (self.__class__.__name__, self.spec))\n        if self.data is not None and not isinstance(self.data, (list, pd.DataFrame)):\n            raise TypeError(\"%s expects a JSONable list or pandas DataFrame, not %r\" % (self.__class__.__name__, self.data))\n\n    def _ipython_display_(self):\n        bundle = {\n            'application/vnd.vegalite.v1+json': prepare_vegalite_spec(self.spec, self.data),\n            'text/plain': ''\n        }\n        metadata = {\n            'application/vnd.vegalite.v1+json': self.metadata\n        }\n        display(bundle, metadata=metadata, raw=True)\n","sub_path":"jupyterlab_vega/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"95109615","text":"from setuptools import find_packages\nfrom setuptools import setup\n\nREQUIRED_PACKAGES = [\n    'gcsfs==0.6.0',\n    'google-cloud-storage==1.26.0',\n    'pandas==0.24.2',\n    'scikit-learn==0.20.4']\n\nPACKAGE_NAME='Model2Weeks' # model folder name\nPACKAGE_DESCRIPTION='xgboost 2 weeks'\n\nsetup(name=PACKAGE_NAME,\n      version='1.0',\n      install_requires=REQUIRED_PACKAGES,\n      packages=find_packages(),\n      include_package_data=True,\n      description=PACKAGE_DESCRIPTION)\n","sub_path":"models/train/gab-week-2/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"72453427","text":"import pygame\nfrom controller.base_controller import BaseController\nfrom boat_simulation.simple import Action\nfrom boat_simulation.latlon import LatLon\nimport numpy as np\n\nfrom pygame.locals import (\n K_UP,\n K_DOWN,\n K_LEFT,\n K_RIGHT,\n K_ESCAPE,\n KEYDOWN,\n QUIT,\n)\n\n\nclass KeyboardController(BaseController):\n def __init__(self, in_sim=True):\n BaseController.__init__(self, \"Keyboard Controller\", handle_quit=False)\n self.curr_waypoint = 0\n self.in_sim = in_sim\n\n\n def get_distances(self, waypoint, boat_x, boat_y):\n x_targ, y_targ = waypoint[0], waypoint[1]\n x_curr, y_curr = boat_x, boat_y\n\n delta_x = LatLon.dist(LatLon(y_targ, x_curr), LatLon(y_targ, x_targ))\n if x_targ < x_curr:\n delta_x *= -1\n\n delta_y = LatLon.dist(LatLon(y_curr, x_targ), LatLon(y_targ, x_targ))\n if y_targ < y_curr:\n delta_y *= -1\n\n return delta_x, delta_y\n\n\n def get_required_angle_change(self, boat_angle, delta_x, delta_y):\n angle = (np.arctan2(-delta_x, -delta_y) * 180 / np.pi) - (boat_angle)\n return angle\n\n\n def select_action_from_state(self, env, state):\n\n if self.in_sim:\n env.set_waypoint(self.curr_waypoint)\n\n boat_x, boat_y, boat_speed, _, boat_angle, boat_ang_vel, ocean_current_x, ocean_current_y, obstacles = state\n\n waypoint = [env.waypoints[self.curr_waypoint].lon, env.waypoints[self.curr_waypoint].lat]\n dist = LatLon.dist(LatLon(boat_y, boat_x), LatLon(waypoint[1], waypoint[0]))\n\n if abs(dist) < 0.05:\n self.curr_waypoint = (self.curr_waypoint + 1) % len(env.waypoints)\n\n delta_x, delta_y = self.get_distances(waypoint, boat_x, boat_y)\n angle = self.get_required_angle_change(boat_angle, delta_x, delta_y)\n angle = angle % 360\n print(min(angle, angle - 360, key=abs))\n\n # Accelerating by specified values for one frame\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE or event.type == QUIT:\n env.close()\n if event.key == K_UP:\n return Action(0, 60)\n if event.key == K_DOWN:\n return Action(0, -60)\n if event.key == K_LEFT:\n return Action(1, 60*60)\n if event.key == K_RIGHT:\n return Action(1, -60*60)\n if event.type == QUIT:\n env.close()\n\n return Action(0, 0)\n","sub_path":"boat-test/controller/keyboard_controller.py","file_name":"keyboard_controller.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319555929","text":"from itertools import combinations,permutations\r\narch = open('nombreArch.txt','w')\r\nb=list(permutations(['a','b','c','d','e','f','g','h'],8))\r\nc= []\r\ncont = 0\r\nfor i in b:\r\n string = ''.join(i)\r\n arch.write(string+'\\n')\r\n cont+=1\r\n if cont==11000:\r\n break\r\narch.close()\r\nprint(cont)\r\n ","sub_path":"Otros/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"58912575","text":"import numpy as np\nimport pandas as pd\nfrom six.moves import cPickle as pickle\n\nclass DataLoader(object):\n\n DEFAULT_VOCABULARY_SIZE = 20000\n\n def __init__(self, data_root, filename, num_epochs, batch_size, \n data_column, labels_column):\n\n self.__data_root = data_root\n self.__filename = filename\n \n self.num_epochs = num_epochs\n self.batch_size = batch_size\n self.data_column = data_column\n self.labels_column = labels_column\n\n self.current_batch = 0\n\n\n def load_data(self):\n self.source, self.labels = 
self.__read_file(self.__data_root, self.__filename)\n        self.data_len = self.labels.shape[0]\n        self.sequence_len = self.source.shape[1]\n        self.vocabulary = pd.read_csv(self.__data_root + \"vocabulary_Reviews\", header=None)\n        self.vocab_len = len(self.vocabulary)\n        self.total_batch = int((self.data_len - 1) / self.batch_size) + 1\n\n    def next_batch(self, shuffle=True):\n        start = self.current_batch * self.batch_size\n        end = min((self.current_batch + 1) * self.batch_size, self.data_len)\n        self.current_batch = (self.current_batch + 1) % self.total_batch\n\n        if shuffle and self.current_batch == 1:\n            shuffle_idxs = np.random.permutation(self.data_len)\n            self.source = self.source[shuffle_idxs]\n            self.labels = self.labels[shuffle_idxs]\n\n        return self.source[start:end], self.labels[start:end]\n\n    def __read_file(self, path, filename):\n        with open(path + filename, 'rb') as f:\n            save = pickle.load(f)\n            source = save['source']\n            labels = save['labels']\n        return source, labels","sub_path":"data/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"151108463","text":"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val, next, random):\n        self.val = val\n        self.next = next\n        self.random = random\n\nclass Solution:\n    def copyRandomList(self, head: 'Node') -> 'Node':\n\n        node_hash = {}\n        temp = head\n        new_temp = new_head = Node(-1, None, None)\n\n        # First pass: copy every node and chain the copies together via .next\n        while temp:\n            node_hash[temp] = Node(temp.val, None, None)\n            new_temp.next = node_hash[temp]\n            temp = temp.next\n            new_temp = new_temp.next\n\n        # Second pass: point each copy's .random at the copied counterpart\n        temp = head\n        while temp:\n            if temp.random:\n                node_hash[temp].random = node_hash[temp.random]\n            temp = temp.next\n\n        return new_head.next
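\n# Illustrative check of the method above (hypothetical two-node list):\n#   a, b = Node(1, None, None), Node(2, None, None)\n#   a.next, a.random, b.random = b, b, a\n#   copy = Solution().copyRandomList(a)\n#   assert copy.val == 1 and copy.random.val == 2 and copy.random.random.val == 1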
\n","sub_path":"leetcode/linkedlist/deepCopy.py","file_name":"deepCopy.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"289813913","text":"from core.svm import svm\n\n\nif __name__ == '__main__':\n    paths = dict()\n    prefix = 'core/'\n    paths['data_clean'] = prefix + 'data/train_clean.csv'\n    paths['label'] = prefix + 'data/train_label.txt'\n    paths['bow_feat'] = prefix + 'feat/svm/bow_train.pkl'\n    paths['tfidf_feat'] = prefix + 'feat/svm/tfidf_train.pkl'\n    paths['svm_line_bow'] = prefix + 'model/svm/line_bow.pkl'\n    paths['svm_line_tfidf'] = prefix + 'model/svm/line_tfidf.pkl'\n    paths['svm_rbf_bow'] = prefix + 'model/svm/rbf_bow.pkl'\n    paths['svm_rbf_tfidf'] = prefix + 'model/svm/rbf_tfidf.pkl'\n    svm(paths, 'line', 'bow', 'train')\n    svm(paths, 'line', 'tfidf', 'train')\n    svm(paths, 'rbf', 'bow', 'train')\n    svm(paths, 'rbf', 'tfidf', 'train')\n    paths['data_clean'] = prefix + 'data/dev_clean.csv'\n    paths['label'] = prefix + 'data/dev_label.txt'\n    paths['bow_feat'] = prefix + 'feat/svm/bow_dev.pkl'\n    paths['tfidf_feat'] = prefix + 'feat/svm/tfidf_dev.pkl'\n    svm(paths, 'line', 'bow', 'dev')\n    svm(paths, 'line', 'tfidf', 'dev')\n    svm(paths, 'rbf', 'bow', 'dev')\n    svm(paths, 'rbf', 'tfidf', 'dev')\n","sub_path":"train_cpu.py","file_name":"train_cpu.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"358897842","text":"from django.urls import path\nfrom posts.views import *\n\nurlpatterns = [\n\n    path('list/', PostListView.as_view(),name='post_list'),\n    path('post/detail/<int:pk>/', PostDetailView.as_view(), name='post_detail'),\n    path('post/new/', CreatePostView.as_view(), name='post_new'),\n    path('post/<int:pk>/edit/', PostUpdateView.as_view(), name='post_edit'),\n    path('post/<int:pk>/remove/', PostDeleteView.as_view(), name='post_remove'),\n\n    path('post/<int:pk>/comment/', add_comment_to_post, name='add_comment_to_post'),\n    path('comment/<int:pk>/remove/', comment_remove, name='comment_remove'),\n\n    path('', post_list, name=\"post_list\"),\n    path('postdetail/<int:pk>/', post_details, name='post_detail_json')\n]\n","sub_path":"posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"520911280","text":"import discord\nfrom discord.ext import commands\nfrom get_weather import get_weather\nimport datetime\n#import logging\nimport requests\nfrom get_wiki import get_wiki\n\n# INITIALIZATION\nTOKEN = ''\nbot = commands.Bot(command_prefix='!', description=\"A True Gentleman Unmatched\")\ntime = datetime.datetime.now()\nhour = time.hour\n\n# REMOVE DEFAULT HELP TEXT\nbot.remove_command('help')\n\n\n#####################################################\n# LOGGING\n#####################################################\n#logging.basicConfig(level=logging.INFO)\n#logger = logging.getLogger('discord')\n#logger.setLevel(logging.DEBUG)\n#handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\n#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\n#logger.addHandler(handler)\n\n\n@bot.event\nasync def on_ready():\n    print('Successfully Logged in')\n    print(bot.user.name)\n    print(bot.user.id)\n    print('----------------------')\n\n\n@bot.command()\nasync def champ(*, champion: str):\n    champion_lower = champion.lower()\n    url = 'https://u.gg/lol/champions/'+champion_lower+'/build'\n    r = requests.get(url)\n    if r.status_code == 200:\n        msg = 'Here you go, m\\'lady'+'\\n\\n'+r.url\n    else:\n        msg = 'Please give a champion when you call this command.(e.g. !op.gg aatrox)'\n    await bot.say(msg)\n\n\n@bot.command(pass_context=True)\nasync def hello(ctx):\n    if hour <= 12:\n        msg = 'Good Morning, m\\'lady. You, {0.author.mention} are incredibly beautiful.'.format(ctx.message)\n    elif 18 > hour > 12:\n        msg = 'Good Afternoon, m\\'lady. You, {0.author.mention} light up my life like no one else.'.format(ctx.message)\n    else:\n        msg = 'Good Evening, m\\'lady. You, {0.author.mention} are a beautiful human.'.format(ctx.message)\n    await bot.say(msg)\n\n\n@bot.command(pass_context=True)\nasync def help(ctx):\n    msg = 'Hello there m\\'lady. Your smile is radiant today! Here is a list of current available commands: \\n\\n' \\\n          '```css\\n' \\\n          '!champ [champion] : Receive some assistance with your runes and builds for a league champion\\n\\n' \\\n          '!hello : Say hello to Zachary\\n\\n' \\\n          '!help : Receive this instructional text box\\n\\n' \\\n          '!weather [city] : Be awed by some wonderful weather knowledge from the infamous weather man himself\\n\\n' \\\n          '!smartyzach : Become enlightened by Zach\\'s true and endless intelligence\\n\\n```'\n    await bot.say(msg)\n\n\n@bot.command()\nasync def weather(*, city: str):\n    report = get_weather(city)\n    temp = report[1]\n    humidity = report[2]\n\n    await bot.say('Well so uhhh... if you lookie here..')\n\n\n#@bot.command()\n#async def smartyzach():\n\n#@bot.command_not_found()\n#async def !():\n##    msg = 'Sorry, that command does not exist. 
Maddame, try using the !help command.'\n# await bot.say(msg)\n\n\nbot.run(TOKEN)\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"473098892","text":"from createtables import create_tables\nfrom settings import figure_out\nfrom pptx.util import Inches\nfrom pptx.util import Pt\n\ndef conversion_slides(prs, slide_layout, metrics, df_c_tab, df_c_12,\n conv_header_lef, conv_header_top, conv_header_wid, conv_header_hei,\n conv_table1_lef, conv_table1_top, conv_table1_wid, conv_table1_hei,\n conv_table2_lef, conv_table2_top, conv_table2_wid, conv_table2_hei):\n\n for i,element in enumerate(metrics):\n slide = prs.slides.add_slide(slide_layout)\n shapes = slide.shapes\n\n # Text headers for conversion tables\n lx, tx = Inches(conv_header_lef), Inches(conv_header_top)\n wx, hx = Inches(conv_header_wid), Inches(conv_header_hei)\n\n txBox = slide.shapes.add_textbox(lx, tx, wx, hx)\n tf = txBox.text_frame\n\n tf.text = element[0]\n\n p = tf.add_paragraph()\n p.text = \"Current Dates: \" + str(element[1]) + ' - ' + str(element[2])\n p.font.size = Pt(12)\n\n # Insert conversion tables\n get_row = df_c_tab[df_c_tab['KeyPerformanceIndicator'] == element[0]]\n\n columns = get_row.dropna(axis=1).columns.tolist()\n\n create_tables(slide, shapes, get_row.iloc[:,4:], columns[4:], conv_table1_lef, conv_table1_top,\n conv_table1_wid, conv_table1_hei, fs=10, flag=('all',0.76), numformat=None)\n\n # Insert conversion tables\n get_row = df_c_12[df_c_12['KeyPerformanceIndicator'] == element[0]]\n\n columns = get_row.columns.tolist()\n\n create_tables(slide, shapes, get_row.iloc[:,3:], columns[3:], conv_table2_lef, conv_table2_top,\n conv_table2_wid, conv_table2_hei, fs=10, flag=('all',0.76), numformat=get_row['ColumnDataType'])\n\n # Insert conversion figures\n top = Inches(2.0)\n lef = Inches(0.35)\n hei = Inches(3.5)\n slide.shapes.add_picture(figure_out + 'figure_conversion_' + str(i) + '.png', lef, top, height=hei)","sub_path":"generateDeck/trafficConversion_v2.0/conversionslides.py","file_name":"conversionslides.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569951293","text":"# Copyright (c) 2003-2015 by Mike Jarvis\n#\n# TreeCorr is free software: redistribution and use in source and binary forms,\n# with or without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions, and the disclaimer given in the accompanying LICENSE\n# file.\n# 2. 
Redistributions in binary form must reproduce the above copyright notice,\n#    this list of conditions, and the disclaimer given in the documentation\n#    and/or other materials provided with the distribution.\n\nfrom __future__ import print_function\nimport numpy\nimport treecorr\nimport os\n\nfrom test_helper import get_script_name\n\ndef test_constant():\n    # A fairly trivial test is to use a constant value of kappa everywhere.\n\n    ngal = 100000\n    A = 0.05\n    L = 100.\n    numpy.random.seed(8675309)\n    x = (numpy.random.random_sample(ngal)-0.5) * L\n    y = (numpy.random.random_sample(ngal)-0.5) * L\n    kappa = A * numpy.ones(ngal)\n\n    cat = treecorr.Catalog(x=x, y=y, k=kappa, x_units='arcmin', y_units='arcmin')\n    kk = treecorr.KKCorrelation(bin_size=0.1, min_sep=0.1, max_sep=10., sep_units='arcmin')\n    kk.process(cat)\n    print('kk.xi = ',kk.xi)\n    numpy.testing.assert_almost_equal(kk.xi, A**2, decimal=10)\n\n    # Now add some noise to the values. It should still work, but at slightly lower accuracy.\n    kappa += 0.001 * (numpy.random.random_sample(ngal)-0.5)\n    cat = treecorr.Catalog(x=x, y=y, k=kappa, x_units='arcmin', y_units='arcmin')\n    kk.process(cat)\n    print('kk.xi = ',kk.xi)\n    numpy.testing.assert_almost_equal(kk.xi, A**2, decimal=6)\n\n\ndef test_kk():\n    # cf. http://adsabs.harvard.edu/abs/2002A%26A...389..729S for the basic formulae I use here.\n    #\n    # Use kappa(r) = A exp(-r^2/2s^2)\n    #\n    # The Fourier transform is: kappa~(k) = 2 pi A s^2 exp(-s^2 k^2/2) / L^2\n    # P(k) = (1/2pi) <|kappa~(k)|^2> = 2 pi A^2 (s/L)^4 exp(-s^2 k^2)\n    # xi(r) = (1/2pi) int( dk k P(k) J0(kr) )\n    #       = pi A^2 (s/L)^2 exp(-r^2/4s^2)\n    # Note: I'm not sure I handled the L factors correctly, but the units at the end need\n    # to be kappa^2, so it needs to be (s/L)^2.\n\n\n    ngal = 1000000\n    A = 0.05\n    s = 10.\n    L = 30. * s  # Not infinity, so this introduces some error.  Our integrals were to infinity.
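\n    # (Aside: the exp(-r^2/4s^2) shape used for true_xi below follows from the\n    # standard Hankel-transform identity\n    #   int_0^inf dk k exp(-s^2 k^2) J0(kr) = exp(-r^2/4s^2) / (2 s^2),\n    # applied to the P(k) quoted above.)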
\n    numpy.random.seed(8675309)\n    x = (numpy.random.random_sample(ngal)-0.5) * L\n    y = (numpy.random.random_sample(ngal)-0.5) * L\n    r2 = (x**2 + y**2)/s**2\n    kappa = A * numpy.exp(-r2/2.)\n\n    cat = treecorr.Catalog(x=x, y=y, k=kappa, x_units='arcmin', y_units='arcmin')\n    kk = treecorr.KKCorrelation(bin_size=0.1, min_sep=1., max_sep=50., sep_units='arcmin',\n                                verbose=1)\n    kk.process(cat)\n\n    # log(<r>) != <logr>, but it should be close:\n    print('meanlogr - log(meanr) = ',kk.meanlogr - numpy.log(kk.meanr))\n    numpy.testing.assert_almost_equal(kk.meanlogr, numpy.log(kk.meanr), decimal=3)\n\n    r = kk.meanr\n    true_xi = numpy.pi * A**2 * (s/L)**2 * numpy.exp(-0.25*r**2/s**2)\n    print('kk.xi = ',kk.xi)\n    print('true_xi = ',true_xi)\n    print('ratio = ',kk.xi / true_xi)\n    print('diff = ',kk.xi - true_xi)\n    print('max diff = ',max(abs(kk.xi - true_xi)))\n    assert max(abs(kk.xi - true_xi)) < 5.e-7\n\n    # It should also work as a cross-correlation of this cat with itself\n    kk.process(cat,cat)\n    numpy.testing.assert_almost_equal(kk.meanlogr, numpy.log(kk.meanr), decimal=3)\n    assert max(abs(kk.xi - true_xi)) < 5.e-7\n\n    # Check that we get the same result using the corr2 executable:\n    if __name__ == '__main__':\n        cat.write(os.path.join('data','kk.dat'))\n        import subprocess\n        corr2_exe = get_script_name('corr2')\n        p = subprocess.Popen( [corr2_exe,\"kk.params\"] )\n        p.communicate()\n        corr2_output = numpy.genfromtxt(os.path.join('output','kk.out'), names=True)\n        print('kk.xi = ',kk.xi)\n        print('from corr2 output = ',corr2_output['xi'])\n        print('ratio = ',corr2_output['xi']/kk.xi)\n        print('diff = ',corr2_output['xi']-kk.xi)\n        numpy.testing.assert_almost_equal(corr2_output['xi']/kk.xi, 1., decimal=3)\n\n    # Check the fits write option\n    out_file_name = os.path.join('output','kk_out.fits')\n    kk.write(out_file_name)\n    try:\n        import fitsio\n        data = fitsio.read(out_file_name)\n        numpy.testing.assert_almost_equal(data['R_nom'], numpy.exp(kk.logr))\n        numpy.testing.assert_almost_equal(data['meanR'], kk.meanr)\n        numpy.testing.assert_almost_equal(data['meanlogR'], kk.meanlogr)\n        numpy.testing.assert_almost_equal(data['xi'], kk.xi)\n        numpy.testing.assert_almost_equal(data['sigma_xi'], numpy.sqrt(kk.varxi))\n        numpy.testing.assert_almost_equal(data['weight'], kk.weight)\n        numpy.testing.assert_almost_equal(data['npairs'], kk.npairs)\n    except ImportError:\n        print('Unable to import fitsio. 
Skipping fits tests.')\n\n # Check the read function\n kk2 = treecorr.KKCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin')\n kk2.read(out_file_name)\n numpy.testing.assert_almost_equal(kk2.logr, kk.logr)\n numpy.testing.assert_almost_equal(kk2.meanr, kk.meanr)\n numpy.testing.assert_almost_equal(kk2.meanlogr, kk.meanlogr)\n numpy.testing.assert_almost_equal(kk2.xi, kk.xi)\n numpy.testing.assert_almost_equal(kk2.varxi, kk.varxi)\n numpy.testing.assert_almost_equal(kk2.weight, kk.weight)\n numpy.testing.assert_almost_equal(kk2.npairs, kk.npairs)\n\n\nif __name__ == '__main__':\n test_constant()\n test_kk()\n","sub_path":"tests/test_kk.py","file_name":"test_kk.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526919347","text":"# -*- coding: utf-8 -*-\nimport json\nimport re\n\nfrom requests.exceptions import ConnectionError, HTTPError\n\nfrom .services import update_component\n\n\ndef post_save(model, os_path, contents_manager, **kwargs):\n \"\"\"Send notebooks to PlatIAgro Projects API after save.\"\"\"\n # only do this for notebooks\n if model[\"type\"] != \"notebook\":\n return\n\n match = re.search(r\"components/(.*?)/(Experiment|Deployment).ipynb\", os_path)\n\n if match:\n component_id = match.group(1)\n notebook_type = match.group(2)\n\n with open(os_path) as f:\n notebook = json.load(f)\n\n try:\n if notebook_type == \"Experiment\":\n update_component(component_id, experiment_notebook=notebook)\n else:\n update_component(component_id, deployment_notebook=notebook)\n except (ConnectionError, HTTPError) as e:\n print(str(e))\n\n\ndef setup_hooks(web_app):\n web_app.settings[\"contents_manager\"].post_save_hook = post_save\n","sub_path":"jupyterlab_extension/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651816033","text":"import os.path as Path\r\nimport sqlite3\r\n\r\n\r\nSQL_SELECT_ALL = '''SELECT\r\n id, todo_date, descr, status\r\n FROM\r\n todolist\r\n'''\r\nSQL_SELECT_TODO_BY_ID = SQL_SELECT_ALL + ' WHERE id=?'\r\nSQL_SELECT_ALL_TODO = SQL_SELECT_ALL + ' WHERE todo_date>=? AND deleted!=? AND status=? ORDER BY todo_date'\r\nSQL_INSERT_NEW_TODO = '''INSERT INTO\r\n todolist (todo_date, descr, status, deleted)\r\n VALUES (?, ?, 'WORK', 'N')\r\n'''\r\nSQL_UPDATE_TODO_DESCR = 'UPDATE todolist SET descr=? WHERE id=?'\r\nSQL_RESTART_TODO = 'UPDATE todolist SET todo_date=?, status=? WHERE id=?'\r\nSQL_UPDATE_TODO_STATUS = 'UPDATE todolist SET status=? WHERE id=?'\r\nSQL_HIDE_TODO = 'UPDATE todolist SET deleted=? 
WHERE id=?'\r\nSQL_CLEAN_DATABASE = 'DELETE FROM todolist'\r\n\r\n\r\ndef connect(db_name=None):\r\n    \"\"\"Connects to the database.\"\"\"\r\n    if db_name is None:\r\n        db_name = ':memory:'\r\n    conn = sqlite3.connect(db_name)\r\n    return conn\r\n\r\ndef initialize(conn):\r\n    \"\"\"Initializes the database schema.\"\"\"\r\n    script_path = Path.join(Path.dirname(__file__), 'schema.sql')\r\n\r\n    with conn, open(script_path) as f:\r\n        conn.executescript(f.read())\r\n\r\ndef find_all_todo(conn):\r\n    \"\"\"Prints every task in the list.\"\"\"\r\n\r\n    with conn:\r\n        cursor = conn.execute(SQL_SELECT_ALL, ())\r\n        for row in cursor:\r\n            print(row)\r\n    return\r\n\r\ndef find_all_todo_by_date(conn, todo_date, deleted, status):\r\n    \"\"\"Prints the matching tasks, sorted by due date.\"\"\"\r\n\r\n    with conn:\r\n        cursor = conn.execute(SQL_SELECT_ALL_TODO, (todo_date, deleted, status,))\r\n        for row in cursor:\r\n            print(row)\r\n    return\r\n\r\n\r\ndef find_todo_by_id(conn, todo_id):\r\n    \"\"\"Returns the task with the given id.\"\"\"\r\n\r\n    with conn:\r\n        cursor = conn.execute(SQL_SELECT_TODO_BY_ID, (todo_id,))\r\n\r\n        founded_row = cursor.fetchone()\r\n        print(founded_row)\r\n\r\n    return founded_row\r\n\r\n\r\ndef edit_todo_descr(conn, new_descr, todo_id):\r\n    \"\"\"Edits a task's description.\"\"\"\r\n\r\n    with conn:\r\n        cursor = conn.execute(SQL_UPDATE_TODO_DESCR, (new_descr, todo_id,))\r\n    return print('Record updated')\r\n\r\n\r\ndef add_todo(conn, todo_date, todo_descr):\r\n    \"\"\"Creates a new record in the database.\"\"\"\r\n\r\n    with conn:\r\n        cursor = conn.execute(SQL_INSERT_NEW_TODO, (todo_date, todo_descr,))\r\n    return print('New task created')\r\n\r\n\r\n\r\ndef edit_todo_status_to_close(conn, todo_id):\r\n    \"\"\"Closes the task.\"\"\"\r\n    todo_status = 'CLOSED'\r\n    with conn:\r\n        cursor = conn.execute(SQL_UPDATE_TODO_STATUS, (todo_status, todo_id,))\r\n    return print('Selected task closed:')\r\n\r\ndef restart_todo(conn, todo_id, todo_date):\r\n    \"\"\"Restarts the task.\"\"\"\r\n    todo_status = 'WORK'\r\n    with conn:\r\n        cursor = conn.execute(SQL_RESTART_TODO, (todo_date, todo_status, todo_id,))\r\n    return print('Selected task restarted with due date ' + str(todo_date) + ':')
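\r\n\r\n# Illustrative usage only (assumes a fresh database; the values are hypothetical):\r\n#   conn = connect('todo.db')\r\n#   initialize(conn)\r\n#   add_todo(conn, '2021-01-01', 'write report')\r\n#   find_all_todo(conn)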
\r\n","sub_path":"todolist/todolist/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"503167391","text":"#! /usr/bin/env python3\n\nimport atexit\nimport os\nimport sys\nfrom subprocess import PIPE, run, STDOUT, CalledProcessError\n\nexe = \"cargo run --release --\".split()\nrestantes_file = \"restantes.txt\"\nfinalizados_file = \"finalizados.txt\"\nresult_folder = \"resultados\"\nout_folder = os.path.join(os.path.curdir, result_folder)\n\nrestantes = []\nfinalizados = []\n\n\ndef escreve_arquivos():\n    with open(finalizados_file, 'w') as f:\n        s = '\\n'.join(finalizados)\n        print(s, file=f, end='')\n\n    with open(restantes_file, 'w') as f:\n        s = '\\n'.join(restantes)\n        print(s, file=f, end='')\n\natexit.register(escreve_arquivos)\n\nwith open(restantes_file) as f:\n    restantes = [l.strip() for l in f if l.strip()]\n\ntry:\n    with open(finalizados_file) as f:\n        finalizados = [l.strip() for l in f if l.strip()]\nexcept FileNotFoundError:\n    pass\n\nos.makedirs(out_folder, exist_ok=True)\nprint('Output folder:', out_folder, '\\n')\n\nwhile restantes:\n    c = restantes[-1]\n    print('Config:', c)\n\n    infile = sys.argv[1]\n\n    p = run([*exe, infile, '-e'], input=c, stdout=PIPE, stderr=STDOUT,\n            universal_newlines=True)\n\n    try:\n        p.check_returncode()\n    except CalledProcessError as e:\n        # stderr is merged into stdout above, so report e.stdout here\n        print('Error running the process, code: {}, message: {}'\n              .format(e.returncode, e.stdout))\n\n    restantes.pop()\n    finalizados.append(c)\n\n    aid = c.split()[1]\n    saida = p.stdout\n    print(aid, '\\n', saida, end='\\n\\n')\n\n    filename = infile.split('/')[-1]\n\n    out_path = os.path.join(out_folder, filename + '-' + aid + '.csv')\n    with open(out_path, 'w') as f:\n        print(saida, file=f, end='')\n","sub_path":"scripts/executa_experimentos.py","file_name":"executa_experimentos.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"137384969","text":"from flask import Flask,request\n\nimport pandas as pd\nimport numpy as np\nimport pickle\n\n\napp = Flask(__name__)\npickle_in = open('classifier.pkl','rb')\nclf = pickle.load(pickle_in)\n\n@app.route('/')\ndef welcome():\n    return \"Welcome All\"\n\n@app.route('/predict')\ndef pred_():\n    variance=request.args.get(\"variance\")\n    skewness=request.args.get(\"skewness\")\n    curtosis=request.args.get(\"curtosis\")\n    entropy=request.args.get(\"entropy\")\n    prediction = clf.predict([[variance,skewness,curtosis,entropy]])\n    return \"The predicted value is \" + str(prediction)\n\n@app.route(\"/predict_file\",methods=['POST'])\ndef pred_1():\n    df_test = pd.read_csv(request.files.get(\"file\"))\n    prediction = clf.predict(df_test)\n    return \"The predicted values are\" + str(list(prediction))\n\n\n\n\n\nif(__name__=='__main__'):\n    app.run()","sub_path":"Deep Learning ( 2020 )/Deployment/Old_part(1)/V1 - FLask+Docker/ML_Flasssger(V.2)/flask_app_noUI.py","file_name":"flask_app_noUI.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"602216655","text":"from ST3DNet import *\nimport pickle\nfrom utils import *\nimport os\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom keras.utils.vis_utils import plot_model\nfrom keras.optimizers import Adam\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\nfrom evaluation import evaluate\n\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\nnb_epoch = 1500  # number of epoch at training 
stage\nnb_epoch_cont = 20 # number of epoch at training (cont) stage\nbatch_size = 32 # batch size\nT = 24 # number of time intervals in one day\n# lr = 0.0001 # learning rate\nlr = 0.00002 # learning rate\nlen_closeness = 6 # length of closeness dependent sequence\nlen_period = 0 # length of peroid dependent sequence\nlen_trend = 4 # length of trend dependent sequence\nnb_residual_unit = 4 # number of residual units\nnb_flow = 2 # there are two types of flows: new-flow and end-flow\ndays_test = 10 # divide data into two subsets: Train & Test, of which the test set is the last 10 days\nlen_test = T * days_test\nmap_height, map_width = 16, 8 # grid size\nnb_area = 81\nm_factor = math.sqrt(1. * map_height * map_width / nb_area)\n# m_factor = 1\n\npath_result = 'RET'\npath_model = 'MODEL'\nif os.path.isdir(path_result) is False:\n os.mkdir(path_result)\nif os.path.isdir(path_model) is False:\n os.mkdir(path_model)\n\nfilename = os.path.join(\"../data\", 'CACHE', 'ST3DNet', 'BikeNYC_c%d_p%d_t%d_noext'%(len_closeness, len_period, len_trend))\nf = open(filename, 'rb')\nX_train = pickle.load(f)\nY_train = pickle.load(f)\nX_test = pickle.load(f)\nY_test = pickle.load(f)\nmmn = pickle.load(f)\nexternal_dim = pickle.load(f)\ntimestamp_train = pickle.load(f)\ntimestamp_test = pickle.load(f)\n\nfor i in X_train:\n print(i.shape)\n\nY_train = mmn.inverse_transform(Y_train) # X is MaxMinNormalized, Y is real value\nY_test = mmn.inverse_transform(Y_test)\n\nc_conf = (len_closeness, nb_flow, map_height,\n map_width) if len_closeness > 0 else None\nt_conf = (len_trend, nb_flow, map_height,\n map_width) if len_trend > 0 else None\n\nfor i in range(0,10):\n model = ST3DNet(c_conf=c_conf, t_conf=t_conf, external_dim=external_dim, nb_residual_unit=nb_residual_unit)\n\n adam = Adam(lr=lr)\n model.compile(loss='mse', optimizer=adam, metrics=[rmse])\n # model.summary()\n # plot_model(model, to_file='model.png',show_shapes=True)\n\n \n hyperparams_name = 'BikeNYC.c{}.p{}.t{}.resunit{}.lr{}'.format(\n len_closeness, len_period, len_trend, nb_residual_unit, lr)\n fname_param = os.path.join(path_model, '{}.best.h5'.format(hyperparams_name))\n\n early_stopping = EarlyStopping(monitor='val_rmse', patience=50, mode='min')\n model_checkpoint = ModelCheckpoint(fname_param, monitor='val_rmse', verbose=0, save_best_only=True, mode='min')\n\n print('=' * 10)\n print(\"training model...\")\n np.random.seed(i*18)\n tf.random.set_seed(i*18)\n history = model.fit(X_train, Y_train,\n epochs=nb_epoch,\n batch_size=batch_size,\n validation_split=0.1,\n callbacks=[early_stopping, model_checkpoint],\n verbose=0)\n\n model.save_weights(os.path.join(\n path_model, '{}.h5'.format(hyperparams_name)), overwrite=True)\n \n # evaluate model\n print('evaluating using the model that has the best loss on the valid set')\n model.load_weights(fname_param)\n score = model.evaluate(X_train, Y_train, batch_size=Y_train.shape[0] // 48, verbose=0)\n print('Train score: %.6f rmse (real): %.6f' %(score[0], score[1] * m_factor))\n\n\n score = model.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)\n print('Test score: %.6f rmse (real): %.6f' %(score[0], score[1] * m_factor))\n\n print('=' * 10)\n print(\"training model (cont)...\")\n fname_param = os.path.join(path_model, '{}.cont.best.h5'.format(hyperparams_name))\n model_checkpoint = ModelCheckpoint(fname_param, monitor='rmse', verbose=0, save_best_only=True, mode='min')\n history = model.fit(X_train, Y_train, epochs=nb_epoch_cont, verbose=0, batch_size=batch_size, 
callbacks=[model_checkpoint], validation_data=(X_test, Y_test))\n model.save_weights(os.path.join(\n path_model, '{}.h5'.format(hyperparams_name)), overwrite=True)\n\n print('=' * 10)\n print('evaluating using the final model')\n score = model.evaluate(X_train, Y_train, batch_size=Y_train.shape[0] // 48, verbose=0)\n print('Train score: %.6f rmse (real): %.6f' %\n (score[0], score[1] * m_factor))\n\n score = model.evaluate(\n X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)\n print('Test score: %.6f rmse (real): %.6f' %\n (score[0], score[1] * m_factor))\n\n\n model.load_weights(fname_param) # load best weights for current iteration\n \n Y_pred = model.predict(X_test) # compute predictions\n\n score = evaluate(Y_test, Y_pred, rmse_factor=1) # evaluate performance\n\n # save to csv\n csv_name = os.path.join('results','ST3DNet_bikeNYC_results.csv')\n if not os.path.isfile(csv_name):\n if os.path.isdir('results') is False:\n os.mkdir('results')\n with open(csv_name, 'a', encoding = \"utf-8\") as file:\n file.write('iteration,'\n 'rsme_in,rsme_out,rsme_tot,'\n 'mape_in,mape_out,mape_tot,'\n 'ape_in,ape_out,ape_tot'\n )\n file.write(\"\\n\")\n file.close()\n with open(csv_name, 'a', encoding = \"utf-8\") as file:\n file.write(f'{i},{score[0]},{score[1]},{score[2]},{score[3]},'\n f'{score[4]},{score[5]},{score[6]},{score[7]},{score[8]}'\n )\n file.write(\"\\n\")\n file.close()\n K.clear_session()","sub_path":"ST3DNet/main_bikeNYC.py","file_name":"main_bikeNYC.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"622190538","text":"import unittest\nimport time\n\nfrom os import environ\nimport shutil\nimport os\nfrom DataFileUtil.DataFileUtilClient import DataFileUtil\nimport requests\ntry:\n from ConfigParser import ConfigParser # py2 @UnusedImport\nexcept:\n from configparser import ConfigParser # py3 @UnresolvedImport @Reimport\n\nfrom Workspace.WorkspaceClient import Workspace\nfrom DataFileUtil.baseclient import ServerError as DFUError\nfrom ReadsUtils.ReadsUtilsImpl import ReadsUtils\nfrom ReadsUtils.ReadsUtilsServer import MethodContext\n\n\nclass ReadsUtilsTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.token = environ.get('KB_AUTH_TOKEN', None)\n # WARNING: don't call any logging methods on the context object,\n # it'll result in a NoneType error\n cls.ctx = MethodContext(None)\n cls.ctx.update({'token': cls.token,\n 'provenance': [\n {'service': 'ReadsUtils',\n 'method': 'please_never_use_it_in_production',\n 'method_params': []\n }],\n 'authenticated': 1})\n config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)\n cls.cfg = {}\n config = ConfigParser()\n config.read(config_file)\n for nameval in config.items('ReadsUtils'):\n cls.cfg[nameval[0]] = nameval[1]\n cls.shockURL = cls.cfg['shock-url']\n cls.ws = Workspace(cls.cfg['workspace-url'], token=cls.token)\n cls.impl = ReadsUtils(cls.cfg)\n shutil.rmtree(cls.cfg['scratch'])\n os.mkdir(cls.cfg['scratch'])\n suffix = int(time.time() * 1000)\n wsName = \"test_ReadsUtils_\" + str(suffix)\n cls.ws_info = cls.ws.create_workspace({'workspace': wsName})\n cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=cls.token)\n\n @classmethod\n def tearDownClass(cls):\n if hasattr(cls, 'ws_info'):\n cls.ws.delete_workspace({'id': cls.ws_info[0]})\n print('Test workspace was deleted')\n\n @classmethod\n def delete_shock_node(cls, node_id):\n header = {'Authorization': 'Oauth {0}'.format(cls.token)}\n 
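# Shock exposes nodes over a REST interface, so tearing down a test fixture\n        # is a plain HTTP DELETE against the node URL, authenticated with the same\n        # 'Oauth <token>' header built above.\n        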
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,\n allow_redirects=True)\n print('Deleted shock node ' + node_id)\n\n @classmethod\n def upload_file_to_shock(cls, file_path):\n \"\"\"\n Use HTTP multi-part POST to save a file to a SHOCK instance.\n \"\"\"\n\n header = dict()\n header[\"Authorization\"] = \"Oauth {0}\".format(cls.token)\n\n if file_path is None:\n raise Exception(\"No file given for upload to SHOCK!\")\n\n with open(os.path.abspath(file_path), 'rb') as dataFile:\n files = {'upload': dataFile}\n print('POSTing data')\n response = requests.post(\n cls.shockURL + '/node', headers=header, files=files,\n stream=True, allow_redirects=True)\n print('got response')\n\n if not response.ok:\n response.raise_for_status()\n\n result = response.json()\n\n if result['error']:\n raise Exception(result['error'][0])\n else:\n return result[\"data\"]\n\n def make_ref(self, objinfo):\n return str(objinfo[6]) + '/' + str(objinfo[0]) + '/' + str(objinfo[4])\n\n def test_FASTA_validation(self):\n self.check_FASTA('data/sample.fa', 1)\n self.check_FASTA('data/sample.fas', 1)\n self.check_FASTA('data/sample.fna', 1)\n self.check_FASTA('data/sample.fasta', 1)\n self.check_FASTA('data/sample_missing_data.fa', 0)\n\n def check_FASTA(self, filename, result):\n self.assertEqual(\n self.impl.validateFASTA(\n self.ctx, {'file_path': filename})[0]['valid'], result)\n\n def test_FASTA_val_fail_no_file(self):\n self.fail_val_FASTA('nofile', 'No such file: nofile')\n self.fail_val_FASTA(None, 'No such file: None')\n self.fail_val_FASTA('', 'No such file: ')\n\n def test_FASTA_val_fail_bad_ext(self):\n self.fail_val_FASTA('data/sample.txt',\n 'File data/sample.txt is not a FASTA file')\n\n def test_FASTQ_validation(self):\n self.check_fq('data/Sample1.fastq', 0, 1)\n self.check_fq('data/Sample2_interleaved_illumina.fnq', 1, 1)\n # fail on interleaved file specified as non-interleaved\n self.check_fq('data/Sample2_interleaved_illumina.fnq', 0, 0)\n self.check_fq('data/Sample3_interleaved_casava1.8.fq', 1, 1)\n self.check_fq('data/Sample4_interleaved_NCBI_SRA.fastq', 1, 1)\n self.check_fq('data/Sample5_interleaved.fastq', 1, 1)\n self.check_fq('data/Sample5_interleaved_blank_lines.fastq', 1, 1)\n self.check_fq('data/Sample5_noninterleaved.1.fastq', 0, 1)\n self.check_fq('data/Sample5_noninterleaved.2.fastq', 0, 1)\n self.check_fq('data/Sample1_invalid.fastq', 0, 0)\n self.check_fq('data/Sample5_interleaved_missing_line.fastq', 1, 0)\n\n def test_FASTQ_multiple(self):\n f1 = 'data/Sample1.fastq'\n f2 = 'data/Sample4_interleaved_NCBI_SRA.fastq'\n fn1 = os.path.basename(f1)\n fn2 = os.path.basename(f2)\n nfn1 = self.cfg['scratch'] + '/' + fn1\n nfn2 = self.cfg['scratch'] + '/' + fn2\n shutil.copyfile(f1, nfn1)\n shutil.copyfile(f2, nfn2)\n self.assertEqual(self.impl.validateFASTQ(\n self.ctx, [{'file_path': nfn1,\n 'interleaved': 0},\n {'file_path': nfn2,\n 'interleaved': 1}\n ])[0], [{'validated': 1}, {'validated': 1}])\n\n def check_fq(self, filepath, interleaved, ok):\n fn = os.path.basename(filepath)\n newfn = self.cfg['scratch'] + '/' + fn\n shutil.copyfile(filepath, newfn)\n self.assertEqual(self.impl.validateFASTQ(\n self.ctx, [{'file_path': newfn,\n 'interleaved': interleaved}])[0][0]['validated'], ok)\n for l in open(newfn):\n self.assertNotEqual(l, '')\n\n def test_FASTQ_val_fail_no_file(self):\n self.fail_val_FASTQ([{'file_path': 'nofile'}], 'No such file: nofile')\n self.fail_val_FASTQ([{'file_path': None}], 'No such file: None')\n self.fail_val_FASTQ([{'file_path': ''}], 'No such 
file: ')\n\n def test_FASTQ_val_fail_bad_ext(self):\n self.fail_val_FASTQ([{'file_path': 'data/sample.txt'}],\n 'File data/sample.txt is not a FASTQ file')\n\n def test_single_end_reads_gzip(self):\n # gzip, minimum inputs\n ret = self.upload_file_to_shock('data/Sample1.fastq.gz')\n ref = self.impl.upload_reads(self.ctx, {'fwd_id': ret['id'],\n 'sequencing_tech': 'seqtech',\n 'wsname': self.ws_info[1],\n 'name': 'singlereads1'})\n obj = self.dfu.get_objects(\n {'object_refs': [self.ws_info[1] + '/singlereads1']})['data'][0]\n self.delete_shock_node(ret['id'])\n self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))\n self.assertEqual(obj['info'][2].startswith(\n 'KBaseFile.SingleEndLibrary'), True)\n d = obj['data']\n self.assertEqual(d['sequencing_tech'], 'seqtech')\n self.assertEqual(d['single_genome'], 1)\n self.assertEqual('source' not in d, True)\n self.assertEqual('strain' not in d, True)\n self.check_lib(d['lib'], 2847, 'Sample1.fastq.gz', ret['id'],\n '48efea6945c4382c68f5eac485c177c2')\n\n def test_single_end_reads_metagenome_objid(self):\n # single genome = 0, test saving to an object id\n ret = self.upload_file_to_shock('data/Sample5_noninterleaved.1.fastq')\n ref = self.impl.upload_reads(self.ctx, {'fwd_id': ret['id'],\n 'sequencing_tech': 'seqtech2',\n 'wsname': self.ws_info[1],\n 'name': 'singlereads2',\n 'single_genome': 0})\n obj = self.dfu.get_objects(\n {'object_refs': [self.ws_info[1] + '/singlereads2']})['data'][0]\n self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))\n self.assertEqual(obj['info'][2].startswith(\n 'KBaseFile.SingleEndLibrary'), True)\n d = obj['data']\n self.assertEqual(d['sequencing_tech'], 'seqtech2')\n self.assertEqual(d['single_genome'], 0)\n self.assertEqual('source' not in d, True)\n self.assertEqual('strain' not in d, True)\n self.check_lib(d['lib'], 1116, 'Sample5_noninterleaved.1.fastq',\n ret['id'], '140a61c7f183dd6a2b93ef195bb3ec63')\n\n # test saving with IDs only\n ref = self.impl.upload_reads(\n self.ctx, {'fwd_id': ret['id'],\n 'sequencing_tech': 'seqtech2-1',\n 'wsid': self.ws_info[0],\n 'objid': obj['info'][0]})\n obj = self.dfu.get_objects(\n {'object_refs': [self.ws_info[1] + '/singlereads2/2']})['data'][0]\n self.delete_shock_node(ret['id'])\n self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))\n self.assertEqual(obj['info'][2].startswith(\n 'KBaseFile.SingleEndLibrary'), True)\n d = obj['data']\n self.assertEqual(d['sequencing_tech'], 'seqtech2-1')\n self.assertEqual(d['single_genome'], 1)\n self.assertEqual('source' not in d, True)\n self.assertEqual('strain' not in d, True)\n self.check_lib(d['lib'], 1116, 'Sample5_noninterleaved.1.fastq',\n ret['id'], '140a61c7f183dd6a2b93ef195bb3ec63')\n\n def test_single_end_reads_genome_source_strain(self):\n # specify single genome, source, strain, use workspace id\n ret = self.upload_file_to_shock('data/Sample1.fastq')\n strain = {'genus': 'Yersinia',\n 'species': 'pestis',\n 'strain': 'happypants'\n }\n source = {'source': 'my pants'}\n ref = self.impl.upload_reads(\n self.ctx,\n {'fwd_id': ret['id'],\n 'sequencing_tech': 'seqtech3',\n 'wsid': self.ws_info[0],\n 'name': 'singlereads3',\n 'single_genome': 1,\n 'strain': strain,\n 'source': source,\n 'interleaved': 0\n })\n obj = self.dfu.get_objects(\n {'object_refs': [self.ws_info[1] + '/singlereads3']})['data'][0]\n\n self.delete_shock_node(ret['id'])\n self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))\n self.assertEqual(obj['info'][2].startswith(\n 'KBaseFile.SingleEndLibrary'), True)\n d = 
obj['data']\n self.assertEqual(d['sequencing_tech'], 'seqtech3')\n self.assertEqual(d['single_genome'], 1)\n self.assertEqual(d['source'], source)\n self.assertEqual(d['strain'], strain)\n self.check_lib(d['lib'], 9648, 'Sample1.fastq', ret['id'],\n 'f118ee769a5e1b40ec44629994dfc3cd')\n\n def test_paired_end_reads(self):\n # paired end non interlaced, minimum inputs\n ret1 = self.upload_file_to_shock('data/Sample5_noninterleaved.1.fastq')\n ret2 = self.upload_file_to_shock('data/Sample1.fastq.gz')\n ref = self.impl.upload_reads(\n self.ctx, {'fwd_id': ret1['id'],\n 'rev_id': ret2['id'],\n 'sequencing_tech': 'seqtech-pr1',\n 'wsname': self.ws_info[1],\n 'name': 'pairedreads1',\n 'interleaved': 1})\n obj = self.dfu.get_objects(\n {'object_refs': [self.ws_info[1] + '/pairedreads1']})['data'][0]\n self.delete_shock_node(ret1['id'])\n self.delete_shock_node(ret2['id'])\n self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))\n self.assertEqual(obj['info'][2].startswith(\n 'KBaseFile.PairedEndLibrary'), True)\n d = obj['data']\n self.assertEqual(d['sequencing_tech'], 'seqtech-pr1')\n self.assertEqual(d['single_genome'], 1)\n self.assertEqual('source' not in d, True)\n self.assertEqual('strain' not in d, True)\n self.assertEqual(d['interleaved'], 0)\n self.assertEqual(d['read_orientation_outward'], 0)\n self.assertEqual(d['insert_size_mean'], None)\n self.assertEqual(d['insert_size_std_dev'], None)\n self.check_lib(d['lib1'], 1116, 'Sample5_noninterleaved.1.fastq',\n ret1['id'], '140a61c7f183dd6a2b93ef195bb3ec63')\n self.check_lib(d['lib2'], 2847, 'Sample1.fastq.gz',\n ret2['id'], '48efea6945c4382c68f5eac485c177c2')\n\n def test_interleaved_with_pe_inputs(self):\n # paired end interlaced with the 4 pe input set\n ret = self.upload_file_to_shock('data/Sample5_interleaved.fastq')\n ref = self.impl.upload_reads(\n self.ctx, {'fwd_id': ret['id'],\n 'sequencing_tech': 'seqtech-pr2',\n 'wsname': self.ws_info[1],\n 'name': 'pairedreads2',\n 'interleaved': 1,\n 'read_orientation_outward': 'a',\n 'insert_size_mean': 72.1,\n 'insert_size_std_dev': 84.0\n })\n obj = self.ws.get_objects2(\n {'objects': [{'ref': self.ws_info[1] + '/pairedreads2'}]}\n )['data'][0]\n self.delete_shock_node(ret['id'])\n self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))\n self.assertEqual(obj['info'][2].startswith(\n 'KBaseFile.PairedEndLibrary'), True)\n d = obj['data']\n self.assertEqual(d['sequencing_tech'], 'seqtech-pr2')\n self.assertEqual(d['single_genome'], 1)\n self.assertEqual('source' not in d, True)\n self.assertEqual('strain' not in d, True)\n self.assertEqual(d['interleaved'], 1)\n self.assertEqual(d['read_orientation_outward'], 1)\n self.assertEqual(d['insert_size_mean'], 72.1)\n self.assertEqual(d['insert_size_std_dev'], 84.0)\n self.check_lib(d['lib1'], 2232, 'Sample5_interleaved.fastq',\n ret['id'], '971a5f445055c85fd45b17459e15e3ed')\n\n def check_lib(self, lib, size, filename, id_, md5):\n self.assertEqual(lib['size'], size)\n self.assertEqual(lib['type'], 'fq')\n self.assertEqual(lib['encoding'], 'ascii')\n libfile = lib['file']\n self.assertEqual(libfile['file_name'], filename)\n self.assertEqual(libfile['id'], id_)\n self.assertEqual(libfile['hid'].startswith('KBH_'), True)\n self.assertEqual(libfile['remote_md5'], md5)\n self.assertEqual(libfile['type'], 'shock')\n self.assertEqual(libfile['url'], self.shockURL)\n\n def fail_upload_reads(self, params, error, exception=ValueError):\n with self.assertRaises(exception) as context:\n self.impl.upload_reads(self.ctx, params)\n 
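# Note: context.exception.message relies on the Python 2 style .message\n        # attribute; under Python 3 the equivalent check would compare against\n        # str(context.exception) instead.\n        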
self.assertEqual(error, str(context.exception.message))\n\n def test_upload_fail_no_reads(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'name': 'foo'\n },\n 'No reads file provided')\n\n def test_upload_fail_no_seqtech(self):\n self.fail_upload_reads(\n {'fwd_id': 'foo',\n 'wsname': self.ws_info[1],\n 'name': 'foo'\n },\n 'The sequencing technology must be provided')\n\n def test_upload_fail_no_ws(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'fwd_id': 'bar',\n 'name': 'foo'\n },\n 'Exactly one of the workspace ID or name must be provided')\n\n def test_upload_fail_no_obj_id(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'fwd_id': 'bar',\n 'wsname': self.ws_info[1],\n },\n 'Exactly one of the object ID or name must be provided')\n\n def test_upload_fail_non_existant_objid(self):\n ret = self.upload_file_to_shock('data/Sample1.fastq')\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': ret['id'],\n 'objid': 1000000\n },\n 'There is no object with id 1000000', exception=DFUError)\n self.delete_shock_node(ret['id'])\n\n def test_upload_fail_non_existant_shockid(self):\n ret = self.upload_file_to_shock('data/Sample1.fastq')\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': 'foo',\n 'name': 'bar'\n },\n 'Error downloading file from shock node foo: Node not found',\n exception=DFUError)\n self.delete_shock_node(ret['id'])\n\n def test_upload_fail_non_string_wsname(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': 1,\n 'fwd_id': 'bar',\n 'name': 'foo'\n },\n 'wsname must be a string')\n\n def test_upload_fail_bad_wsname(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': '&bad',\n 'fwd_id': 'bar',\n 'name': 'foo'\n },\n 'Illegal character in workspace name &bad: &', exception=DFUError)\n\n def test_upload_fail_non_num_mean(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': 'bar',\n 'name': 'foo',\n 'insert_size_mean': 'foo'\n },\n 'insert_size_mean must be a number')\n\n def test_upload_fail_non_num_std(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': 'bar',\n 'name': 'foo',\n 'insert_size_std_dev': 'foo'\n },\n 'insert_size_std_dev must be a number')\n\n def test_upload_fail_neg_mean(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': 'bar',\n 'name': 'foo',\n 'insert_size_mean': 0\n },\n 'insert_size_mean must be > 0')\n\n def test_upload_fail_neg_std(self):\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': 'bar',\n 'name': 'foo',\n 'insert_size_std_dev': 0\n },\n 'insert_size_std_dev must be > 0')\n\n def test_upload_fail_bad_fasta(self):\n print('*** upload_fail_bad_fasta ***')\n ret = self.upload_file_to_shock('data/Sample1_invalid.fastq')\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': ret['id'],\n 'name': 'bar'\n },\n 'Invalid fasta file /kb/module/work/tmp/fwd/Sample1_invalid' +\n '.fastq from Shock node ' + ret['id'])\n self.delete_shock_node(ret['id'])\n\n def test_upload_fail_interleaved_for_single(self):\n ret = self.upload_file_to_shock('data/Sample5_interleaved.fastq')\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': ret['id'],\n 'name': 'bar'\n },\n 'Invalid 
fasta file /kb/module/work/tmp/fwd/Sample5_interleaved' +\n '.fastq from Shock node ' + ret['id'])\n self.delete_shock_node(ret['id'])\n\n def test_upload_fail_interleaved_for_paired(self):\n ret1 = self.upload_file_to_shock('data/Sample1.fastq')\n ret2 = self.upload_file_to_shock('data/Sample5_interleaved.fastq')\n self.fail_upload_reads(\n {'sequencing_tech': 'tech',\n 'wsname': self.ws_info[1],\n 'fwd_id': ret1['id'],\n 'rev_id': ret2['id'],\n 'name': 'bar'\n },\n 'Invalid fasta file /kb/module/work/tmp/rev/Sample5_interleaved' +\n '.fastq from Shock node ' + ret2['id'])\n self.delete_shock_node(ret1['id'])\n self.delete_shock_node(ret2['id'])\n\n def fail_val_FASTA(self, filename, error, exception=ValueError):\n with self.assertRaises(exception) as context:\n self.impl.validateFASTA(self.ctx, {'file_path': filename})\n self.assertEqual(error, str(context.exception.message))\n\n def fail_val_FASTQ(self, params, error, exception=ValueError):\n with self.assertRaises(exception) as context:\n self.impl.validateFASTQ(self.ctx, params)\n self.assertEqual(error, str(context.exception.message))\n","sub_path":"test/ReadsUtils_server_test.py","file_name":"ReadsUtils_server_test.py","file_ext":"py","file_size_in_byte":21202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"282773892","text":"import os\nimport csv\n\n\n# read data then sorted by Candidate\nwith open('election_data.csv', 'r', newline='') as fileinput:\n csv_input = csv.DictReader(fileinput)\n # Sort data by candidate column\n data = sorted(csv_input, key=lambda row: (row[\"Candidate\"]))\n\n# output a new sorted csv file\nwith open('sorted_election_data.csv', 'w', newline='') as fileoutput: \n csv_output = csv.DictWriter(fileoutput, fieldnames=csv_input.fieldnames)\n csv_output.writeheader()\n csv_output.writerows(data)\n\nwith open(\"sorted_election_data.csv\", \"r\", newline=\"\") as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n \n # read 1st row\n csv_header = next(csvreader)\n\n # read 2nd row\n row2 = next(csvreader)\n newcandidate = row2[2]\n candidatelist =[newcandidate]\n counter = 1\n candidatecount =[]\n\n for row in csvreader:\n \n # detect new candidate\n if row[2] != newcandidate:\n newcandidate = row[2]\n candidatelist.append(newcandidate)\n candidatecount.append(counter)\n counter = 0\n \n counter +=1\n\n # append the last count for last candidate \n candidatecount.append(counter)\n\n# Find total vote max,vote count and percent votes for each candidate\ntotalvote = sum(candidatecount)\nmaxcount = max(candidatecount)\nmaxpos = candidatecount.index(max(candidatecount)) \nwinner = candidatelist[maxpos]\npercentvote = [round(x/totalvote * 100,3) for x in candidatecount]\npercentresults = zip(candidatelist,percentvote, candidatecount)\n\n\n# print results to terminal\nprint(\"Election Results\")\nprint(\"------------------------\")\nprint(f'Total Votes: {totalvote}')\nprint(\"------------------------\")\nfor x in percentresults:\n print(f'{x[0]}: {x[1]}% ({x[2]})')\nprint(\"------------------------\")\nprint(f'Winner: {winner}')\n\n\n# write result to a csv file\n# re-create the tubble \n# tubble values are discarded after iterated through in print() above\n# thus re-creating the tublle here\npercentresults = zip(candidatelist,percentvote, candidatecount)\n\n# write file to csv file \nwith open(\"election_results.csv\", \"w\", newline=\"\") as writefile:\n \n csvwriter = csv.writer(writefile, delimiter=\",\")\n csvwriter.writerow([\"Election 
Results\"])\n csvwriter.writerow([\"Total Votes\", str(totalvote)])\n csvwriter.writerows(percentresults)\n csvwriter.writerow([\"Winner\", str(winner)])\n\n## Comments:\n# I chose to sort Candidate by name to track new candidates\n# assuming I don't know how how many candidates and who are the candidates\n# For this homework, since the result shows there are only 4 candidates\n# I can also do \"if\" statesments and keep track of counts there\n# I have tried to sort data using data read from csv.read but was not able \n# to get it run, unlike with csv.DictRead\n# I have also tried to loop through the sorted data. However, data is an order-dict\n# type and I couldn't iterate sucessfully.\n# Because of the above reasons, I chose to re-write the sorted data into a new csv\n# file and do my analysis with the new file.\n# I'm aware that this is not memory- and time-friendly method. \n# I'd love to know how I could sort data using data read from csv.read or \n# how i could loop with order-dict data outout from csv.DictRead","sub_path":"election.py","file_name":"election.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"38927632","text":"import allure\nimport pytest\nfrom lib.my_requests import MyRequests\nfrom lib.base_case import BaseCase\nfrom lib.assertions import Assertions\n\n\n@allure.epic(\"Registration cases\")\n@allure.feature(\"Registration\")\nclass TestUserRegister(BaseCase):\n missed_params = [\n (\"username\"),\n (\"firstName\"),\n (\"lastName\"),\n (\"email\"),\n (\"password\")\n ]\n\n @allure.title(\"Test register user (successful)\")\n @allure.severity(severity_level=\"CRITICAL\")\n @allure.story(\"Register user (successful)\")\n @allure.description(\"This test successfully register user by prepare data\")\n def test_create_user_successfully(self):\n data = self.prepare_registration_data()\n response = MyRequests.post(\"/user/\", data=data)\n Assertions.assert_code_status(response, 200)\n Assertions.assert_json_has_key(response, \"id\")\n\n @allure.title(\"Test register user with existing email (unsuccessful)\")\n @allure.story(\"Register with existing email\")\n @allure.description(\"This test doesn't register user with existing email\")\n def test_create_user_with_existing_email(self):\n email = 'vinkotov@example.com'\n data = self.prepare_registration_data(email)\n response = MyRequests.post(\"/user/\", data=data)\n\n Assertions.assert_code_status(response, 400)\n assert response.content.decode(\"utf-8\") == f\"Users with email '{email}' already exists\", \\\n f\"Unexpected response content {response.content}\"\n\n @allure.title(\"Test register user with email without '@' (unsuccessful)\")\n @allure.story(\"Register with user with email without '@'\")\n @allure.description(\"This test doesn't register user with email without '@'\")\n def test_create_user_with_incorrect_email(self):\n email = 'test-test.ru'\n data = self.prepare_registration_data(email)\n response = MyRequests.post(\"/user/\", data=data)\n Assertions.assert_code_status(response, 400)\n assert response.content.decode(\"utf-8\") == f\"Invalid email format\", \\\n f\"Unexpected response content {response.content}\"\n\n @allure.title(\"Test register user with missed param (unsuccessful)\")\n @allure.story(\"Register with missed param\")\n @allure.description(\"This test doesn't register user with missed param\")\n @pytest.mark.parametrize('missed_param', missed_params)\n def test_create_user_without_param(self, 
missed_param):\n        data = self.prepare_registration_data()\n        data.pop(missed_param)\n        data_with_missed_param = data\n        response = MyRequests.post(\"/user/\", data=data_with_missed_param)\n        Assertions.assert_code_status(response, 400)\n        assert response.content.decode(\"utf-8\") == f\"The following required params are missed: {missed_param}\", \\\n            f\"Unexpected response content {response.content} with missed param: {missed_param}\"\n\n    @allure.title(\"Test register user with too short name (unsuccessful)\")\n    @allure.story(\"Register with too short name\")\n    @allure.description(\"This test doesn't register a user with a name that is too short\")\n    def test_create_user_with_short_name(self):\n        data = self.prepare_registration_data()\n        data['firstName'] = 't'\n        response = MyRequests.post(\"/user/\", data=data)\n        Assertions.assert_code_status(response, 400)\n        assert response.content.decode(\"utf-8\") == f\"The value of 'firstName' field is too short\", \\\n            f\"Unexpected response content for field 'firstName' when it is too short\"\n\n    @allure.title(\"Test register user with too long name (unsuccessful)\")\n    @allure.story(\"Register with too long name\")\n    @allure.description(\"This test doesn't register a user with a name that is too long\")\n    def test_create_user_with_long_name(self):\n        data = self.prepare_registration_data()\n        long_name = \"t\" * 251  # one character over the 250-character limit\n        data['firstName'] = long_name\n        response = MyRequests.post(\"/user/\", data=data)\n        Assertions.assert_code_status(response, 400)\n        assert response.content.decode(\"utf-8\") == f\"The value of 'firstName' field is too long\", \\\n            f\"Unexpected response content for field 'firstName' when it is too long\"\n\n\n\n\n","sub_path":"tests/test_user_register.py","file_name":"test_user_register.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623989098","text":"import socket\r\nimport sys\r\ntry:\r\n    s=socket.socket()\r\nexcept socket.error as err:\r\n    print(\"socket creation error {}\".format(err))\r\n    sys.exit(1)\r\nport=9999\r\ns.bind(('',port))\r\nprint(\"socket bound to port {}\".format(port))\r\ns.listen(5)\r\nwhile True:\r\n    client,addr=s.accept()\r\n    print(\"connection received from {}\".format(addr))\r\n    client.send(\"conn established. 
from server\".encode('utf-8'))\r\n\r\n\r\n ","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389015707","text":"# Community Page\nfrom werkzeug.exceptions import abort\nfrom my_app.models import Posts, Profile, User\nfrom flask import Blueprint, render_template, redirect, url_for, request, flash\nfrom flask_login import login_required, current_user\nfrom datetime import datetime\nfrom my_app import db\n\ncommunity_bp = Blueprint('community_bp', __name__, url_prefix='/community')\n\n\n@community_bp.route('/')\n@login_required\ndef index():\n \"\"\"\n Returns the main community page where the user can see and create posts\n \"\"\"\n posts = Posts.query.all()\n posts.reverse()\n users = User.query.all()\n return render_template('community.html', title='Welcome to the Community Section', posts=posts, users=users)\n\n\n@community_bp.route('/')\ndef post(post_id):\n \"\"\"\n Takes the id of a specific post and returns a page showing that post\n \"\"\"\n post = Posts.query.filter_by(id=post_id).first()\n profile = Profile.query.filter_by(user_id=post.user_id).first()\n\n # Checks that the post exist in the db and returns a 404 error if not\n if post is None:\n abort(404)\n\n return render_template('post.html', post=post, profile=profile)\n\n\n@community_bp.route('/create', methods=('GET', 'POST'))\n@login_required\ndef create():\n \"\"\"\n Returns the page where new posts can be created\n \"\"\"\n\n # Requests the 'title' and 'content' that the user inputs to the form\n if request.method == 'POST':\n title = request.form['title']\n content = request.form['content']\n\n # Checks that there is a title and flashes a message if not\n if not title:\n flash('Title is required!')\n\n # Creates a new post using the info from the form and commits it to the db\n else:\n post = Posts(title=title, content=content, created=datetime.utcnow(), user_id=current_user.id)\n db.session.add(post)\n db.session.commit()\n return redirect(url_for('community_bp.index'))\n return render_template('create.html')\n\n\n@community_bp.route('//edit', methods=('GET', 'POST'))\n@login_required\ndef edit(post_id):\n \"\"\"\n Returns the page to edit a post given its id\n \"\"\"\n post = Posts.query.filter_by(id=post_id).first()\n\n # Checks that the current user wrote the post and returns a 403 error if not\n if post.user_id != current_user.id:\n abort(403)\n\n if post is None:\n abort(404)\n\n if request.method == 'POST':\n title = request.form['title']\n content = request.form['content']\n\n if not title:\n flash('Title is required!')\n\n # Changes the title and content of the current post and commits to the db\n else:\n post.title = title\n post.content = content\n db.session.commit()\n return redirect(url_for('community_bp.index'))\n\n return render_template('edit.html', post=post)\n\n\n@community_bp.route('//delete', methods=('POST',))\n@login_required\ndef delete(post_id):\n \"\"\"\n Returns the method to delete a post\n \"\"\"\n post = Posts.query.filter_by(id=post_id).first()\n db.session.delete(post)\n db.session.commit()\n\n return redirect(url_for('community_bp.index'))\n","sub_path":"my_app/community/community.py","file_name":"community.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74771491","text":"# import libraries for passing arguements at the command line\nimport 
argparse\nimport sys\n# import all exercise methods\nfrom environment import *\nfrom agent import *\nfrom simple_reflex_agent import *\nfrom simple_reflex_with_state import *\nfrom zig_zag_vacuum import *\nfrom randomAgent import *\nfrom statebot2 import *\nfrom murphyBot import *\nfrom faultyBot import *\nfrom smallChildrenEnviron import *\n\n# get direction constants for traversing vacuum world \nfrom directions import *\n\n\ndef main():\n    # get all arguments from the command line\n    parser = argparse.ArgumentParser(\n        prog=sys.argv[0],\n        description=\"Implements a performance-measuring environment simulator \" +\n                    \"for vacuum-cleaner world with modular inputs for sensors, \" +\n                    \"actuators, and environment characteristics (size, shape, \" +\n                    \"dirt placement) that can be changed easily\",\n    )\n\n    # getting width of the grid from the command line\n    parser.add_argument(\n        \"--width\",\n        nargs=1,\n        default=2,\n        type=int,\n        # choices=range(0,100),\n        help=\"specify the width of the grid. defaults to 2\",\n    )\n\n    # getting height of the grid from the command line\n    parser.add_argument(\n        \"--height\",\n        nargs=1,\n        default=1,\n        type=int,\n        # choices=range(0,100),\n        help=\"specify the height of the grid. defaults to 1\",\n    )\n\n    # getting exercise to run from the command line, either 8, 9, or any parts of 10, 11, or 12\n    parser.add_argument(\n        \"--exercise\",\n        # nargs=1,\n        default=\"2.8\",\n        type=str,\n        choices=[\"2.8\",\"2.9\",\"2.10a\",\"2.10b\",\"2.10c\",\"2.11a\",\n                 \"2.11b\",\"2.11c\",\"2.11d\",\"2.12a\",\"2.12b\"],\n        help=\"specify the exercise to run. defaults to 2.8\",\n    )\n\n    # error validation for insufficient number of inputs from the command line\n    if len(sys.argv) == 1:\n        parser.print_help(sys.stderr)\n        sys.exit(1)\n\n    # assign variables for width, height, and exercise to use in the program\n    args = parser.parse_args()\n    envWidth = args.width\n    envHeight = args.height\n    exercise = args.exercise\n\n    # only gets one width\n    if type(envWidth) is list:\n        envWidth = envWidth[0]\n    \n\n    # only gets one height\n    if type(envHeight) is list:\n        envHeight = envHeight[0]\n\n    # checks for which exercise in the book to run\n    if exercise == '2.8':\n        # builds a performance measuring environment, a 'vacuum-cleaner world'\n        print('2.8')\n        print(\"Random version of vacuum world, where 1 is dirty and 0 is clean.\")\n        vacuumWorld = Environment(envWidth,envHeight)\n        vacuumWorld.RandomizeWithoutWalls()\n        vacuumWorld.Visualize()\n    \n    elif exercise == '2.9':\n        # builds a simple reflex agent to traverse an environment similar to the one\n        # in exercise 2.8\n        print('2.9')\n        vacuumWorld = Environment(envWidth,envHeight)\n        vacuumWorld.RandomizeWithoutWalls()\n        print(\"Example run of simple reflex agent\")\n        vacuumWorld.Visualize()\n        reflexAgent = SimpleReflexAgent((0,0),EAST,vacuumWorld)\n        reflexAgent.Run()\n        reflexAgent.PrintLog()\n        vacuumWorld.Visualize()\n        print()\n        print(\"Results of running through all possible variations:\")\n        results=reflexAgent.RunAllEnvironVariations()\n        # prints the appropriate value\n        print(\"Percent Clean\\nAvg:{}%\\tMin:{}%\\tMax:{}%\\n\".format(results[\"cleanAvg\"],results[\"cleanMin\"],results[\"cleanMax\"]))\n        print(\"Steps Taken\\nAvg:{}\\tMin:{}\\tMax:{}\\n\".format(results[\"stepsAvg\"],results[\"stepsMin\"],results[\"stepsMax\"]))\n\n\n    elif exercise == '2.10a':\n        # runs the simple reflex agent in the penalized movement environment\n        print('2.10a')\n        print('Simple reflex agent in random environment with penalized movement')\n        vacuumWorld = 
Environment(envWidth,envHeight)\n vacuumWorld.RandomizeWithoutWalls()\n print(\"Example run of simple reflex agent\")\n vacuumWorld.Visualize()\n reflexAgent = SimpleReflexAgent((0,0),EAST,vacuumWorld)\n reflexAgent.Run()\n reflexAgent.PrintLog()\n vacuumWorld.Visualize()\n \n # print header, run all variations of environment traversal\n print(\"\\nResults of running through all possible variations:\")\n results=reflexAgent.RunAllEnvironVariations()\n\n # print the results from running all environment traversals\n print(\"Percent Clean\\nAvg:{}%\\tMin:{}%\\tMax:{}%\\n\".format(results[\"cleanAvg\"],results[\"cleanMin\"],results[\"cleanMax\"]))\n print(\"Steps Taken\\nAvg:{}\\tMin:{}\\tMax:{}\\n\".format(results[\"stepsAvg\"],results[\"stepsMin\"],results[\"stepsMax\"]))\n print(\"Score\\nAvg:{}\\tMin:{}\\tMax:{}\\n\\n\".format(results[\"scoreAvg\"],results[\"scoreMin\"],results[\"scoreMax\"]))\n\n\n elif exercise == '2.10b':\n # runs a state-based reflex agent in the penalized movement environment\n print('2.10b')\n vacuumWorld = Environment(envWidth, envHeight)\n vacuumWorld.RandomizeWithoutWalls()\n print(\"Example run of simple reflex agent with state\")\n vacuumWorld.Visualize()\n reflexAgentState = StateBot2((0,0),EAST,vacuumWorld)\n reflexAgentState.Run()\n vacuumWorld.Visualize()\n\n # print header, run all variations of environment traversal\n print(\"\\nResults of running through all possible variations:\")\n results=reflexAgentState.RunAllEnvironVariations()\n\n # print the results from running all environment traversals\n print(\"Percent Clean\\nAvg:{}%\\tMin:{}%\\tMax:{}%\\n\".format(results[\"cleanAvg\"],results[\"cleanMin\"],results[\"cleanMax\"]))\n print(\"Steps Taken\\nAvg:{}\\tMin:{}\\tMax:{}\\n\".format(results[\"stepsAvg\"],results[\"stepsMin\"],results[\"stepsMax\"]))\n print(\"Score\\nAvg:{}\\tMin:{}\\tMax:{}\\n\\n\".format(results[\"scoreAvg\"],results[\"scoreMin\"],results[\"scoreMax\"]))\n\n elif exercise == '2.10c':\n print('2.10c')\n print(\"An agent that knows the status of all clean/dirty tiles cannot be rational as it is omniscient\")\n print('Example')\n vacuumWorld = Environment(envWidth, envHeight)\n vacuumWorld.RandomizeWithoutWalls()\n print('BEFORE')\n vacuumWorld.Visualize()\n reflexAgentEntireState = SimpleReflexAgentWithEntireState((0,0),EAST,vacuumWorld)\n reflexAgentEntireState.Run()\n reflexAgentEntireState.PrintDirtyTiles()\n print('AFTER')\n vacuumWorld.Visualize()\n\n elif exercise == '2.11a':\n print('2.11a')\n print('A simple reflex agent can still be rational in an unknown environment, as it is still acting logically on its percepts')\n print('Example')\n vacuumWorld = Environment(envWidth, envHeight)\n vacuumWorld.RandomizeWithWalls()\n print('BEFORE')\n vacuumWorld.Visualize()\n agent = SimpleReflexAgent((0,0),EAST,vacuumWorld)\n agent.Run()\n print('AFTER')\n vacuumWorld.Visualize()\n\n elif exercise == '2.11b':\n print('2.11b')\n print('Random Agent')\n #Can a simple reflex agent with a randomized agent function outperform a simple reflex agent?\n #Design such an agent and measure its performance on several environments.\n print('NOTE: 2 in the grid represents a wall')\n vacuumWorld = Environment(5,5)\n vacuumWorld.RandomizeWithoutWalls()\n vacuumWorld.SetWallsFromBinary(9439748)\n print('BEFORE')\n vacuumWorld.Visualize()\n\n randomAgent = RandomAgent((0,0),EAST,vacuumWorld)\n randomAgent.Run()\n\n #randomAgent.PrintLog()\n print('AFTER')\n vacuumWorld.Visualize()\n print(\"*******************************************\")\n\n vacuumWorld = 
Environment(5,5)\n vacuumWorld.RandomizeWithoutWalls()\n vacuumWorld.SetWallsFromBinary(145536)\n print('BEFORE')\n vacuumWorld.Visualize()\n\n randomAgent = RandomAgent((0,0),EAST,vacuumWorld)\n randomAgent.Run()\n\n #randomAgent.PrintLog()\n print('AFTER')\n vacuumWorld.Visualize()\n\n print(\"*******************************************\")\n\n vacuumWorld = Environment(5,5)\n vacuumWorld.RandomizeWithoutWalls()\n vacuumWorld.SetWallsFromBinary(15360)\n print('BEFORE')\n vacuumWorld.Visualize()\n\n randomAgent = RandomAgent((0,0),EAST,vacuumWorld)\n randomAgent.Run()\n\n #randomAgent.PrintLog()\n print('AFTER')\n\n vacuumWorld.Visualize()\n\n elif exercise == '2.11c':\n print('2.11c')\n # environment designed to make our agent not behave rationally\n vacuumWorld = Environment(5,5)\n vacuumWorld.RandomizeWithoutWalls()\n vacuumWorld.SetWallsFromBinary(16864)\n vacuumWorld.Visualize()\n\n randomAgent = RandomAgent((0,0),EAST,vacuumWorld)\n randomAgent.Run()\n\n #randomAgent.PrintLog()\n\n vacuumWorld.Visualize()\n\n print(\"Note how unlikely the agent is to pass around the wall with a 1 tile gap\")\n elif exercise == '2.11d':\n print('2.11d')\n\n\n # trial 1\n #************************************************************************************************\n simpleWorld = Environment(5,5)\n stateWorld = Environment(5,5)\n \n simpleWorld.SetGridFromBinary(23821635)\n simpleWorld.SetWallsFromBinary(480)\n \n stateWorld.SetGridFromBinary(23821635)\n stateWorld.SetWallsFromBinary(480)\n\n print(\"TRIAL 1\\nBEFORE\")\n simpleWorld.Visualize()\n\n simpleAgent = SimpleReflexAgent((0,0),EAST,simpleWorld)\n stateAgent = StateBot2((0,0),EAST,stateWorld)\n\n simpleAgent.Run()\n stateAgent.Run()\n\n print(\"AFTER\")\n print(\"With state\")\n stateWorld.Visualize()\n \n print(\"Without state\")\n simpleWorld.Visualize()\n \n resultsWithoutState = simpleWorld.GetPerformanceMeasure()\n resultsWithState = stateWorld.GetPerformanceMeasure()\n\n # trial 2\n #************************************************************************************************\n simpleWorld = Environment(5,5)\n stateWorld = Environment(5,5)\n \n simpleWorld.SetGridFromBinary(23821635)\n simpleWorld.SetWallsFromBinary(145536)\n \n stateWorld.SetGridFromBinary(23821635)\n stateWorld.SetWallsFromBinary(145536)\n\n print(\"TRIAL 2\\nBEFORE\")\n simpleWorld.Visualize()\n\n simpleAgent = SimpleReflexAgent((0,0),EAST,simpleWorld)\n stateAgent = StateBot2((0,0),EAST,stateWorld)\n\n simpleAgent.Run()\n stateAgent.Run()\n\n print(\"AFTER\")\n print(\"With state\")\n stateWorld.Visualize()\n \n print(\"Without state\")\n simpleWorld.Visualize()\n \n resultsWithoutState = simpleWorld.GetPerformanceMeasure()\n resultsWithState = stateWorld.GetPerformanceMeasure()\n\n # trial 3\n #************************************************************************************************\n simpleWorld = Environment(5,5)\n stateWorld = Environment(5,5)\n \n simpleWorld.SetGridFromBinary(23821635)\n simpleWorld.SetWallsFromBinary(15360)\n \n stateWorld.SetGridFromBinary(23821635)\n stateWorld.SetWallsFromBinary(15360)\n\n print(\"TRIAL 3\\nBEFORE\")\n simpleWorld.Visualize()\n\n simpleAgent = SimpleReflexAgent((0,0),EAST,simpleWorld)\n stateAgent = StateBot2((0,0),EAST,stateWorld)\n\n simpleAgent.Run()\n stateAgent.Run()\n\n print(\"AFTER\")\n print(\"With state\")\n stateWorld.Visualize()\n \n print(\"Without state\")\n simpleWorld.Visualize()\n \n resultsWithoutState = simpleWorld.GetPerformanceMeasure()\n resultsWithState = 
stateWorld.GetPerformanceMeasure()\n\n\n elif exercise == '2.12a':\n print('2.12a')\n vacuumWorld = Environment(envWidth,envHeight)\n vacuumWorld.RandomizeWithoutWalls()\n print(\"Murphy's Law\")\n vacuumWorld.Visualize()\n agent = MurphyBot((0,0),EAST,vacuumWorld)\n agent.Run()\n vacuumWorld.Visualize()\n\n vacuumWorld = Environment(envWidth,envHeight)\n vacuumWorld.RandomizeWithoutWalls()\n print(\"With a faulty dirt sensor\")\n vacuumWorld.Visualize()\n agent = FaultyBot((0,0),EAST,vacuumWorld)\n agent.Run()\n vacuumWorld.Visualize()\n\n elif exercise == '2.12b':\n print('2.12b')\n\n vacuumWorld = SmallChildren(envWidth,envHeight)\n vacuumWorld.RandomizeWithoutWalls()\n print(\"Small Children\")\n vacuumWorld.Visualize()\n agent = BumpBot((0,0),EAST,vacuumWorld)\n agent.Run()\n vacuumWorld.Visualize()\n\n vacuumWorld = SmallChildren(envWidth,envHeight)\n vacuumWorld.RandomizeWithoutWalls()\n print(\"Using an algorithm that passes each tile exactly once:\")\n vacuumWorld.Visualize()\n agent = ZigZagVacuum((0,0),EAST,vacuumWorld)\n agent.Run()\n vacuumWorld.Visualize()\n print(\"Hypothetically could clean the room after infinite passes\")\n\n print(\"done\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188266598","text":"import pandas as pd\nimport numpy as np\nimport neurolab as nl\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import subplots, show\n\nfrom firebase import firebase\nfirebase = firebase.FirebaseApplication('https://thesis-10ad5.firebaseio.com', None)\n\n\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nplt.rcParams[\"font.size\"]=10.0\nplt.rcParams['figure.figsize'] = 5, 5\n\ndef get_train_data(inp_df, max_total_load, max_zonal_load, max_zonal_price):\n input = np.asarray([[inp_df.iloc[ind,0]/max_total_load, inp_df.iloc[ind,1]/max_zonal_load ] for ind in inp_df.index])\n target_1 = np.asarray([[inp_df['Zonal Price'].mean()/max_zonal_price, inp_df['Zonal Price'].std()/max_zonal_price] for ind in inp_df.index])\n target_2 = np.asarray([[inp_df['Zonal Price'].quantile(0.05)/max_zonal_price, inp_df['Zonal Price'].quantile(0.95)/max_zonal_price] for ind in inp_df.index])\n return input, target_1, target_2\n\ndef get_test_data(array1, max_total_load, max_zonal_load):\n output = np.asarray([[array1[0]/max_total_load, array1[1]/max_zonal_load]])\n return output\n\ndef get_sim_data(net, inp, max_zonal_price):\n temp_output = net.sim(inp)\n output1=[item[0]*max_zonal_price for item in temp_output]\n output2=[item[1]*max_zonal_price for item in temp_output]\n return output1, output2\n\ndef PI_consutruct_mean_var(var_mean, var_std):\n upper=[var_mean[ind]+1.96*var_std[ind] for ind in list(np.arange(0,len(var_mean),1))]\n lower=[var_mean[ind]-1.96*var_std[ind] for ind in list(np.arange(0,len(var_mean),1))]\n return upper, lower\n\n\ndef PI_construct_quantile(lower_bound, upper_bound):\n upper = upper_bound\n lower = lower_bound\n return lower, upper\n\ndef cal_PI(upper, lower, actual_price):\n number_of_price=len(actual_price)\n temp_df=pd.DataFrame()\n temp_df['upper']=upper\n temp_df['lower']=lower\n temp_df['actual price']=actual_price\n temp_df['actual price < upper']=[1 if temp_df.iloc[ind, 0] >= temp_df.iloc[ind, 2] else 0 for ind in list(temp_df.index)]\n temp_df['actual price > lower']=[1 if temp_df.iloc[ind, 1] <= temp_df.iloc[ind, 2] else 0 for ind in list(temp_df.index)]\n 
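# PICP (prediction interval coverage probability) below is the fraction of\r\n    # actual prices falling inside [lower, upper]; MPIW is the mean interval\r\n    # width, and NPIW normalizes MPIW by the observed range of actual prices.\r\n    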
temp_df['within boundary']=[1 if temp_df.iloc[ind,3] == temp_df.iloc[ind,4] else 0 for ind in list(temp_df.index)]\n # print(temp_df)\n PICP=temp_df['within boundary'].sum()/number_of_price\n temp_df['upper - lower']=temp_df['upper']-temp_df['lower']\n MPIW = temp_df['upper - lower'].sum()/number_of_price\n NPIW = MPIW/(temp_df['actual price'].max()-temp_df['actual price'].min())\n # print(temp_df['actual price'].max()-temp_df['actual price'].min())\n return temp_df, PICP, MPIW, NPIW\n\n\n\ndef train_NN(inp_df, input, target):\n\n net = nl.net.newff([[0,1], [0,1]], [5, 2])\n err = net.train(input, target, epochs=500, show=100, goal=0.02)\n output = net.sim(input)\n return output\n\n\n\ndf1=pd.read_csv('GEFCom2014_Data/Price/Task 1/Task1_P.csv')\n\n#separate timestamp column into two date and time column\ndf1['Date'], df1['Time'] = df1['timestamp'].str.split(' ', 1).str\ndf1['Hour'] = np.asarray([int(str.replace(':00','')) for str in list(df1['Time'])])\ndf1=df1.drop(columns=['timestamp','Time','ZONEID'])\n#add column day of week {'Saturday':0, 'Sunday':1, ..., 'Friday':6} when 01012011 is Saturday\ndf1['day_type']=np.asarray([item//24%7 for item in list(np.arange(0,len(df1.index),1))])\n\n#add column holiday (0 is non-holiday, 1 is holiday)\n\nset_holiday=set(['01012011', '16012011', '20022011', '29052011', '04072011', '04092011', '09102011', '11112011', '25112011',\n '25122011', '31122011', '01012012', '15012012', '19022012', '27052012', '04072012', '02092012', '14102012',\n '11112012', '22112012', '25122012', '31122012', '01012013', '20012013', '17022013', '26052013', '04072013',\n '01092013', '13102013', '11112013', '28112013', '25122013', '31122013'])\n#set weekend (day_type: Sat and Sun)\n# Saturday ==0\n# Sunday ==1\nset_weekend = set(df1.iloc[list(set(np.where(df1['day_type']==0)[0]).union(set(np.where(df1['day_type']==1)[0]))),5])\ndf1['holiday']=[1 if date in list(set_holiday.union(set_weekend)) else 0 for date in df1['Date']]\n\nforecast_df = df1[pd.isnull(df1).any(axis=1)].copy()\nforecast_df= forecast_df.reset_index(drop=True)\n\ndf1=df1.dropna()\n\nactual_price=[34.02, 28.26,24.82,22.9,21.59,21.33,21.55,22.49,26.25,28.95,32.17,32.44,34.25,35.97,38.05,37.77,40.87,37.51,35.44,35.28,37.55,38.41,35,32.31]\n\n#model-I\nmodel=1\n\n\n\n\n#model-II\nmodel=2\n\n\n\n\n\n#model-III\n#mean + std method\nlist_upper_01=[]\nlist_lower_01=[]\nfor hour in list(np.arange(0,24,1)):\n print(hour)\n df2=df1.drop(list(set(np.where(df1['Hour']!=hour)[0]))).copy() #drop column which day_type not equal to 0 or select only day_type =0\n df2=df2.reset_index(drop=True)\n df2=df2.drop(list(set(np.where(df2['day_type'] != 0)[0])))\n df2=df2.reset_index(drop=True)\n\n input1, target1, target2 = get_train_data(df2, df2['Forecasted Total Load'].max(),\n df2['Forecasted Zonal Load'].max(), df2['Zonal Price'].max())\n net = nl.net.newff([[0, 1], [0, 1]], [5, 2])\n err = net.train(input1, target1, epochs=500, show=10, goal=0.02)\n\n input2 = get_test_data(np.asarray(forecast_df.loc[hour]), df2['Forecasted Total Load'].max(), df2['Forecasted Zonal Load'].max())\n var_mean, var_std = get_sim_data(net, input2, df2['Zonal Price'].max())\n upper, lower = PI_consutruct_mean_var(var_mean, var_std)\n list_upper_01.append(upper[0])\n list_lower_01.append(lower[0])\n\nPI_df_01, PICP, MPIW, NPIW = cal_PI(list_upper_01, list_lower_01, actual_price)\nprint(PICP)\nprint(MPIW)\nprint(NPIW)\n\nfirebase.put('GEFcom2014/Task_1/results/model-III', 'mean_std',{'lower_bound':list_lower_01, 'upper_bound':list_upper_01, 'PICP':PICP, 
'MPIW':MPIW, 'NPIW':NPIW})\nfirebase.put('GEFcom2014/Task_1/', 'actual_price', actual_price)\n\nfig, ax = subplots()\nax.plot(list(np.arange(0, 24, 1)), list_upper_01, '-r*', alpha=0.8, label='upper')\nax.plot(list(np.arange(0, 24, 1)), actual_price, '-b*', alpha=0.8, label='actual price')\nax.plot(list(np.arange(0, 24, 1)), list_lower_01, '-r*', alpha=0.8, label='lower')\nax.fill_between(list(np.arange(0, 24, 1)), list_upper_01, list_lower_01, facecolor='magenta', alpha=0.05)\nax.legend(loc='best')\nax.set_xlabel('Hours')\nax.set_ylabel('$/MWhr')\nplt.show()\n\n#quantile method\nlist_upper_02=[]\nlist_lower_02=[]\nfor hour in list(np.arange(0,24,1)):\n print(hour)\n df2=df1.drop(list(set(np.where(df1['Hour']!=hour)[0]))).copy() #drop column which day_type not equal to 0 or select only day_type =0\n df2=df2.reset_index(drop=True)\n df2=df2.drop(list(set(np.where(df2['day_type'] != 0)[0])))\n df2=df2.reset_index(drop=True)\n input1, target1, target2 = get_train_data(df2, df2['Forecasted Total Load'].max(),\n df2['Forecasted Zonal Load'].max(), df2['Zonal Price'].max())\n net = nl.net.newff([[0, 1], [0, 1]], [5, 2])\n err = net.train(input1, target2, epochs=500, show=10, goal=0.02)\n\n input2 = get_test_data(np.asarray(forecast_df.loc[hour]), df2['Forecasted Total Load'].max(), df2['Forecasted Zonal Load'].max())\n lower_bound, upper_bound = get_sim_data(net, input2, df2['Zonal Price'].max())\n lower, upper = PI_construct_quantile(lower_bound, upper_bound)\n list_upper_02.append(upper[0])\n list_lower_02.append(lower[0])\n\nPI_df_02, PICP, MPIW, NPIW = cal_PI(list_upper_02, list_lower_02, actual_price)\nprint(PICP)\nprint(MPIW)\nprint(NPIW)\n\nfirebase.put('GEFcom2014/Task_1/results/model-III', 'QR_05',{'lower_bound':list_lower_02, 'upper_bound':list_upper_02, 'PICP':PICP, 'MPIW':MPIW, 'NPIW':NPIW})\nfirebase.put('GEFcom2014/Task_1/', 'actual_price', actual_price)\n\n# plt.figure()\n# plt.plot(list(np.arange(0, 24, 1)), list_upper_02, '--r', alpha=0.8, label='upper')\n# plt.plot(list(np.arange(0, 24, 1)), actual_price, '-b', alpha=0.8, label='actual price')\n# plt.plot(list(np.arange(0, 24, 1)), list_lower_02, '--r', alpha=0.8, label='lower')\n# plt.fill_between(list(np.arange(0, 24, 1)), list_upper_02, list_lower_02, facecolor='magenta', alpha=0.05)\n# plt.legend(loc='best')\n# plt.xlabel('sdf')\n# plt.show()\n\nfig, ax = subplots()\nax.plot(list(np.arange(0, 24, 1)), list_upper_02, '-r*', alpha=0.8, label='upper')\nax.plot(list(np.arange(0, 24, 1)), actual_price, '-b*', alpha=0.8, label='actual price')\nax.plot(list(np.arange(0, 24, 1)), list_lower_02, '-r*', alpha=0.8, label='lower')\nax.fill_between(list(np.arange(0, 24, 1)), list_upper_02, list_lower_02, facecolor='magenta', alpha=0.05)\nax.legend(loc='best')\nax.set_xlabel('Hours')\nax.set_ylabel('$/MWhr')\nplt.show()\n\n\nprint('finish')","sub_path":"code/GEFcom2014_04.py","file_name":"GEFcom2014_04.py","file_ext":"py","file_size_in_byte":8625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87619989","text":"import pygame\nfrom Text_Object import *\nfrom Globals import *\n\nclass GameStatsTracker (object):\n\tSTATE_PAUSED = 0\n\tSTATE_PLAYING = 1\n\tSTATE_GAMEOVER = 2\n\tdef __init__(\tself, \n\t\t\t\t\tstarting_beer_level, \n\t\t\t\t\tstarting_security_level, \n\t\t\t\t\tstarting_points_amount,\n\t\t\t\t\tmax_points_amount, 
\n\t\t\t\t\ttext_object_argument_list,\n\t\t\t\t\tcolorkey_color,\n\t\t\t\t\tbeer_bar_color,\n\t\t\t\t\tsecurity_bar_color\n\t\t\t\t\t):\n\t\t\"\"\"\n\t\ttext object argument list is:\n\t\t\tsurface\n\t\t\ttext for display\n\t\t\tduration\n\t\t\ttextObject type\n\t\t\tall textObject types\n\t\t\tfont\n\t\t\tfont color\n\t\t\tfont_size\n\t\t\tposition\n\t\t\tbackground_color\n\t\t\tcolorkey for alpha\n\t\t\ttextObject argument vector\n\t\t\"\"\"\n\t\tself.current_game_state = self.STATE_PLAYING\n\n\t\tself.t_o_a_l = text_object_argument_list\n\t\tself.update_necessary = False\n\n\t\tself.points_pos = self.t_o_a_l[8]\n\t\tself.beer_level_pos = (self.points_pos[0]-100, self.points_pos[1] - 50)\n\t\tself.security_level_pos = (self.points_pos[0]-100, self.points_pos[1] - 100)\n\n\t\tself.beer_level = starting_beer_level\n\t\tself.security_level = starting_security_level\n\t\tself.points_total = starting_points_amount\n\t\tself.bar_widths = 15\n\t\t\n\t\tself.security_bar_surface = pygame.Surface((100, self.bar_widths)).convert()\n\t\tself.security_bar_surface.set_alpha(200)\n\t\tself.security_bar_surface.set_colorkey(colorkey_color)\n\t\tself.security_bar_surface.fill(colorkey_color)\n\t\tself.security_bar_surface.fill(security_bar_color, ((0, 0), (self.security_level, self.bar_widths)))\n\t\tself.beer_bar_surface = pygame.Surface((100, self.bar_widths)).convert()\n\t\tself.beer_bar_surface.set_alpha(200)\n\t\tself.beer_bar_surface.set_colorkey(colorkey_color)\n\t\tself.beer_bar_surface.fill(colorkey_color)\t\t\n\t\tself.beer_bar_surface.fill(beer_bar_color, ((0, 0), (self.beer_level, self.bar_widths)))\n\n\t\tself.beer_counter = 0\n\t\tself.beer_counter_limit = 10\n\t\tself.security_counter = 0\n\t\tself.security_counter_limit = 30\n\t\t\n\t\tself.max_points_amount = max_points_amount\n\t\tself.points_image_sprite = textObject(\tself.t_o_a_l[0],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[1],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[2],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[3],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[4],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[5],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[6],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[7],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[8],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[9],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[10],\n\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[11])\n\t\tself.stats_sprite_group = pygame.sprite.GroupSingle(self.points_image_sprite)\n\n\t\tself.colorkey_color = colorkey_color\n\t\tself.beer_bar_color = beer_bar_color\n\t\tself.security_bar_color = security_bar_color\n\n\tdef update(self, beer_adjustment, security_adjustment, points_adjustment):\n\t\t\n\t\tif not beer_adjustment:\n\t\t\tself.beer_counter += 1\n\t\t\tif self.beer_counter > self.beer_counter_limit:\n\t\t\t\tself.beer_counter = 0\n\t\t\t\tbeer_adjustment -= 1\n\t\tif beer_adjustment < 0:\n\t\t\tself.beer_bar_surface.fill(self.colorkey_color)\n\t\tif beer_adjustment:\n\t\t\tself.beer_level += beer_adjustment\n\t\t\tif self.beer_level > 100: self.beer_level = 100\n\t\t\tif self.beer_level < 0: self.beer_level = 0\t\t\t\t\t\t\t\t\n\t\t\tself.beer_bar_surface.fill(self.beer_bar_color, ((0, 0), (self.beer_level, self.bar_widths)))\n\t\t\n\t\tif not security_adjustment:\n\t\t\tself.security_counter += 1\n\t\t\tif self.security_counter > self.security_counter_limit:\n\t\t\t\tself.security_counter = 0\n\t\t\t\tsecurity_adjustment -= 1\n\t\tif security_adjustment < 0:\n\t\t\tself.security_bar_surface.fill(self.colorkey_color)\n\t\tif security_adjustment: \n\t\t\tself.security_level += 
security_adjustment\n\t\t\tif self.security_level > 100: self.security_level = 100\n\t\t\tif self.security_level < 0: self.security_level = 0\n\t\t\tself.security_bar_surface.fill(self.security_bar_color, ((0, 0), (self.security_level, self.bar_widths)))\n\t\t\n\t\tif points_adjustment: \n\t\t\tself.points_total += points_adjustment\n\t\t\tself.update_necessary = True\n\n\t\tif self.beer_level == 0: self.current_game_state = self.STATE_GAMEOVER\n\n\tdef draw(self):\n\t\t\n\t\tif self.update_necessary:\n\t\t\tself.points_image_sprite = textObject(\tself.t_o_a_l[0],\n\t\t\t\t\t\t\t\t\t\t\t\t\tstr(self.points_total),\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[2],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[3],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[4],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[5],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[6],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[7],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[8],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[9],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[10],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.t_o_a_l[11]) \n\t\t\tself.stats_sprite_group.sprite = self.points_image_sprite\n\t\t\tself.update_necessary = False\n\n\t\tself.stats_sprite_group.draw(self.t_o_a_l[0])\n\t\tself.t_o_a_l[0].blit(self.security_bar_surface, self.security_level_pos)\n\t\tself.t_o_a_l[0].blit(self.beer_bar_surface, self.beer_level_pos)\n\n\tdef getTotals(self):\n\t\treturn self.beer_level, self.security_level, self.points_total","sub_path":"GameStatsTracker.py","file_name":"GameStatsTracker.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155902172","text":"total = mulher = homem = 0\ncondicao = 1\n\nwhile condicao != 0:\n sexo = input(\"Digite um Sexo (M ou F) ou pressione apenas enter para revisar o total: \")\n sexo = sexo.upper()\n if sexo in \"MF\":\n total += 1\n\n if sexo == \"M\":\n homem += 1\n elif sexo == \"F\":\n mulher += 1\n else:\n print('O total de pessoas do sexo feminino : {}'.format(mulher))\n print('O total de pessoas do sexo masculino : {}'.format(homem))\n print('O total de pessoas do inteiro grupo : {}'.format(total-1))\n exit()\n\n else:\n print('Opção Inválida')\n","sub_path":"estruturaderepetiçao.py","file_name":"estruturaderepetiçao.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345662145","text":"#!/usr/bin/python3\r\nfrom __future__ import print_function\r\nfrom __future__ import division\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\nimport sys\r\nimport datetime as dt\r\nimport joblib\r\nimport pickle\r\n\r\nimport sales_regression_cfg as cfg\r\n\r\nfrom dateutil.relativedelta import relativedelta\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder\r\n\r\n\r\n#import matplotlib.pyplot as plt\r\n\r\nimport timeit\r\n\r\nfrom collections import Counter,OrderedDict\r\n \r\n\r\n\r\ndef save_model(model,filename):\r\n #filename = 'finalized_model.sav'\r\n # joblib.dump(regressor,open(\"SGDRegressorNS.p\",\"wb\"))\r\n\r\n joblib.dump(model, filename)\r\n return \r\n\r\ndef load_model(filename):\r\n # some time later...\r\n\r\n # load the model from disk\r\n\r\n loaded_model = joblib.load(filename)\r\n return loaded_model\r\n\r\n\r\ndef main():\r\n\r\n\r\n f=open(cfg.outfile_predict,\"w\")\r\n\r\n print(\"\\n\\nData Regression sales prediction tool - By Anthony Paech 12/12/19\\n\")\r\n# 
print(\"Loading\",sys.argv[1],\"into pandas for processing.....\")\r\n print(\"Loading\",cfg.datasetworking,\"into pandas for processing.....\")\r\n\r\n print(\"Results Reported to\",cfg.outfile_predict)\r\n\r\n f.write(\"\\n\\nData Regression sales prediction tool - By Anthony Paech 5/12/19\\n\\n\")\r\n # f.write(\"Loading \"+sys.argv[1]+\" into pandas for processing.....\\n\")\r\n f.write(\"Loading \"+cfg.datasetworking+\" into pandas for processing.....\\n\")\r\n\r\n f.write(\"Results Reported to \"+cfg.outfile_predict+\"\\n\")\r\n\r\n## df=read_excel(sys.argv[1],-1) # -1 means all rows\r\n## if df.empty:\r\n## print(sys.argv[1],\"Not found.\")\r\n## sys.exit()\r\n\r\n\r\n\r\n\r\n######################################################3\r\n\r\n\r\n # dbd=pd.DataFrame()\r\n \r\n dbd=pd.read_csv(cfg.datasetworking,header=0)\r\n\r\n dbd.drop(columns=[\"product\",\"date\",\"last_order_upspd\",\"prod_scaler\",\"day_delta\"],inplace=True)\r\n \r\n\r\n binmask=(dbd[\"bin_no\"]>=cfg.startbin)\r\n dbd2=dbd[binmask].copy(deep=True)\r\n \r\n dbd2.sort_values(by=[\"bin_no\",\"code_encode\",\"prod_encode\"],axis=0,ascending=[True,True,True],inplace=True)\r\n\r\n # print(df.head(10))\r\n print(\"data import shape:\",dbd2.shape)\r\n f.write(\"data import shape:\"+str(dbd2.shape)+\"\\n\")\r\n\r\n\r\n\r\n\r\n print(\"Prepare data....\")\r\n dbd2.dropna(inplace=True)\r\n\r\n\r\n\r\n #df['Date']= pd.to_datetime(df['Date'])\r\n # dbd['date'] = pd.to_datetime(dbd['date']) #pd.date().map(dt.datetime.toordinal).astype(int)\r\n # dbd['date_encode'] = dbd['date'].map(dt.datetime.toordinal).astype(int)\r\n\r\n # dbd['day_delta'] = (dbd.date-dbd.date.min()).dt.days.astype(int)\r\n # dbd.drop(columns=[\"date\"],inplace=True)\r\n\r\n # encode \"code\", \"product\"\r\n \r\n## label_encoder=LabelEncoder()\r\n## dbd[\"prod_encode\"] = label_encoder.fit_transform(dbd[\"product\"].to_numpy())\r\n## joblib.dump(label_encoder,open(cfg.product_encode_save,\"wb\"))\r\n## dbd.drop(columns=[\"product\"],inplace=True)\r\n##\r\n## label_encoder=LabelEncoder()\r\n## dbd[\"code_encode\"] = label_encoder.fit_transform(dbd[\"code\"].to_numpy())\r\n## joblib.dump(label_encoder,open(cfg.code_encode_save,\"wb\"))\r\n## dbd.drop(columns=[\"code\"],inplace=True)\r\n##\r\n# dbd.drop(columns=[\"day_delta\",\"productgroup\"],inplace=True)\r\n##\r\n## # print(\"dbd=\\n\",dbd)\r\n##\r\n qtyarray=dbd2.qty.to_numpy()\r\n binarray=dbd2.bin_no.to_numpy()\r\n prodgrouparray=dbd2.productgroup.to_numpy()\r\n## print(\"qtyarray.shape=\",qtyarray.shape,\"\\n\",qtyarray)\r\n## \r\n dbd2.drop(columns=[\"qty\",\"bin_no\",\"productgroup\"],inplace=True)\r\n print(dbd2.columns)\r\n\r\n\r\n##########################################################\r\n# create predictions\r\n\r\n\r\n regressor_best=joblib.load(open(cfg.RFR_save,\"rb\"))\r\n print(\"RFR model loaded from:\",cfg.RFR_save)\r\n f.write(\"RFR model loaded from:\"+str(cfg.RFR_save)+\"\\n\")\r\n# print(dbd)\r\n # Xr_df_cols=Xr_df[:,:4]\r\n# print(dbd.columns)\r\n predictions = regressor_best.predict(dbd2)\r\n\r\n dbd2[\"predict_qty\"]=predictions.reshape(-1,1).astype(float).round(0)\r\n # dbd2=pd.DataFrame(np.hstack((dbd.to_numpy(),predictions.reshape(-1,1))),columns=[\"day_order_delta\",\"code\",\"product\",\"date\",\"predict_qty\"])\r\n\r\n\r\n\r\n########################################################\r\n\r\n\r\n dbd2[\"qty\"]=qtyarray.tolist()\r\n dbd2[\"bin_no\"]=binarray.tolist()\r\n dbd2[\"productgroup\"]=prodgrouparray.tolist()\r\n\r\n\r\n 
#j=pd.DataFrame(dbd,columns=[\"code_encode\",\"prod_encode\",\"date_encode\",\"day_order_delta\"])\r\n encoder=joblib.load(open(cfg.code_encode_save,\"rb\"))\r\n dbd2[\"code\"]=encoder.inverse_transform(dbd2[\"code_encode\"].astype(int).to_numpy())\r\n dbd2.drop(columns=[\"code_encode\"],inplace=True)\r\n\r\n\r\n encoder=joblib.load(open(cfg.product_encode_save,\"rb\"))\r\n dbd2[\"product\"]=encoder.inverse_transform(dbd2[\"prod_encode\"].astype(int).to_numpy())\r\n dbd2.drop(columns=[\"prod_encode\"],inplace=True)\r\n\r\n# dbd2[\"date\"] = dbd2.date_encode.astype(int).map(dt.datetime.fromordinal)\r\n\r\n\r\n dbd2[\"date\"]=(pd.to_datetime(\"2/2/20\")+pd.to_timedelta((dbd2[\"bin_no\"]-cfg.startbin)*7,unit=\"d\")).dt.strftime('%Y/%m/%d')\r\n\r\n dbd2.drop(columns=[\"date_encode\",\"bin_no\",\"scaled_upspd\",\"day_order_delta\"],inplace=True)\r\n\r\n \r\n dbd2= dbd2[[\"date\",\"code\",\"productgroup\",\"product\",\"qty\",\"predict_qty\"]]\r\n # dbd2= dbd2[[\"date\",\"code\",\"product\",\"predict_qty\"]]\r\n \r\n dbd2.sort_values(by=[\"date\",\"code\",\"productgroup\",\"product\"],axis=0,ascending=[True,True,True,True],inplace=True)\r\n # dbd2[\"predict_qty\"]=dbd2[\"predict_qty\"].astype(float).round(0) #{\"predict_qty\" : 1})\r\n dbd2[\"predict_qty_ctnsof8\"]=(dbd2[\"predict_qty\"]/8).astype(float).round(0)\r\n # print(k)\r\n #print(\"Sales Qty Predictions=\\n\",dbd2[0:100].to_string())\r\n\r\n\r\n print(dbd2)\r\n print(dbd2.columns)\r\n\r\n dbd2.to_excel(\"dbd2.xlsx\")\r\n \r\n \r\n#############################################################################3\r\n # create a pivot table of code, product, day delta and predicted qty and export back to excel\r\n\r\n table = pd.pivot_table(dbd2, values='predict_qty', index=['productgroup','product', 'date'],columns=['code'], aggfunc=np.sum, margins=True, fill_value=0) #, observed=True)\r\n # print(\"\\ntable=\\n\",table.head(5))\r\n f.write(\"\\n\\n\"+table.to_string())\r\n\r\n table2 = pd.pivot_table(dbd2, values='predict_qty', index=['code', 'date'],columns=['productgroup','product'], aggfunc=np.sum, margins=True, fill_value=0)\r\n # print(\"\\ntable2=\\n\",table2.head(5))\r\n f.write(\"\\n\\n\"+table2.to_string())\r\n\r\n table3 = pd.pivot_table(dbd2, values='predict_qty', index=['date',\"code\"],columns=['productgroup','product'], aggfunc=np.sum, margins=True, fill_value=0)\r\n # print(\"\\ntable3=\\n\",table3.head(5))\r\n f.write(\"\\n\\n\"+table3.to_string())\r\n\r\n table4 = pd.pivot_table(dbd2, values='predict_qty', index=['date','productgroup',\"product\"],columns=['code'], aggfunc=np.sum, margins=True, fill_value=0)\r\n # print(\"\\ntable4=\\n\",table4.head(5))\r\n f.write(\"\\n\\n\"+table4.to_string())\r\n\r\n table5 = pd.pivot_table(dbd2, values='predict_qty_ctnsof8', index=['productgroup','product', 'date'],columns=['code'], aggfunc=np.sum, margins=True, fill_value=0)\r\n # print(\"\\ntable5=\\n\",table5.head(5))\r\n f.write(\"\\n\\n\"+table5.to_string())\r\n\r\n table6 = pd.pivot_table(dbd2, values='predict_qty_ctnsof8', index=['code', 'date'],columns=['productgroup','product'], aggfunc=np.sum, margins=True, fill_value=0)\r\n # print(\"\\ntable6=\\n\",table6.head(5))\r\n f.write(\"\\n\\n\"+table6.to_string())\r\n\r\n table7 = pd.pivot_table(dbd2, values='predict_qty_ctnsof8', index=['date',\"code\"],columns=['productgroup','product'], aggfunc=np.sum, margins=True, fill_value=0)\r\n # print(\"\\ntable7=\\n\",table7.head(5))\r\n f.write(\"\\n\\n\"+table7.to_string())\r\n\r\n table8 = pd.pivot_table(dbd2, 
values='predict_qty_ctnsof8', index=['date','productgroup',\"product\"],columns=['code'], aggfunc=np.sum, margins=True, fill_value=0)\r\n # print(\"\\ntable8=\\n\",table8.head(5))\r\n f.write(\"\\n\\n\"+table8.to_string())\r\n\r\n\r\n with pd.ExcelWriter(cfg.outxlsfile) as writer: # mode=\"a\" for append\r\n table.to_excel(writer,sheet_name=\"Units1\")\r\n table2.to_excel(writer,sheet_name=\"Units2\")\r\n table3.to_excel(writer,sheet_name=\"Units3\")\r\n table4.to_excel(writer,sheet_name=\"Units4\")\r\n table5.to_excel(writer,sheet_name=\"CtnsOfEight5\")\r\n table6.to_excel(writer,sheet_name=\"CtnsOfEight6\")\r\n table7.to_excel(writer,sheet_name=\"CtnsOfEight7\")\r\n table8.to_excel(writer,sheet_name=\"CtnsOfEight8\")\r\n\r\n print(\"\\n\\nSales Prediction results written to spreadsheet\",cfg.outxlsfile,\"\\n\\n\")\r\n f.write(\"\\n\\nSales Prediction results written to spreadsheet:\"+str(cfg.outxlsfile)+\"\\n\\n\")\r\n \r\n \r\n##############################################################################\r\n\r\n\r\n f.close()\r\n return\r\n\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"sales_predict_v1-02.py","file_name":"sales_predict_v1-02.py","file_ext":"py","file_size_in_byte":8818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414434786","text":"#!/usr/bin/env python3\n\nimport rospy\n\nfrom std_msgs.msg import UInt16MultiArray\n\nimport sys\nfrom time import sleep\n\nfrom pysphero.core import Sphero\nfrom pysphero.driving import Direction\n\nglobal mac_address\nspeed = 0\nheading = 0\n\ndef send():\n with Sphero(mac_address=mac_address) as sphero:\n sphero.power.wake()\n\n sleep(2)\n print(f\"Send drive with speed {speed} and heading {heading}\")\n\n sphero.driving.drive_with_heading(speed, heading, Direction.forward)\n\n sphero.power.enter_soft_sleep()\n\ndef callback(new_data):\n global speed \n global heading\n speed = new_data.data[0]\n heading = new_data.data[1]\n send()\n\nif __name__ == \"__main__\":\n # Initilize the node\n rospy.init_node('sphero', log_level=rospy.DEBUG)\n\n name = rospy.get_name()\n # Get address from parameters\n try:\n mac_address = rospy.get_param('%s/address'%name)\n except KeyError:\n print(\"Address not provided\")\n sys.exit()\n \n # Setup subscriber\n sub = rospy.Subscriber('/analysis',UInt16MultiArray,callback)\n\n print(\"sphero setup\")\n \n # Turn control over to ROS\n rospy.spin()\n","sub_path":"ws/src/sphero_communication/src/sphero.py","file_name":"sphero.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553184607","text":"import numpy as np\nimport pylab as plt\nimport cv2\nimport math\nimport cnvmats\nimport time\nfrom numpy import square as sq\nfrom progress import progress\nfrom multiprocessing import Pool\nfrom scipy.sparse.linalg import eigsh\n\n#! ======\n#! exp117\n#! ======\n#!\n#! **Question:**\n#!\n#! Is the objective function ``y' X inv(X'X) X' y`` really independent of ``norm2(x)``?\n#!\n#! **Answer:**\n#!\n#! 
Yes.\n\n \nclass InputData:\n \"\"\"Defines interface that handles input data.\n \"\"\"\n\n def __init__(self, y, sa):\n self.y = y\n self.sa = sa\n \n @property\n def x0(self):\n if not hasattr(self, '__x0'):\n y_mean = np.mean(self.y, axis=0)\n A = blurmat(self.sa, y_mean.shape, 'full')\n self.__x0 = A.dot(y_mean)\n self.sx = self.__x0.shape\n return self.__x0\n\n @property\n def sx(self):\n return np.add(self.sy, self.sa) - 1\n\n @property\n def sy(self):\n return self.y[0].shape\n\n def generate_eigfaces(self, m):\n Y = np.zeros((np.prod(self.y[0].shape), len(self.y)))\n for i in range(len(self.y)):\n Y[:,i] = self.y[i].flatten()\n YY_eigvalues, YY_eigfaces = compute_eigs(Y.dot(Y.T) / len(self.y), m)\n y = [None] * m\n for i in range(m):\n y[i] = math.sqrt(YY_eigvalues[i]) * YY_eigfaces[:,i].reshape(self.sy)\n return InputData(y, self.sa)\n\n\nclass ToyData(InputData):\n \"\"\"Specializes `InputData` s.t. the input data is computed from ground truth.\n \"\"\"\n\n def __init__(self, x_true, y, a):\n InputData.__init__(self, y, a[0].shape)\n self.x_true = x_true\n self.sx = x_true.shape\n self.a = a\n\n @staticmethod\n def from_ground_truth(filename, n, sa, noise_s2, seed=10, scale=4):\n np.random.seed(seed);\n x_true = cv2.imread(filename, 0)\n sx = tuple(np.divide(x_true.shape, scale))\n x_true = cv2.resize(x_true, sx)\n y, a = [None]*n, [None]*n\n def noisy(x): return x + math.sqrt(noise_s2) * np.random.randn(*x.shape)\n for i in range(n):\n msg = 'creating %d input images from %s' % (n, filename)\n progress(msg, 0, n-1, i)\n a[i] = np.random.random(sa)\n a[i] = a[i] / a[i].sum()\n A = cnvmats.cnvmat(a[i], x_true.shape, 'valid')\n y[i] = noisy(A.dot(x_true))\n return ToyData(x_true, y, a)\n \n def rms(self, x):\n return np.sqrt(sq(x - self.x_true).sum() / np.prod(x.shape))\n\n def generate_eigfaces(self, m):\n data = InputData.generate_eigfaces(self, m)\n return ToyData(self.x_true, data.y, self.a)\n\n\ndef compute_eigs(A, n):\n d,V = eigsh(A, n)\n idx = d.argsort()[::-1]\n eigvalues = [d[idx[i]].real for i in range(len(idx))]\n eigfaces = V[:,idx].real\n return (eigvalues, eigfaces)\n\n\ndata_actual = ToyData.from_ground_truth('cameraman.png', 1000, (5,5), 0, scale=16)\ndata_eigfaces = data_actual.generate_eigfaces(25)\nx_true = data_actual.x_true\n\ndef f(data, x):\n X = cnvmats.cnvmat(x, data.sa, 'valid').toarray()\n inner = X.dot(np.linalg.inv(X.T.dot(X))).dot(X.T)\n s = 0\n for y in data.y:\n yi = y.flatten()\n s += np.inner(np.inner(yi, inner), yi)\n return s / len(data.y)\n\nfactors = (1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3)\nobjectives_actual = [f(data_actual, x_true * factor) for factor in factors]\nobjectives_eigfaces = [f(data_eigfaces, x_true * factor) for factor in factors]\n\nplt.figure()\nplt.title('on eigenvectors')\nplt.plot(factors, objectives_eigfaces, '--k')\nplt.plot(factors, objectives_eigfaces, 'or')\nplt.xscale('log')\nplt.xlabel('$\\\\alpha$')\nplt.ylabel('$\\\\phi(x_\\\\mathrm{true} \\\\cdot \\\\alpha)$')\nplt.grid()\nplt.show()\n\nplt.figure()\nplt.title('on original $y$')\nplt.plot(factors, objectives_actual, '--k')\nplt.plot(factors, objectives_actual, 'or')\nplt.xscale('log')\nplt.xlabel('$\\\\alpha$')\nplt.ylabel('$\\\\phi\\'(x_\\\\mathrm{true} \\\\cdot \\\\alpha)$')\nplt.grid()\nplt.show()\n","sub_path":"exp117.py","file_name":"exp117.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"262874332","text":"\"\"\"\n有这样一道智力题:“某商店规定:三个空汽水瓶可以换一瓶汽水。\n小张手上有十个空汽水瓶,她最多可以换多少瓶汽水喝?”答案是5瓶,\n方法如下:先用9个空瓶子换3瓶汽水,喝掉3瓶满的,喝完以后4个空瓶子,用3个再换一瓶,喝掉这瓶满的,这时候剩2个空瓶子。\n然后你让老板先借给你一瓶汽水,喝掉这瓶满的,喝完以后用3个空瓶子换一瓶满的还给老板。\n如果小张手上有n个空汽水瓶,最多可以换多少瓶汽水喝?\n\n输入描述:\n\n输入文件最多包含10组测试数据,每个数据占一行,仅包含一个正整数n(1<=n<=100),表示小张手上的空汽水瓶数。n=0表示输入结束,你的程序不应当处理这一行。\n\n输出描述:\n\n对于每组测试数据,输出一行,表示最多可以喝的汽水瓶数。如果一瓶也喝不到,输出0。\n\n输入例子:\n\n3 \n10 \n81 \n0\n\n输出例子:\n\n1 \n5 \n40\n\n\"\"\"\ndef changeWater(cout_0):\n # 喝水的瓶数\n cout_1=0\n if cout_0<2:\n return 0\n while cout_0>2:\n cout_1+=cout_0//3\n cout_0=cout_0//3+cout_0%3\n if cout_0==2:\n cout_1+=1\n return cout_1\n# 空盖子个数\nlines=[]\nwhile True:\n try:\n # PYTHON多行输入以空格“ ”结束\n lines.append(int(input()))\n except:\n # result=[]\n for i in lines:\n print(changeWater(i))\n # result.append(changeWater(i))\n # for i in result:\n # print(i)\n break","sub_path":"data_structure_review/to_offer/other/change_water.py","file_name":"change_water.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"367187262","text":"import os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\n#from data_loader import get_loader\nfrom mm_corruption_attack_data_loader import get_loader\nfrom models import VqaModel\nfrom utils import text_helper\nimport torch.nn.functional as F\nimport cv2\nimport imageio\nfrom advs import *\nfrom mm_corruption_attack import *\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef main(args):\n\n data_loader = get_loader(\n input_dir=args.input_dir,\n input_vqa_train='train.npy',\n #input_vqa_valid='valid.npy',\n input_vqa_valid='valid_mm_corruption_attack.npy',\n max_qst_length=args.max_qst_length,\n max_num_ans=args.max_num_ans,\n batch_size=args.batch_size,\n num_workers=args.num_workers)\n\n qst_vocab_size = data_loader['train'].dataset.qst_vocab.vocab_size\n ans_vocab_size = data_loader['train'].dataset.ans_vocab.vocab_size\n ans_unk_idx = data_loader['train'].dataset.ans_vocab.unk2idx\n \n # Load model checkpoint\n checkpoint = torch.load('/scratch1/nvishwa/datasets/VQA/models/model-epoch-20.ckpt')\n model = VqaModel(\n embed_size=args.embed_size,\n qst_vocab_size=qst_vocab_size,\n ans_vocab_size=ans_vocab_size,\n word_embed_size=args.word_embed_size,\n num_layers=args.num_layers,\n hidden_size=args.hidden_size).to(device)\n\n model.load_state_dict(checkpoint['state_dict'])\n\n criterion = nn.CrossEntropyLoss()\n\n params = list(model.img_encoder.fc.parameters()) \\\n + list(model.qst_encoder.parameters()) \\\n + list(model.fc1.parameters()) \\\n + list(model.fc2.parameters())\n\n batch_step_size = len(data_loader['valid'].dataset) / args.batch_size\n\n #model.eval()\n model.train()\n\n max_samples = 500\n\n for batch_idx, batch_sample in enumerate(data_loader['valid']):\n\n image = batch_sample['image'].to(device)\n question_id = batch_sample['question_id']\n question = batch_sample['question'].to(device)\n label = batch_sample['answer_label'].to(device)\n multi_choice = batch_sample['answer_multi_choice'] # not tensor, list.\n\n #print(image_path)\n\n with torch.set_grad_enabled(True):\n\n #attack(args, model, image, question, label, question_id) \n attack(args, model, data_loader['valid']) \n\n if batch_idx > max_samples:\n break\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--input_dir', type=str, 
default='/scratch1/nvishwa/datasets/VQA',\n help='input directory for visual question answering.')\n\n parser.add_argument('--log_dir', type=str, default='./logs',\n help='directory for logs.')\n\n parser.add_argument('--model_dir', type=str, default='/scratch1/nvishwa/datasets/VQA/models',\n help='directory for saved models.')\n\n parser.add_argument('--max_qst_length', type=int, default=30,\n help='maximum length of question. \\\n the length in the VQA dataset = 26.')\n\n parser.add_argument('--max_num_ans', type=int, default=10,\n help='maximum number of answers.')\n\n parser.add_argument('--embed_size', type=int, default=1024,\n help='embedding size of feature vector \\\n for both image and question.')\n\n parser.add_argument('--word_embed_size', type=int, default=300,\n help='embedding size of word \\\n used for the input in the LSTM.')\n\n parser.add_argument('--num_layers', type=int, default=2,\n help='number of layers of the RNN(LSTM).')\n\n parser.add_argument('--hidden_size', type=int, default=512,\n help='hidden_size in the LSTM.')\n\n parser.add_argument('--learning_rate', type=float, default=0.001,\n help='learning rate for training.')\n\n parser.add_argument('--step_size', type=int, default=10,\n help='period of learning rate decay.')\n\n parser.add_argument('--gamma', type=float, default=0.1,\n help='multiplicative factor of learning rate decay.')\n\n parser.add_argument('--num_epochs', type=int, default=30,\n help='number of epochs.')\n\n parser.add_argument('--batch_size', type=int, default=1, # default batch size is 256\n help='batch_size.')\n\n parser.add_argument('--num_workers', type=int, default=1, # default was 8\n help='number of processes working on cpu.')\n\n parser.add_argument('--save_step', type=int, default=1,\n help='save step of model.')\n\n args = parser.parse_args()\n\n main(args)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315933379","text":"import io\nimport os\nfrom numpy import random\nfrom google.cloud import vision_v1\nfrom PIL import Image, ImageFont, ImageDraw\n# from Pillow_Utility import draw_borders, Image\nimport sys\nimport pandas as pd\nimport json\n\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r\"SecretAccountKey.json\"\nclient = vision_v1.ImageAnnotatorClient()\n\nfile_name = \"unnamed.png\"\nimage_path = os.path.join('./Images', file_name)\n\nim = Image.open(image_path)\n\nwith io.open(image_path, 'rb') as image_file:\n content = image_file.read()\n\nimage = vision_v1.types.Image(content=content)\nresponse = client.object_localization(image=image)\nlocalized_object_annotations = response.localized_object_annotations\n\n\nsys.argv = im\n\nblank = Image.open(image_path)\npixels = []\nwhile len(pixels):\n x = random.randint(0, blank.size[0] - 1)\n y = random.randint(0, blank.size[1] - 1)\n pixel = blank.getpixel((x, y))\n if pixel[-1] > 200:\n pixels.append(((x, y), pixel[:-1]))\n\n\ndef diff(a, b):\n return sum((a - b) ** 2 for a, b in zip(a, b))\n\n\nbest = []\n\nfor x in range(im.size[0]):\n for y in range(im.size[1]):\n d = 0\n for coor, pixel in pixelt:\n try:\n ipixel = im.getpixel((x + coor[0], y + coor[1]))\n d += diff(ipixel, pixel)\n except IndexError:\n d += 256 ** 2 * 3\n best.append((d, x, y))\n best.sort(key=lambda x: x[0])\n best = best[:3]\n\n\n# detectedObjects = []\n\n\n# class pixelPoint:\n# def __init__(self, x, y):\n# self.x = x\n# self.y = y\n\n\n# class detectedObject:\n# 
def __init__(self, name):\n# self.name = name\n# self.points = pixelPointsArray\n\n# detObj = json.dumps(localized_object_annotations)\n\n\n# for obj in localized_object_annotations:\n# currentObject = setattr(detectedObject, \"name\", obj.name)\n# pixelPointsArray = []\n# # print(obj.bounding_poly.normalized_vertices)\n# for nv in obj.bounding_poly.normalized_vertices:\n# setattr(pixelPoint, \"x\", nv.x)\n# setattr(pixelPoint, \"y\", nv.y)\n# pixelPointsArray.append(pixelPoint)\n# setattr(detectedObject, 'points',pixelPointsArray )\n# detectedObjects.append(detectedObject)\n# # print(obj)\n\n# for detObj in detectedObjects:\n# print(detObj)\n\n\ndf = pd.DataFrame(columns=['name', 'score', 'value'])\nfor obj in localized_object_annotations:\n df = df.append(\n dict(\n name=obj.name,\n score=obj.score,\n\n ),\n ignore_index=True)\n\npixels = list(im.getdata())\nwidth, height = im.size\npixels = [pixels[i * width:(i + 1) * width] for i in range(height)]\n\n\nwidth, height = im.size\nimage_editable = ImageDraw.Draw(im)\ntitle_font = ImageFont.truetype(\n \"/usr/share/fonts/truetype/freefont/FreeMono.ttf\", 12, encoding=\"unic\")\ntitle_text = \"The Beauty of Nature\"\ndraw_txt = ImageDraw.Draw(im)\nwidth, height, = draw_txt.textsize(title_text, font=title_font)\n\nprint(df)\n\nimage_editable.text((10, 10), title_text, (237, 230, 211), font=title_font)\nim.save(\"result.jpg\")\n\n\n\n\n\n","sub_path":"PostCreationBot.py","file_name":"PostCreationBot.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300691566","text":"# %load q07_extras/build.py\n# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n# Your Solution\ndef extras_runs(data=data):\n delivery1 = data['innings'][0]['1st innings']['deliveries']\n extras1=[]\n for i in delivery1:\n for key in i:\n extras1.append(i[key]['runs']['extras'])\n delivery2 = data['innings'][1]['2nd innings']['deliveries']\n extras2=[]\n for j in delivery2:\n for key2 in j:\n extras2.append(j[key2]['runs']['extras'])\n difference=sum(extras2)-sum(extras1)+4\n return difference\n\n\n\n\n","sub_path":"q07_extras/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338242771","text":"import os\n\nimport tensorflow as tf\nfrom PIL import Image\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nrows = 28\ncols = 28\n\nimages_to_extract = 100\n\nsave_dir = \"./tmp/result\"\n\nmnist = input_data.read_data_sets(\"./tmp/data/\", one_hot=False)\nsession = tf.Session()\n\nimage_shape = session.run(tf.shape(mnist.train.images))\nimages_count = image_shape[0]\npixels_per_image = image_shape[1]\n\nlabel_shape = session.run(tf.shape(mnist.train.labels))\nlabels_count = label_shape[0]\nlabels = mnist.train.labels\n\nif images_count == labels_count:\n print(\"数据集中共包含%s张图片,和%s个标签\" % (images_count, labels_count))\n print(\"每张图片包含%s个像素\" % pixels_per_image)\n print(\"数据类型:%s\" % mnist.train.images.dtype)\n\n if mnist.train.images.dtype == \"float32\":\n for i in range(0, images_to_extract):\n for n in range(pixels_per_image):\n if mnist.train.images[i][n] != 0:\n mnist.train.images[i][n] = 255\n if ((i + 1) % 50) == 0:\n print(\"图像浮点数值扩展进度:已转换%s张,共需转换%s张\" % (i + 1, images_to_extract))\n\n for i in range(10):\n save_path = \"%s/%s/\" % (save_dir, i)\n if not 
os.path.exists(save_path):\n os.makedirs(save_path)\n\n index = [0 for x in range(0, 10)]\n for i in range(0, images_to_extract):\n img = Image.new(\"L\", (cols, rows))\n for m in range(rows):\n for n in range(cols):\n img.putpixel((n, m), int(mnist.train.images[i][n + m * cols]))\n digit = labels[i]\n path = \"%s/%s/%s.png\" % (save_dir, digit, index[digit])\n index[digit] += 1\n img.save(path)\n","sub_path":"mnistConvert.py","file_name":"mnistConvert.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619283482","text":"from django.db.models import ObjectDoesNotExist\nfrom django.core.exceptions import MultipleObjectsReturned\n\nfrom apps.core.models import Page, Header, Footer\n\n\ndef add_page(request):\n try:\n page = Page.objects.get(uri=request.path)\n except ObjectDoesNotExist:\n return {'page': None}\n except MultipleObjectsReturned:\n page = Page.objects.filter(uri=request.path)[0]\n breadcrumbs = page.get_ancestors(include_self=True)\n context = dict()\n context['page'] = page\n context['breadcrumbs'] = breadcrumbs\n if page.text_block_list:\n for block in page.text_block_list.all():\n context[block.placeholder] = block.text\n return context\n\n\ndef add_header(request):\n try:\n header = Header.objects.filter(mptt_level=0).prefetch_related(\n 'page',\n 'header_child__page'\n )\n except ObjectDoesNotExist:\n header = None\n return {'header': header}\n\n\ndef add_footer(request):\n try:\n footer = Footer.objects.filter(mptt_level=0).prefetch_related(\n 'page',\n 'footer_child__page'\n )\n except ObjectDoesNotExist:\n return {'footer': None}\n return {\n 'footer': footer,\n }\n","sub_path":"{{ cookiecutter.name }}_project/{{ cookiecutter.name }}/apps/core/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351797324","text":"def findMaxElementIndex(arr):\n max = 0\n maxIndex = 0\n for idx in range(len(arr)):\n if(arr[idx] >= max):\n max = arr[idx]\n maxIndex = idx\n return maxIndex\n\ndef findMinElementIndex(arr):\n min = 1000000\n minIndex = 0\n for idx in range(len(arr)):\n if arr[idx] <= min:\n min = arr[idx]\n minIndex = idx\n return minIndex\n\ndef selectionSort(arr, order = 'desc'):\n sortedArr = []\n for _ in range(len(arr)):\n if order.lower() == 'desc':\n sortedArr.append(arr.pop(findMaxElementIndex(arr)))\n elif order.lower() == 'asc':\n sortedArr.append(arr.pop(findMinElementIndex(arr)))\n else:\n if _ == 0 :\n print('Order type not found. 
Default asc')\n sortedArr.append(arr.pop(findMinElementIndex(arr)))\n return sortedArr\n\n# print(findMaxElementIndex([1, 5, 7]))\n# print(findMinElementIndex([5, 3, 2, 1, 123, 4, 17]))\nprint(selectionSort([5, 3, 2, 1, 123, 4, 17], 'kex'))\n","sub_path":"Algorithms/selectionSort.py","file_name":"selectionSort.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"355635785","text":"# Runtime 53 ms\n# Beats 97%\nclass Solution(object):\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n rst = [[]]\n \n for i in range(len(nums)):\n rst += [r + [nums[i]] for r in rst if nums[:i].count(nums[i]) == r.count(nums[i])]\n return rst\n\n\n# Runtime 53 ms\n# Beats 97%\nclass Solution(object):\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n rst = []\n nums.sort()\n self.dfs(nums, [], rst)\n return rst\n \n def dfs(self, nums, path, rst):\n # add path every time\n rst.append(path)\n \n for i in range(len(nums)):\n if i != 0 and nums[i] == nums[i-1]:\n continue\n self.dfs(nums[i+1:], path + [nums[i]], rst)\n","sub_path":"90-Subsets-II/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"282939974","text":"import pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn.neural_network import MLPClassifier\nimport matplotlib.pyplot as plt\n\ndef compute(nn, trn_X, trn_Y, tst_X, tst_Y):\n nn.fit(trn_X, trn_Y)\n result = nn.predict(tst_X)\n\n confusion = [[0, 0], [0, 0]]\n for i in range(len(tst_Y)):\n if (tst_Y[i] > 0.0 and result[i] > 0.0):\n confusion[1][1] += 1\n elif (tst_Y[i] > 0.0 and result[i] < 1.0):\n confusion[1][0] += 1\n elif (tst_Y[i] < 1.0 and result[i] > 0.0):\n confusion[0][1] += 1\n elif (tst_Y[i] < 1.0 and result[i] < 1.0):\n confusion[0][0] += 1\n\n return confusion[1][1] / (confusion[1][0] + confusion[1][1]), confusion[0][0] / (confusion[0][0] + confusion[0][1])\n\n# Train model (PCA)\ndf = pd.read_pickle('../data/diabetes_train.pickle')\ndf_validation = pd.read_pickle('../data/diabetes_validation.pickle')\ndf_train = np.array(pd.concat([df, df_validation]))\n\nX = df_train[:, 0:-1]\ntrn_Y = df_train[:, -1]\n\npca_config = pickle.load(open( \"../data/pca_config.pickle\", \"rb\" ))\npca_config = pca_config[5]\n\nn_component = pca_config['n_components']\nhidden_layer_size = pca_config['param']['hidden_layer_sizes']\nweight_decay = pca_config['param']['alpha']\n\npca = PCA(n_components=n_component)\npca_X = pca.fit_transform(X)\n\nnn = MLPClassifier(solver='lbfgs', max_iter=1000, alpha=weight_decay, hidden_layer_sizes=(hidden_layer_size))\n\n\ntestFile = np.array(pd.read_pickle('../data/diabetes_test.pickle'))\nX = testFile[:, 0:-1]\ntst_Y = testFile[:, -1]\n\ntst_X = pca.transform(X)\nsen_list = []\nspec_list = []\ni_list = []\nmaxi = 100\nfor i in range(maxi - 5):\n mean_sen, mean_spec = 0.0, 0.0\n print(i)\n i_list.append(i + 5)\n for j in range(i + 5):\n sen, spec = compute(nn, pca_X, trn_Y, tst_X, tst_Y)\n mean_sen += sen\n mean_spec += spec\n sen_list.append(mean_sen / (i + 5))\n spec_list.append(mean_spec / (i + 5))\n\n\nplt.style.use('ggplot')\nplt.xlim(0, maxi)\nplt.plot(i_list, sen_list, 'ro')\nplt.plot(i_list, spec_list, 'bo')\nplt.ylim(0.0, 1.0)\ntitle = 'Mean Sensitivity (r) and specificity 
(b)'\nplt.title(title)\nplt.xlabel('epochs')\nplt.ylabel('mean')\nplt.legend(loc='best')\nplt.savefig('../pic/meanSens&Spec.png')\nplt.close()","sub_path":"model_confusion.py","file_name":"model_confusion.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106633756","text":"class Solution(object):\n def maxProfit(self, k, prices):\n \"\"\"\n :type k: int\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if not prices:\n return 0\n\n size = len(prices)\n\n # 一次交易由买入和卖出构成,至少需要两天。\n # 所以说有效的限制 k 应该不超过 n/2,如果超过,就没有约束作用了,相当于 k = +infinity。\n if k > size // 2:\n return self.helper(prices)\n\n dp = [[[0, 0] for _ in range(k + 1)] for _ in range(size)]\n\n for i in range(size):\n for j in range(k, 0, -1):\n if i - 1 == -1:\n dp[i][j][0] = 0\n dp[i][j][1] = -prices[i]\n continue\n\n dp[i][j][0] = max(dp[i - 1][j][0], dp[i - 1][j][1] + prices[i])\n dp[i][j][1] = max(dp[i - 1][j][1], dp[i - 1][j - 1][0] - prices[i])\n\n return dp[-1][k][0]\n\n def helper(self, prices):\n result = 0\n\n for i in range(1, len(prices)):\n if prices[i] > prices[i - 1]:\n result += prices[i] - prices[i - 1]\n\n return result\n","sub_path":"Week_06/188.best-time-to-buy-and-sell-stock-iv.py","file_name":"188.best-time-to-buy-and-sell-stock-iv.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"127582649","text":"import unittest\n\nfrom dicom_factory.factory import DicomFactory\n\n\nclass TestFactory(unittest.TestCase):\n\n def test_create_factory_with_custom_data_size_works_properly(self):\n data_size = (100, 100)\n factory_args = {'Rows': data_size[0], 'Columns': data_size[1]}\n\n dicom = DicomFactory.build(factory_args)\n\n self.assertEqual(data_size, dicom.pixel_array.shape)\n\n def test_create_factory_with_custom_series_adds_series_description(self):\n expected_series = 'leg'\n factory_args = {'SeriesDescription': expected_series}\n\n dicom = DicomFactory.build(factory_args)\n\n self.assertEqual(expected_series, dicom.SeriesDescription)\n\n def test_create_factory_with_unsupported_arguments_raises_value_error(self):\n with self.assertRaises(ValueError):\n factory_args = {'FakeArg': 123}\n DicomFactory.build(factory_args)\n","sub_path":"dicom_factory/tests/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640764847","text":"import random\nimport os\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n#import opts_para\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom scipy import misc\nimport time\nimport torchvision\n#plt.switch_backend('agg')\nclass Load_data():\n\tdef __init__(self,path,shuffle):\n\n\t\tself.image_real_path = os.path.join(path,'crop_real_cars')\n\t\t#self.image_instance_path = os.path.join(path,'mask_image_raw_00')\n\t\tself.image_raw_path = os.path.join(path,'crop_syn_cars')\n\n\t\tself.light_path = os.path.join(path,'Light_Direction')\n\t\tself.real_mask = os.path.join(path,'crop_real_masks')\n\t\tself.sys_mask = os.path.join(path,'crop_syn_masks')\n\t\t# self.crop_mask = os.path.join(path,'crop_mask')\n\t\t# self.crop_real_mask = os.path.join(path,'crop_real_mask')\n\t\t#id_list = 
sorted(os.listdir(self.image_real_path))\n\t\tid_list = os.listdir(self.image_raw_path)\n\t\tid_list = np.array(id_list)\n\t\tif shuffle:\n\t\t\tsampleing_id = np.random.permutation(len(id_list))\n\t\t\tid_list = id_list[sampleing_id]\n\t\tself.id_list = id_list\n\t\tnormalize = torchvision.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n\t\tself.transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),normalize])\n\n\tdef getitem(self,index):\n\t\tsyn_image_name = self.id_list[index]\n\t\twhile True:\n\t\t\tnum=np.random.randint(1,10)\n\t\t\treal_image_name=syn_image_name[:-6]+str(num).zfill(2)+'.png'\n\t\t\t#print(syn_image_name)\n\t\t\tif os.path.isfile(os.path.join(self.image_real_path,real_image_name)):\n\t\t\t\tbreak\n\n\t\tlight_name = real_image_name.split('_')[0]\n\t\timg_real = misc.imread(os.path.join(self.image_real_path,real_image_name))\n\t\t#img_instance = cv2.imread(os.path.join(self.image_instance_path,image_name))\n\t\timg_raw = misc.imread(os.path.join(self.image_raw_path,syn_image_name))\n\n\t\t#real_mask = misc.imread(os.path.join(self.real_mask,image_name))\n\t\tsys_mask = misc.imread(os.path.join(self.sys_mask,syn_image_name))\n\t\treal_mask = misc.imread(os.path.join(self.real_mask, real_image_name))\n\t\t# crop_mask = misc.imread(os.path.join(self.crop_mask, image_name))\n\t\t# crop_real_mask = misc.imread(os.path.join(self.crop_real_mask, image_name))\n\t\t#real_mask[real_mask != 0] = 1\n\t\t# sys_mask[sys_mask != 0] = 1\n\n\t\t#img_raw=img_raw[:,:,0:3]\n\t\timg_light = np.loadtxt(os.path.join(self.light_path,light_name))\n\n\t\t#MASK CARS TO 0\n\t\timg_real[np.where(real_mask!=0)]=0\n\t\timg_raw[np.where(sys_mask != 0)] = 0\n\t\t#img_raw[np.where(real_mask != 0)] = 0\n\n\n\t\timg_real = misc.imresize(img_real,(128,256))\n\t\timg_raw = misc.imresize(img_raw, (128,256))\n\t\t#real_mask = misc.imresize(real_mask,(256,768),'nearest')\n\t\tsys_mask = misc.imresize(sys_mask,(128,256),'nearest')\n\t\treal_mask = misc.imresize(real_mask, (128, 256), 'nearest')\n\t\t# crop_mask = misc.imresize(crop_mask, (256, 768), 'nearest')\n\t\t# crop_real_mask = misc.imresize(crop_real_mask, (256, 768), 'nearest')\n\n\n\t\tdata = {}\n\t\tdata['real_img'] = self.transform(img_real).cuda().unsqueeze(0)#torch.from_numpy(img_real/255.).type(torch.FloatTensor)\n\t\tdata['sys_img'] = self.transform(img_raw).cuda().unsqueeze(0)#torch.from_numpy(img_output).type(torch.FloatTensor)\n\t\tdata['light'] = torch.from_numpy(img_light).cuda().unsqueeze(0) # also numpy\n\t\t#data['real_img'] = torch.from_numpy(img_real).type(torch.FloatTensor).cuda().unsqueeze(0)\n\t\t#data['sys_img'] = torch.from_numpy(img_raw).type(torch.FloatTensor).cuda().unsqueeze(0)\n\t\t#data['real_mask'] = torch.from_numpy(real_mask).type(torch.FloatTensor).cuda().unsqueeze(0).unsqueeze(0)\n\t\tdata['sys_mask'] = torch.from_numpy(sys_mask).type(torch.FloatTensor).cuda().unsqueeze(0)\n\t\tdata['real_mask'] = torch.from_numpy(real_mask).type(torch.FloatTensor).cuda().unsqueeze(0)\n\t\t# data['crop_mask'] = torch.from_numpy(crop_mask).type(torch.FloatTensor).cuda().unsqueeze(0).unsqueeze(0)\n\t\t# data['crop_real_mask'] = torch.from_numpy(crop_real_mask).type(torch.FloatTensor).cuda().unsqueeze(0).unsqueeze(0)\n\n\t\treturn data\n\tdef data_len(self):\n\t\treturn len(self.id_list)\n\nif __name__ == '__main__':\n\t#path = '/export/home/wyin/3D/ShadowNet/data'\n\tpath = '/export/home/enfita/SHADOWNET_DATA/data_CROP'\n\tdataset = 
Load_data(path,shuffle=True)\n\tprint(dataset.data_len())\n\t#dataloader = DataLoader(dataset, batch_size=1, shuffle=True)\n\tcount = 0\n\tfor i in range(100):\n\t\tt1 = time.time()\n\t\tloader = dataset.getitem(i)\n\t\treal_img = loader['real_img']\n\t\tsys_img = loader['sys_img']\n\t\tsys_mask = loader['sys_mask']\n\t\treal_mask = loader['real_mask']\n\t\t#print(real_img)\n\t\t#print(sys_img)\n\t\tprint(sys_mask.shape)","sub_path":"data_loader2.py","file_name":"data_loader2.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242972903","text":"import flask\nfrom flask import request, jsonify\nimport json\nfrom firebase_admin import initialize_app\nfrom firebase_admin import credentials\nfrom firebase_admin import auth\nfrom firebase_admin import db\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\ncred = credentials.Certificate('./secret/serviceaccount.json')\ndefault_app = initialize_app(cred,{\n 'databaseURL': 'https://countrygame-2b9bd.firebaseio.com'\n})\n\n@app.route('/api', methods=['GET'])\ndef api():\n query_parameters = request.args\n \n countryid = query_parameters.get('country')\n animalid = query_parameters.get('animal')\n colourid = query_parameters.get('colour')\n score = 0\n result = [] \n\n if countryid:\n result.append({'country': get_data('countries', countryid, score)})\n else:\n result.append({'country': False}) \n\n if animalid:\n result.append({'animal': get_data('animals', animalid, score)}) \n else:\n result.append({'animal': False})\n \n if colourid:\n result.append({'colour': get_data('colours', colourid, score)}) \n else:\n result.append({'colour': False})\n if not (countryid or colourid or animalid):\n return page_not_found(404)\n\n result.append({'score': score})\n return jsonify(result)\n\ndef get_data(endpoint, value, points):\n uri = \"\".join(['data/', endpoint, '/', value])\n ref = db.reference(uri)\n try:\n count = ref.get() \n except ValueError:\n return False\n finally:\n if type(count) == int:\n ref.set(count + 1)\n points += 10\n return True\n else:\n return False\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return \"
<h1>404</h1><p>The resource could not be found.</p>
\", 404\n\napp.run(port='5000')","sub_path":"api/full.py","file_name":"full.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481125289","text":"from exc.exceptions import JsonloadError\n\n__author__ = 'root'\n\nfrom utils import config\nimport json\nimport logging\nfrom operator import itemgetter\nfrom django.core.serializers.json import DjangoJSONEncoder\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_json_template(file_name):\n json_file_name = config.get_configuration_file(file_name)\n with open(json_file_name) as json_file:\n json_data = json.load(json_file)\n return json_data\n\n\ndef get_json_data(file_name):\n json_file_name = config.get_configuration_file(file_name)\n with open(json_file_name) as json_file:\n json_data = json.load(json_file)\n return json_data\n\n\ndef print_json_data(dict_data):\n data = json.dumps(dict_data)\n logger.info(data)\n logger.info('-----------------------------')\n\n\ndef get_json_string_from_dict(dict_data):\n if 'SlotInfo' in dict_data:\n del dict_data['SlotInfo']\n\n if 'SlotMax' in dict_data:\n del dict_data['SlotMax']\n\n if 'Branch' in dict_data:\n del dict_data['Branch']\n\n data = json.dumps(dict_data)\n return data\n\n\n# Write json list to file\ndef export_json_to_file(data, file_name):\n file_name = config.get_configuration_file(file_name)\n with open(file_name, 'w') as outfile:\n json.dump(data, outfile)\n\n\ndef check_exist(compare, sck_list):\n for row in sck_list:\n if compare['value'] == row['value'] and compare['name'] == row['name']:\n return True\n\n return False\n\n\ndef merge_service_check(script_sck, response_sck):\n for row in script_sck:\n if check_exist(row, response_sck) is False:\n response_sck.append(row)\n\n return response_sck\n\n\ndef is_same_service_check(script_sck, response_sck):\n script_sck_len = len(script_sck)\n response_sck_len = len(response_sck)\n\n # check If script_sck or response_sck == []\n if script_sck_len == 0 and response_sck_len == 0:\n return True, []\n\n if script_sck_len > 0 and response_sck_len == 0:\n return False, sorted(script_sck, key=itemgetter('value'))\n\n if script_sck_len == 0 and response_sck_len > 0:\n return False, sorted(response_sck, key=itemgetter('value'))\n\n # Only sort when script_sck and response_sck is not []\n script_sck = sorted(script_sck, key=itemgetter('value'))\n response_sck = sorted(response_sck, key=itemgetter('value'))\n\n # Return result of equal operation\n is_same = is_same_dict(script_sck, response_sck)\n if not is_same:\n return False, script_sck\n\n return True, response_sck\n\ndef is_same_dict(result_dict, original_dict):\n \"\"\"\n :param result_dict:\n :param original_dict:\n :return:\n \"\"\"\n\n result = cmp(result_dict, original_dict)\n\n if result == 0:\n return True\n\n return False\n\n\ndef get_json_from_objs(django_obj):\n try:\n return json.loads(json.dumps(list(django_obj), cls=DjangoJSONEncoder))\n except JsonloadError:\n raise\n","sub_path":"utils/jsonload.py","file_name":"jsonload.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591779637","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport platform\n\nfid = open('README.md','w')\nfid.write('''# DataMining\nSome scripts about Data Mining and Machine Learning.\n\nIn this repository:\n\n1. There are some diplomas which were got from Coursera MOOC platform.\n\n2. 
Also, I have finished a scraper by using python language.\n\n3. And some assignments and projects about Maching Learning.\n\n ...(To be continue)\n\n---\n## Structure\n''')\nif platform.system()=='Windows':\n sp = '\\\\'\nelse:\n sp = '/'\ncwd = os.getcwd()\nfiles = os.listdir(cwd)\nfor handler in files:\n if handler == '.git':\n continue\n if os.path.isdir(handler):\n fid.write('* '+handler+'\\n')\n children = os.listdir(handler)\n if children.count('README.md')==1:\n fp = open(cwd+sp+handler+sp+'README.md','r')\n for line in fp:\n line = line.rstrip()\n if re.search('^\\* `',line):\n fid.write('\\t'+line+'\\n')\n fp.close()\n continue\n for child in children:\n fid.write('\\t* `'+child+'`\\n')\nfid.write('* `controller.py` can print *README.md* automatically.\\n')\nfid.write('* `README.md` is this file that you see now.')\nfid.write('''\\n\n---\nAuthor: [Renjie Li](https://github.com/lirenjie95)\n\nLicense: [MIT License](https://github.com/lirenjie95/DataMining/blob/master/LICENSE)\n''')\nfid.close()","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417519675","text":"import numpy as np\n\nvideos1 = np.load('4/video.npy', allow_pickle=True)\nregion_points1 = np.load('4/region_point.npy')\ngt1 = np.load('4/ground_truth.npy')\n\nvideos2 = np.load('5/video.npy', allow_pickle=True)\nregion_points2 = np.load('5/region_point.npy')\ngt2 = np.load('5/ground_truth.npy')\n\nn_v = []\nn_r = []\nn_g = []\n\nfor i in range(len(videos1)):\n n_v.append(videos1[i])\nfor i in range(len(videos2)):\n n_v.append(videos2[i])\n\nfor i in range(len(region_points1)):\n n_r.append(region_points1[i])\nfor i in range(len(region_points2)):\n n_r.append(region_points2[i])\n\nfor i in range(len(gt1)):\n n_g.append(gt1[i])\nfor i in range(len(gt2)):\n n_g.append(gt2[i])\n\nprint(len(n_v))\n\nprint(len(n_r))\n\nprint(len(n_g))\n\nnp.save('new/video', n_v)\nnp.save('new/region_point', n_r)\nnp.save('new/ground_truth', n_g)","sub_path":"Tool/joint.py","file_name":"joint.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231369441","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox\nfrom PyQt5 import uic\nfrom utils import show_message, can_convert_str_to_int, center_item\nfrom PyQt5.QtGui import QFont, QFontDatabase\nfrom Logic import Logic\nfrom ShowColoredTree import ShowColoredTree\n\n\ndef print_variant():\n number = 102 # Номер заліковки\n variant = number % 6 + 1\n output = \"Ім'я: Бугайчук Сергій Володимирович\" + \\\n \"\\nГрупа: ІО-01\" + \\\n \"\\nНомер у групі: 2\"\n output += \"\\nВаріант: \" + str(variant)\n show_message(output, QMessageBox.Information, \"Інформація про студента\")\n\n\nclass MainWindow(QMainWindow):\n def update_table(self):\n n = len(self.logic.graph)\n self.table.setRowCount(n)\n self.table.setColumnCount(n)\n self.table.setVerticalHeaderLabels([str(i) for i in range(n)])\n self.table.setHorizontalHeaderLabels([str(i) for i in range(n)])\n for i in range(n):\n for j in range(n):\n self.table.setItem(i, j, center_item(self.logic.graph[i][j]))\n self.showTree.plot()\n\n def save_info(self):\n node = self.enter_count_node.text()\n edge = self.enter_count_edge.text()\n if not can_convert_str_to_int(node):\n show_message(\"Неправильно введена кількість вершин\")\n return\n if not can_convert_str_to_int(edge):\n 
show_message(\"Неправильно введена кількість ребер\")\n return\n node, edge = int(node), int(edge)\n if edge > node * (node - 1) // 2:\n show_message(\"Неправильно введена кількість ребер та вершин\")\n return\n if node < 1:\n show_message(\"Кількість вершин повинна бути більша одиниці\")\n return\n if edge < node - 1:\n show_message(\"Кількість ребер повинна бути більша або рівна кількості вершин мінус один\")\n return\n self.logic.save_info(node, edge)\n self.logic.generate_empty_graph()\n self.update_table()\n\n def set_custom_graph(self):\n self.logic.generate_custom_graph()\n self.set_nodes_edges_info()\n self.update_table()\n\n def set_nodes_edges_info(self):\n self.enter_count_node.setText(str(self.logic.count_node))\n self.enter_count_edge.setText(str(self.logic.count_edge))\n\n def generate_random(self):\n self.logic.generate_random_graph()\n self.update_table()\n\n def change_item(self, item):\n i, j = item.row(), item.column()\n if item.text() != str(self.logic.graph[i][j]):\n if not can_convert_str_to_int(item.text()):\n show_message(\"Неправильно уведені дані\")\n self.update_table()\n else:\n self.logic.graph[i][j] = self.logic.nx_graph[i][j]['weight'] = int(item.text())\n self.showTree.plot()\n\n @staticmethod\n def show_window(value):\n if isinstance(value, list):\n def foo():\n for i in value:\n i.show()\n return foo\n return lambda: value.show()\n\n def __init__(self):\n super(MainWindow, self).__init__()\n uic.loadUi('MainWindowForm.ui', self)\n self.logic = Logic()\n self.showTree = ShowColoredTree(self.logic)\n self.info.triggered.connect(print_variant)\n self.generate_random_button.clicked.connect(self.generate_random)\n self.save.clicked.connect(self.save_info)\n self.table.itemChanged.connect(self.change_item)\n self.show_tree.clicked.connect(self.show_window([self.showTree]))\n self.set_graph.clicked.connect(self.set_custom_graph)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n font_database = QFontDatabase()\n font_database.addApplicationFont(\"./assets/Lucida Grande.ttf\")\n font = QFont(\"Lucida Grande\")\n QApplication.setFont(font)\n widget = MainWindow()\n widget.show()\n sys.exit(app.exec())\n","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45981208","text":"#!/usr/bin/env python3\n\nimport argparse, codecs, datetime, os, socket, sys, time # do not use any other imports/libraries\nfrom urllib.parse import urlparse\n\n# took x.y hours (please specify here how much time your solution required)\n\n# parse arguments\nparser = argparse.ArgumentParser(description='TLS v1.2 client')\nparser.add_argument('url', type=str, help='URL to request')\nparser.add_argument('--certificate', type=str, help='File to write PEM-encoded server certificate')\nargs = parser.parse_args()\n\n\ndef nb(i, length=False):\n # converts integer to bytes\n b = b''\n if length == False:\n length = (i.bit_length() + 7) // 8\n for _ in range(length):\n b = bytes([i & 0xff]) + b\n i >>= 8\n return b\n\n\ndef bn(b):\n # converts bytes to integer\n i = 0\n for char in b:\n i <<= 8\n i |= char\n return i\n\n\nSSL_VERSION = b\"\\x03\\x01\"\nHANDSHAKE = b\"\\x16\"\n\n\n# returns TLS record that contains ClientHello Handshake message\ndef client_hello():\n print(\"--> ClientHello()\")\n\n # list of cipher suites the client supports\n csuite = b\"\\x00\\x05\" # TLS_RSA_WITH_RC4_128_SHA\n csuite += b\"\\x00\\x2f\" # 
TLS_RSA_WITH_AES_128_CBC_SHA\n csuite += b\"\\x00\\x35\" # TLS_RSA_WITH_AES_256_CBC_SHA\n\n # add Handshake message header\n client_random = b\"\\xAB\" * 32\n # print(\"Client random: %s\" % client_random.encode(\"hex\"))\n CLIENT_HELLO = b\"\\x01\"\n client_version = SSL_VERSION\n unix_time = nb(int(time.time()))\n random_bytes = os.urandom(28) # client_random[4:]\n session_id_len = b\"\\x00\"\n session_id = b\"\"\n cipher_suites_len = nb(6, 2) # only TLS_DHE_RSA_WITH_AES_256_CBC_SHA\n compression_method_len = b\"\\x01\"\n compression_method = b\"\\x00\" # no compression\n # add record layer header\n client_hello_data = (b'\\x03\\x03' + unix_time + random_bytes +\n session_id_len + session_id + cipher_suites_len +\n csuite +\n compression_method_len + compression_method)\n client_hello_tlv = CLIENT_HELLO + nb(len(client_hello_data), 3) + client_hello_data\n record = HANDSHAKE + SSL_VERSION + nb(len(client_hello_tlv), 2) + client_hello_tlv\n return record\n\n\n# returns TLS record that contains 'Certificate unknown' fatal Alert message\ndef alert():\n print(\"--> Alert()\")\n\n # add alert message\n\n # add record layer header\n record = b'' ##del\n alert_ld = b'\\x02' + b'\\0x2e'\n record = b\"\\x15\" + SSL_VERSION + nb(len(alert_ld), 2) + alert_ld\n return record\n\n\n# parse TLS Handshake messages\ndef parsehandshake(r):\n global server_hello_done_received\n # read Handshake message type and length from message header\n htype = r['htype'] ## del\n print(\"<--- Handshake()\")\n if htype == 0x02:\n print(\"\t<--- ServerHello()\")\n server_random = r['server_random']\n gmt = r['gmt']\n sessid = r['sessid']\n cipher = r['cipher']\n compression = r['compression']\n print(\"\t[+] server randomness:\", server_random.hex().upper())\n print(\"\t[+] server timestamp:\", gmt)\n print(\"\t[+] TLS session ID:\", sessid.hex().upper())\n\n if cipher == b\"\\x00\\x2f\":\n print(\"\t[+] Cipher suite: TLS_RSA_WITH_AES_128_CBC_SHA\")\n elif cipher == b\"\\x00\\x35\":\n print(\"\t[+] Cipher suite: TLS_RSA_WITH_AES_256_CBC_SHA\")\n elif cipher == b\"\\x00\\x05\":\n print(\"\t[+] Cipher suite: TLS_RSA_WITH_RC4_128_SHA\")\n else:\n print(\"[-] Unsupported cipher suite selected:\", cipher.hex())\n sys.exit(1)\n\n if compression != 0x00:\n print(\"[-] Wrong compression:\", compression)\n sys.exit(1)\n\n elif htype == 0x0b:\n print(\"\t<--- Certificate()\")\n certlen = r['certlen']\n print(\"\t[+] Server certificate length:\", certlen)\n if args.certificate:\n print(\"\t[+] Server certificate saved in:\", args.certificate)\n elif htype == 0x0e:\n print(\"\t<--- ServerHelloDone()\")\n server_hello_done_received = True\n else:\n print(\"[-] Unknown Handshake type:\", htype)\n sys.exit(1)\n\n # handle the case of several handshake messages in one record\n # leftover = ...\n # if len(leftover):\n # parsehandshake(leftover)\n\n\n# parses TLS record\ndef parserecord(r):\n # parse TLS record header and pass the record body to the corresponding parsing method (i.e., parsehandshake())\n # htype = r.htype ## del\n # server_random = r.server_random\n # gmt = r.gmt\n # sessid = r.sessid\n # cipher = r.cipher\n # compression = r.compression\n # certlen = r.certlen\n header = {\n\n }\n type = bn(r[:1]) # recv_num_bytes(s, 1)\n header['htype'] = type\n if type == 0x02:\n length_b = r[1:4]\n length = bn(length_b)\n body = (r[4:]) # recv_num_bytes(s, length)\n version = body[:2]\n server_randomness = body[2: 2 + 32]\n timestamp = server_randomness[:4]\n session_id_len_b = body[34:35]\n session_id_len = bn(session_id_len_b)\n 
session_id = body[35: 35 + session_id_len]\n        cipher_suit = body[35 + session_id_len: 35 + session_id_len + 2]\n        compression_method = body[35 + session_id_len + 2: 35 + session_id_len + 2 + 1]\n        extensions = body[35 + session_id_len + 2 + 1:]\n        header['server_random'] = server_randomness\n        header['gmt'] = datetime.datetime.fromtimestamp(bn(timestamp)).strftime('%Y-%m-%d %H:%M:%S')\n        header['sessid'] = session_id\n        header['cipher'] = cipher_suit\n        header['compression'] = bn(compression_method)\n    elif type == 0x0b:\n        certificate_field_len = bn(r[1:4])\n        certificates_len = bn(r[4:7])\n        certificates = []\n        cert_string_left = r[7: 7 + certificates_len]\n        while cert_string_left:\n            cert_len = bn(cert_string_left[:3])\n            certificates.append(cert_string_left[3: 3 + cert_len])\n            cert_string_left = cert_string_left[3 + cert_len:]\n        header['certlen'] = len(certificates[0])\n        if args.certificate:\n            with open(args.certificate, 'wb') as f:\n                f.write(certificates[0])\n\n    parsehandshake(header)\n\n\n# read from the socket full TLS record\ndef recv_num_bytes(s, num):\n    ret = b\"\"\n    while len(ret) < num:\n        ret += s.recv(min(4096, num - len(ret)))\n    return ret\n\n\ndef readrecord():\n    global s\n    record = b\"\"\n    # read the TLS record header (5 bytes)\n    rec_type = recv_num_bytes(s, 1)\n    tls_version = recv_num_bytes(s, 2)\n    # find data length\n    rec_len_b = recv_num_bytes(s, 2)\n    rec_len = bn(rec_len_b)\n    # read the TLS record body\n    record = recv_num_bytes(s, rec_len)\n    return record\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nurl = urlparse(args.url)\nhost = url.netloc.split(':')\nif len(host) > 1:\n    port = int(host[1])\nelse:\n    port = 443\nhost = host[0]\npath = url.path\n\ns.connect((host, port))\ns.send(client_hello())\n\nserver_hello_done_received = False\nwhile not server_hello_done_received:\n    parserecord(readrecord())\ns.send(alert())\nprint(\"[+] Closing TCP connection!\")\ns.close()\n# https://www.eesti.ee/ --certificate server.pem\n","sub_path":"Others/academic/MS/ut/applied_crypto/lab11/helper_tls_getcert.py","file_name":"helper_tls_getcert.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"333907389","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 29 13:43:00 2018\n\n@author: alberto.camina\n\"\"\"\n\ndef calcularPrecio(kg, fruta):\n    \"\"\"float, str -> float\n    OBJ: Returns the total price of the purchase based on the kg and the fruit\n    PRE: fruta: platanos, peras, mandarinas or caquis\n    \"\"\"\n    precio = 0\n    \n    if fruta == \"peras\":\n        precio = 1.5 * kg\n    elif fruta == \"mandarinas\":\n        precio = 2.99 * kg\n    elif fruta == \"platanos\":\n        precio = 0.99 * kg\n    elif fruta == \"caquis\":\n        precio = 1.75 * kg \n    \n    return precio\n    \n    \nkg = 1\nfruta = \"peras\"\n\nprint(\"The price of\", kg, \"kg of\", fruta, \"is:\", calcularPrecio(kg, fruta))\n","sub_path":"laboratorio/EjerciciosExamen/extra-2.py","file_name":"extra-2.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"394494772","text":"from swap_meet.item import Item \n\nclass Vendor:\n\n    def __init__(self, inventory=None):\n        # avoid a shared mutable default argument\n        self.inventory = inventory if inventory is not None else []\n\n    def add(self, item):\n        self.inventory.append(item)\n        return item\n    \n    def remove(self, item):\n        if item in self.inventory:\n            self.inventory.remove(item)\n            return item\n\n        else:\n            return False\n\n    def get_by_category(self, category):\n        
self.category = category\n        category_list = []\n\n        for item in self.inventory:\n\n            if item.category == category:\n                category_list.append(item)\n\n        return category_list\n\n    def swap_items(self, other_vendor, my_item, their_item):\n        \n        self.other_vendor = other_vendor\n        self.my_item = my_item\n        self.their_item = their_item\n\n        if (my_item in self.inventory) and (their_item in other_vendor.inventory):\n\n            self.inventory.remove(my_item)\n            self.inventory.append(their_item)\n            other_vendor.inventory.append(my_item)\n            other_vendor.inventory.remove(their_item)\n\n            return True\n        \n        return False\n\n    def swap_first_item(self, other_vendor):\n\n        self.other_vendor = other_vendor\n\n        if len(self.inventory) > 0 and len(other_vendor.inventory) > 0:\n            my_first_item = self.inventory[0]\n            their_first_item = other_vendor.inventory[0]\n\n            self.inventory[0] = their_first_item\n            other_vendor.inventory[0] = my_first_item\n            return True\n\n        else:\n            return False\n\n    def get_best_by_category(self, category):\n        \n        category_list = []\n        top_counter = 0\n\n        for item in self.inventory:\n            if category == item.category:\n                category_list.append(item)\n        \n        if len(category_list) == 0:\n            return None \n        \n        for best in category_list:\n            if best.condition > top_counter:\n                top_counter = best.condition\n                best_item = best \n\n        return best_item\n    \n    \n    def swap_best_by_category(self, other, my_priority, their_priority):\n        \n        my_best_item = self.get_best_by_category(their_priority)\n        their_best_item = other.get_best_by_category(my_priority)\n\n        if my_best_item and their_best_item:\n            self.swap_items(other, my_best_item, their_best_item)\n            return True \n        else:\n            return False\n","sub_path":"swap_meet/vendor.py","file_name":"vendor.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"394163115","text":"import pandas as pd\n\n\ndef Split_chronological_account(path1, path2):\n\n    data = pd.read_excel(io=path1)  # read the chronological-account (journal) workbook that needs splitting\n    data_df = data['科目名称'].str.split(r'-', expand=True)  # split the account-name column on '-'\n    data_0 = pd.concat([data, data_df], axis=1)  # merge the split columns back into the journal\n\n    rows = data_0.shape[0]  # number of rows; shape[1] would give the column count\n    department_list = []  # per-account data list, filled below\n    df_list = []  # per-account DataFrame list, filled below\n\n    data2 = pd.DataFrame(data_0[0])  # column holding the top-level account names\n    data3 = data2.drop_duplicates(\n        subset=None, keep='last', inplace=False)  # de-duplicate\n    department_list = list(data3[0])  # unique account names\n    n = len(department_list)  # number of accounts, used to pre-create the DataFrames\n\n    # create one DataFrame per top-level account\n    names = locals()\n    for i in range(n):\n        names['n' + str(i)] = pd.DataFrame()\n        df_list.append(names['n' + str(i)])\n\n    # copy each top-level account's rows into its pre-created DataFrame\n    for department in range(n):\n        for i in range(0, rows):\n            if data_0[0][i] == department_list[department]:\n                df_list[department] = pd.concat(\n                    [df_list[department], data_0.iloc[[i], :]], axis=0, ignore_index=True)\n\n    writer = pd.ExcelWriter(path=path2)  # use pd.ExcelWriter() to store multiple sheets\n\n    # write the data\n    for i in range(n):\n        df_list[i].to_excel(\n            writer,\n            sheet_name=str(\n                department_list[i]),\n            index=False)  # drop the index column\n        print('Writing sheet', i)\n\n    writer.save()\n\nif __name__ == '__main__':\n    path1 = \"C:/Users/Admin/Desktop/2020序时账-轿子山.xls\"\n    path2 = 'C:/Users/Admin/Desktop/asd.xlsx'\n    Split_chronological_account(path1,path2)","sub_path":"BP_NEURAL_NETWORK/2da.py","file_name":"2da.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"94475230","text":"#coding:utf-8\r\nimport time\r\nfrom time import 
sleep\r\n#from WeChatCon import *\r\nimport json\r\nimport threading\r\nimport os\r\nimport logging,logging.config\r\nfrom TypeDef import TypeDef\r\n\r\nclass Utill(object):\r\n    @staticmethod\r\n    def night_check_time():\r\n        ret = 0\r\n        local_hour = int(time.strftime('%H', time.localtime(time.time())))\r\n        if (local_hour == 22) or (local_hour == 23) or (local_hour >= 0 and local_hour < 8):\r\n            print(\"Night Time, don't send any message\")\r\n            ret = -1\r\n        return ret\r\n\r\n    @staticmethod\r\n    def log_init():\r\n        # init log from the conf file\r\n        logging.config.fileConfig(\"conf/logging.conf\")\r\n        # every py module inits its own logger\r\n        # can overwrite the log level in logging.conf for every log\r\n\r\n    @staticmethod\r\n    def create_qr_ticket():\r\n        url = \"https://api.weixin.qq.com/cgi-bin/qrcode/create?access_token=%s\"\r\n\r\n        QR_data = {\r\n            \"expire_seconds\": 604800,\r\n            \"action_name\": \"QR_STR_SCENE\",\r\n            \"action_info\":\r\n                {\r\n                    \"scene\":\r\n                        {\r\n                            \"scene_str\": \"test\"\r\n                        }\r\n                }\r\n        }\r\n        token = WeChatHandler().getWeChatToken()\r\n        r = requests.post(url % token, data=json.dumps(QR_data).encode('utf-8'))\r\n\r\n    @staticmethod\r\n    def asDigital(digital_str):\r\n        temp_dig = \"\"\r\n        if digital_str.isdigit():\r\n            return digital_str\r\n\r\n        for i in digital_str:\r\n            if i in TypeDef.digital.keys():\r\n                temp = TypeDef.digital[i]\r\n                temp_dig += str(temp)\r\n            else:\r\n                temp_dig = \"-1\"\r\n\r\n        return int(temp_dig)\r\n\r\n    @staticmethod\r\n    def is_last_day():\r\n        ret = 0\r\n        lastDays = [\"01-31\", \"03-31\", \"04-30\", \"05-31\", \"06-30\", \"07-31\", \"08-31\", \"09-30\", \"10-31\", \"11-30\", \"12-31\"]\r\n        local_mmdd = time.strftime('%m-%d', time.localtime(time.time()))\r\n        if local_mmdd in lastDays:\r\n            ret = 1\r\n        elif int(time.strftime('%Y', time.localtime(time.time()))) % 4 != 0 and local_mmdd == \"02-28\":\r\n            ret = 1\r\n        elif int(time.strftime('%Y', time.localtime(time.time()))) % 4 == 0 and local_mmdd == \"02-29\":\r\n            ret = 1\r\n        else:\r\n            ret = 0\r\n        return ret\r\n\r\nclass Test1(object):\r\n    __singleton = None\r\n\r\n    def __init__(self):\r\n        self.__lock = threading.Lock()\r\n\r\n    @staticmethod\r\n    def get_instance():\r\n        if Test1.__singleton is None:\r\n            Test1.__singleton = Test1()\r\n        return Test1.__singleton\r\n\r\n    def test1(self):\r\n        self.__lock.acquire()\r\n        print(\"test1\")\r\n        sleep(5)\r\n        print(\"test1 END\")\r\n        self.__lock.release()\r\n\r\n    def test2(self):\r\n        self.__lock.acquire()\r\n        print(\"test2\")\r\n        sleep(30)\r\n        print(\"test2 END\")\r\n        self.__lock.release()\r\n\r\nif __name__ == \"__main__\":\r\n    #aws_create_vm_cmd = \"/root/aws_script/terraform_sample/aws_start.sh\"\r\n    #os.system(aws_create_vm_cmd)\r\n    result = Utill.is_last_day()\r\n    vm_status = os.popen(\"cd /root/aws_script/terraform_sample; terraform show\")\r\n    vm = {}\r\n    for line in vm_status.readlines():\r\n        key_value = line.split(\"=\")\r\n        if type(key_value) == list and len(key_value) == 2:\r\n            key = key_value[0].strip()\r\n            value = key_value[1].strip().strip(\"\\\n\")\r\n            vm[key] = value\r\n    vm_status.close()\r\n\r\n    msg = \"Create VM id <%s> with AMI <%s>. 
Public IP <%s>, Private IP <%s>\" % \\\r\n (vm['id'], vm['ami'], vm['private_ip'], vm['public_ip'])\r\n print(msg)","sub_path":"WeChatServer/utill.py","file_name":"utill.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31367936","text":"__author__ = 'Oleksandr_Raskosov'\n\n\nfrom setuptools import setup\n\n\nsetup(\n name='cloudify-libcloud-provider',\n version='1.0',\n author='Oleksandr_Raskosov',\n author_email='Oleksandr_Raskosov@epam.com',\n packages=['cloudify_libcloud'],\n license='LICENSE',\n description='Cloudify Libclouod provider',\n package_data={'cloudify_libcloud': ['cloudify-config.yaml',\n 'cloudify-config.defaults.yaml']},\n install_requires=[\n 'apache-libcloud==0.14.1',\n 'IPy==0.81',\n 'cloudify-cli==3.0'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"567941153","text":"import socket\nimport hashlib\nimport base64\nimport threading\nimport urllib.request\n\n\nWS_MAGIC_STRING = \"258EAFA5-E914-47DA-95CA-C5AB0DC85B11\"\n\nclass server:\n sock = 0\n\n def __init__(self, ip, port, connections):\n self.sock = MySocket(ip, port, connections, self )\n\nclass MyUser:\n user_id = 0\n socket = 0\n handshack = 0\n\n def __init__(self, socket, user_id):\n self.user_id = user_id\n self.socket = socket\n\n\nclass MySocket:\n uid = 0\n users = []\n server = 0\n\n def __init__(self, address, port, connections, server):\n self.server = server\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((TCP_IP, TCP_PORT))\n server.listen(connections)\n while True:\n channel, detials = server.accept()\n self.uid = self.uid + 1\n self.users.append(MyUser(channel, self.uid))\n WS(channel, detials, self).start()\n\n\n\nclass WS(threading.Thread):\n def __init__(self, channel, details, websocket):\n self.channel = channel\n self.details = details\n self.websocket = websocket\n threading.Thread.__init__(self)\n\n def run(self):\n print(\"Sockey> Received connection \", self.details[0],self.details[1])\n self.hand_shake_connect(self.channel)\n while True:\n # Alway Connection\n self.interact(self.channel)\n\n def hand_shake_connect(self, channel):\n # self.request is the TCP socket connected to the client\n self.data = channel.recv(1024).strip()\n dd = str(self.data.decode('ascii'))\n headers = dd.split(\"\\r\\n\")\n print(headers)\n\n # is it a websocket request?\n if \"Connection: Upgrade\" in headers and \"Upgrade: websocket\" in headers:\n # getting the websocket key out\n for h in headers:\n if \"Sec-WebSocket-Key\" in h:\n key = h.split(\" \")[1]\n #print(key)\n # let's shake hands shall we?\n self.shake_hand(key, client=channel)\n else:\n channel.sendall(bytes(\"HTTP/1.1 400 Bad Request\\r\\n\" + \\\n \"Content-Type: text/plain\\r\\n\" + \\\n \"Connection: close\\r\\n\" + \\\n \"\\r\\n\" + \\\n \"Incorrect request\", encoding=\"utf-8\"))\n\n def finduser(self, client):\n for user in self.websocket.users:\n if user.socket == client:\n return user\n return 0\n\n def interact(self, client):\n users = self.websocket.users\n this_user = self.finduser(client)\n\n aaa = client.recv(1024).strip()\n try:\n payload = self.decode_frame(bytearray(aaa))\n except:\n print(\"Frame is Unknow\")\n\n try:\n decoded_payload = payload.decode('utf-8')\n print(decoded_payload)\n\n # Exit Command\n if \"p\" == decoded_payload.lower():\n 
print(\"Bidding goodbye to our client...\", self.details)\n exit(0)\n except:\n print(\"error Cannot decode, Exit connection\")\n exit(0)\n self.send_frame(payload)\n\n def shake_hand(self,key, client):\n # calculating response as per protocol RFC\n key = key + WS_MAGIC_STRING\n aa = hashlib.sha1(key.encode('ascii')).digest()\n resp_key = base64.standard_b64encode(aa)\n\n resp = \"HTTP/1.1 101 Switching Protocols\\r\\n\" + \\\n \"Upgrade: websocket\\r\\n\" + \\\n \"Connection: Upgrade\\r\\n\" + \\\n \"Sec-WebSocket-Accept: %s\\r\\n\\r\\n\"%(resp_key.decode('ascii'))\n client.sendall(resp.encode('ascii'))\n\n def decode_frame(self,frame):\n opcode_and_fin = frame[0]\n # assuming it's masked, hence removing the mask bit(MSB) to get len. also assuming len is <125\n payload_len = frame[1] - 128\n\n mask = frame [2:6]\n encrypted_payload = frame [6: 6+payload_len]\n\n payload = bytearray([ encrypted_payload[i] ^ mask[i%4] for i in range(payload_len)])\n\n return payload\n\n def send_frame(self, payload):\n # setting fin to 1 and opcpde to 0x1\n frame = [129]\n # adding len. no masking hence not doing +128\n frame += [len(payload)]\n # adding payload\n frame_to_send = bytearray(frame) + payload\n\n self.channel.sendall(frame_to_send)\n\n\n\nif __name__ == \"__main__\":\n url = \"https://gist.githubusercontent.com/kandation/73be69f40ae02471573cc488630614ab/raw/ippop\"\n with urllib.request.urlopen(url) as response:\n html = response.read()\n html = str(html.decode('ascii')).split(\";\")\n html = ['','','']\n\n print(html)\n TCP_IP = str(html[0]) if html[0] != '' else \"127.0.0.1\"\n TCP_PORT = int(html[1]) if html[1] != '' else 9999\n CONNECTIONS = int(html[2]) if html[2] != '' else 100\n web = server(TCP_IP, TCP_PORT, CONNECTIONS)\n","sub_path":"server_socket_without_iot.py","file_name":"server_socket_without_iot.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638543053","text":"from io import BytesIO\n\nfrom flask import Flask, jsonify, request, abort\nfrom flask_cors import CORS\n\nfrom ssc.Invites.invites import fetch_user_invites, process_invite, insert_user_invite\nfrom ssc.Users.users import fetch_users, add_user, fetch_user_workspaces\nfrom ssc.Workspaces.workspaces import *\nfrom ssc.audio_analysis.acr_api_requests import identify_audio, upload_audio\nfrom ssc.audiokey_api.audiokey import add_audio_key\nfrom ssc.audiokey_api.audiokey import get_audio_key\nfrom ssc.login.get_logged_in import fetch_user_details\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/\")\ndef homeDummy():\n return 'Hello'\n\n\n@app.route(\"/api/encryptFile\", methods = ['POST'])\ndef post_encrypted_file():\n key = get_audio_key(request.form[\"session_id\"])\n if (\"error\" in key):\n return jsonify(key), 404\n return encrypt_file(request.files['file'], request.form['bucket_name'], key)\n\n\n@app.route(\"/api/decryptFile//\", methods = ['GET', 'POST'])\ndef download_decrypted_file(workspace_name, file):\n if (not request.files):\n abort(400)\n\n audio_file = request.files[\"file\"].read()\n audio_file_bytes = BytesIO(audio_file)\n sample_bytes = len(file)\n acr_response = identify_audio(audio_file_bytes, sample_bytes)\n\n if acr_response[\"status\"][\"msg\"] == 'No result':\n return jsonify({\"notIdentified\": True})\n else:\n audio_key = acr_response[\"metadata\"][\"music\"][0][\"acrid\"]\n return decrypt_file(workspace_name, file, audio_key)\n\n\n@app.route('/api/login', methods = ['POST'])\ndef login():\n 
username = request.json['username']\n password = request.json['password']\n res = fetch_user_details(username, password)\n res_json = jsonify(res)\n\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 200\n\n\n@app.route(\"/api/users\")\ndef get_users():\n res = fetch_users()\n res_json = jsonify(res)\n\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 200\n\n\n@app.route(\"/api/users\", methods = ['POST'])\ndef post_user():\n username = request.json['username']\n password = request.json['password']\n res = add_user(username, password)\n res_json = jsonify(res)\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 201\n\n\n@app.route('/api/users/', methods = [\"GET\"])\ndef get_user_workspaces(username):\n res = fetch_user_workspaces(username)\n res_json = jsonify(res)\n return res_json, 200\n\n\n@app.route(\"/api/deleteUser\", methods = ['DELETE'])\ndef delete_user():\n if (not request.json) | ('username' not in request.json) | ('admin_username' not in request.json) | (\n 'workspace_name' not in request.json):\n abort(400)\n\n res = delete_user_from_workspace(request.json)\n res_json = jsonify(res)\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 204\n\n\n@app.route(\"/api/invites\", methods = [\"POST\"])\ndef invite_user():\n if (not request.json) | ('username' not in request.json) \\\n | ('workspace' not in request.json) | ('invitedBy' not in request.json):\n abort(400)\n\n res = insert_user_invite(request.json)\n res_json = jsonify(res)\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 201\n\n\n@app.route(\"/api/invites/\", methods = [\"GET\"])\ndef get_user_invites(username):\n res = fetch_user_invites(username)\n res_json = jsonify(res)\n return res_json, 200\n\n\n@app.route(\"/api/invites/\", methods = [\"POST\"])\ndef update_invite(username):\n if (not request.json) | ('accept' not in request.json) | ('workspace' not in request.json):\n abort(400)\n\n res = process_invite(username, request.json)\n res_json = jsonify(res);\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 201\n\n\n@app.route('/api/workspaces', methods = ['POST'])\ndef handle_create_workspace():\n if (not request.json) | ('name' not in request.json) | ('admin' not in request.json):\n abort(400)\n else:\n if ('users' in request.json):\n res = create_workspace_with_users(request.json)\n else:\n res = create_workspace_only(request.json)\n print(res)\n res_json = jsonify(res);\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 201\n return jsonify(res_json);\n\n\n@app.route(\"/api/workspaces\", methods = [\"DELETE\"])\ndef handle_delete_workspace():\n if (not request.json) | ('workspace' not in request.json) | ('deleted_by' not in request.json):\n abort(400)\n\n res = delete_workspace(request.json)\n res_json = jsonify(res)\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 204\n\n\n@app.route(\"/api/workspaces//files\", methods = [\"GET\"])\ndef get_workspace_file(name):\n res = fetch_workspace_files(name)\n res_json = jsonify(res)\n return res_json, 200\n\n\n@app.route(\"/api/workspaces//users\", methods = [\"GET\"])\ndef get_workspace_users(name):\n res = fetch_workspace_users(name)\n res_json = jsonify(res);\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 200\n\n\n@app.route(\"/api/workspaces/\", methods = [\"PUT\"])\ndef handle_update_workspace(workspace_name):\n if (not request.json) | ('username' 
not in request.json) \\\n | ('admin_username' not in request.json) | ('make_admin' not in request.json):\n abort(400)\n\n res = update_admin(workspace_name, request.json)\n res_json = jsonify(res);\n if (\"error\" in res):\n return res_json, 404\n else:\n return res_json, 201\n print(res)\n res_json = {'workspace_admin_updated': res}\n if (res == False): res_json['error'] = 'Could not update workspace. ' \\\n 'Check admin user is an admin'\n\n return jsonify(res_json);\n\n\n@app.route(\"/api/audiokey\", methods = [\"POST\"])\ndef post_audio_key():\n if (not request.files) | (\"session_id\" not in request.values) | (\"filename\" not in request.values):\n abort(400)\n\n file = request.files[\"file\"].read()\n audio_file_copy1 = BytesIO(file)\n audio_file_copy2 = BytesIO(file)\n sample_bytes = len(file)\n session_id = request.values.get(\"session_id\")\n file_name = request.values.get(\"filename\")\n acr_response = identify_audio(audio_file_copy1, sample_bytes)\n if acr_response[\"status\"][\"msg\"] == 'No result' and (\"isRecorded\" in request.values):\n return jsonify({\"recordedNotRecognised\": True})\n if acr_response[\"status\"][\"msg\"] == 'No result':\n acr_upload_response = upload_audio(audio_file_copy2, file_name, session_id)\n add_audio_key(acr_upload_response[\"acr_id\"], session_id)\n return jsonify({\"notRecognised\": True})\n if 'custom_files' in acr_response[\"metadata\"].keys():\n return jsonify({\"fileError\": True})\n if acr_response[\"status\"][\"msg\"] == 'Success':\n add_audio_key(acr_response[\"metadata\"][\"music\"][0][\"acrid\"], session_id)\n return jsonify({\"title\": acr_response[\"metadata\"][\"music\"][0][\"title\"],\n \"artist\": acr_response[\"metadata\"][\"music\"][0][\"artists\"][0][\"name\"]})\n\n return jsonify('Error check')\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 9090))\n app.run(host = '0.0.0.0', port = port)\n","sub_path":"ssc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"325867778","text":"\"\"\"\nmap() is a higher-order built-in function that takes a function and iterable as inputs, and returns an iterator that applies the function to each element of the iterable. \n\"\"\"\n\n# 1. basic\ndef two_times(numberList):\n result = [ ]\n for number in numberList:\n result.append(number*2)\n return result\n\nresult = two_times([1, 2, 3, 4])\nprint(result)\n\n\n# 2. map3\ndef two_times(x): \n return x*2\n\nlist(map(two_times, [1, 2, 3, 4]))\n[2, 4, 6, 8]\n\n\n# 3. map + lambda\nlist(map(lambda a: a*2, [1, 2, 3, 4]))\n[2, 4, 6, 8]\n\n\n# 4. 
the other example\nnumbers = [\n [34, 63, 88, 71, 29],\n [90, 78, 51, 27, 45],\n [63, 37, 85, 46, 22],\n [51, 22, 34, 11, 18]\n ]\n\ndef mean(num_list):\n return sum(num_list) / len(num_list)\n\naverages = list(map(mean, numbers))\nprint(averages)\n\n# Answer\nnumbers = [\n [34, 63, 88, 71, 29],\n [90, 78, 51, 27, 45],\n [63, 37, 85, 46, 22],\n [51, 22, 34, 11, 18]\n ]\n\naverages = list(map(lambda x: sum(x) / len(x), numbers))\nprint(averages)","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77597229","text":"\"\"\"A Google Cloud Python Pulumi program\"\"\"\n\nimport pulumi\nfrom pulumi import ResourceOptions, Output\nfrom pulumi_gcp import storage, bigtable\nfrom config import getResourceName, subnet_cidr_blocks, project\nimport network\nimport postgres\nimport serverless\n\nimport pulumi_random as random\n\n# Name prefix\nmyname = \"demo\"\n\n# creating project name tag\nprojectName = pulumi.get_project()\n\n# creating stack name tag\nstackName = pulumi.get_stack()\n\n# common tags. need to pass in\ncommonTags = {\n \"project\": projectName,\n \"stack\": stackName,\n}\n\n# Create a GCP resource (Storage Bucket)\n# With no getresourcename\n#bucket = storage.Bucket('shaht-my-bucket', labels=commonTags)\n# Expected output: gs://shaht-my-bucket-7477081\n\nbucket = storage.Bucket(getResourceName(f\"{myname}-bucket\"), labels=commonTags)\n# Expected output: gs://gcp-reference-architecture-py-shaht-my-bucket-b89e42f\n\n#bucket = storage.Bucket(getResourceName(), labels=commonTags)\n# Expected output: gs://gcp-reference-architecture-py-1d70b6d\n\n#mynetwork = network.Vpc(\"shaht-vpc\", network.VpcArgs(subnet_cidr_blocks=subnet_cidr_blocks,))\n#mynetwork = network.Vpc(getResourceName(), network.VpcArgs(subnet_cidr_blocks=subnet_cidr_blocks,))\n\n# creates vpc\nmynetwork = network.Vpc(getResourceName(f\"{myname}-vpc\"), network.VpcArgs(subnet_cidr_blocks=subnet_cidr_blocks,))\n# creates postgres sql server in cloud\nmydatabase = postgres.Database(getResourceName(f\"{myname}-database\"), postgres.DbArgs(private_network=mynetwork.id, tags = commonTags))\n# creates google cloud function\nmyfunction = serverless.Function(getResourceName(f\"{myname}-function\"), serverless.FuncArgs(tags=commonTags))\n\n# Creating subnet and cidr block outputs\nmy_subnet_names = []\nmy_subnet_cidrs_blocks = []\n\nmy_subnet_names.append(mynetwork.subnets[0].name.apply(lambda subnet: subnet))\nmy_subnet_names.append(mynetwork.subnets[1].name.apply(lambda subnet: subnet))\nmy_subnet_names.append(mynetwork.subnets[2].name.apply(lambda subnet: subnet))\n\nmy_subnet_cidrs_blocks.append(mynetwork.subnets[0].ip_cidr_range.apply(lambda subnet: subnet))\nmy_subnet_cidrs_blocks.append(mynetwork.subnets[1].ip_cidr_range.apply(lambda subnet: subnet))\nmy_subnet_cidrs_blocks.append(mynetwork.subnets[2].ip_cidr_range.apply(lambda subnet: subnet))\n\n\n# Export the DNS name of the bucket\npulumi.export('bucket_name', bucket.url)\n\n# Export the vpc information\npulumi.export('network_vpc_name', mynetwork.network.name)\n# Export the subnet names and cidr blocks\npulumi.export('network_subnets_names',my_subnet_names)\npulumi.export('network_subnets_cidr_blocks',my_subnet_cidrs_blocks)\npulumi.export('database_instance', mydatabase.sql.name)\npulumi.export('database_user', mydatabase.users.name)\npulumi.export('database_user_password', mydatabase.users.password)\npulumi.export('database_name', 
mydatabase.database.name)\n\npulumi.export('function_bucket_name', myfunction.bucket.name)\n#pulumi.export('function_bucket_object_name', myfunction.bucket_object.name)\npulumi.export('function_name', myfunction.function.name)\npulumi.export('function_trigger_url', myfunction.function.https_trigger_url)","sub_path":"gcp-ref-py/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"97556742","text":"import logging\n\nfrom celery import shared_task, current_app\nfrom kombu import Exchange\n\nlogger = logging.getLogger(\"cmsite.crossbar\")\n\ncmsite_exchange = Exchange('cmsite', 'fanout', durable=False)\n\n\ndef send_message(channel, **payload):\n data = {\n \"channel\": \"cmsite.\" + channel,\n \"payload\": payload\n }\n with current_app.pool.acquire(block=True) as conn:\n producer = conn.Producer(serializer='json')\n producer.publish(data, exchange=cmsite_exchange)\n\n\n@shared_task(ignore_result=True)\ndef send_broadcast_message(*args, **data):\n payload = {}\n if args:\n payload['args'] = args\n if data:\n payload['data'] = data\n send_message(\"broadcast\", **payload)\n\n\n@shared_task(ignore_result=True)\ndef send_user_toast(user_id, message, mtype=\"info\"):\n payload = {\n \"event\": \"toast\",\n \"mtype\": mtype,\n \"message\": message,\n }\n send_message(\"user.%d\" % user_id, **payload)\n\n\n@shared_task(ignore_result=True)\ndef send_user_message(user_id, **data):\n send_message(\"user.%d\" % user_id, **data)\n\n\n@shared_task(ignore_result=True)\ndef notify_link_task(result, user_id, send_result=True, *args, **data):\n \"\"\"\n notify task um andere tasks zu linken\n \"\"\"\n logger.info(\"R:%r S:%r A:%r D:%r\", result, send_result, args, data)\n payload = {}\n if send_result:\n data['result'] = result\n if args:\n payload['args'] = args\n if data:\n payload['data'] = data\n\n send_message(\"user.%d\" % user_id, **payload)\n","sub_path":"crossbar_server/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"84449541","text":"from GCForest import gcForest\nfrom sklearn.datasets import load_iris, load_digits\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\n#1.iris Example\n# loading the data\niris = load_iris()\nX = iris.data\ny = iris.target\nX_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.33)\n\ngcf = gcForest(shape_1X=4, window=2, tolerance=0.0)\ngcf.fit(X_tr, y_tr)\n\npred_X = gcf.predict(X_te)\nprint(pred_X)\n\n# evaluating accuracy\naccuracy = accuracy_score(y_true=y_te, y_pred=pred_X)\nprint('The iris datasets, gcForest accuracy : {}'.format(accuracy))\n\n\n#2.Digits Example\n# loading the data\ndigits = load_digits()\nX = digits.data\ny = digits.target\nX_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.4)\n\ngcf = gcForest(shape_1X=[8,8], window=[4,6], tolerance=0.0, min_samples_mgs=10, min_samples_cascade=7)\ngcf.fit(X_tr, y_tr)\n\npred_X = gcf.predict(X_te)\nprint(pred_X)\n\n\n# evaluating accuracy\naccuracy = accuracy_score(y_true=y_te, y_pred=pred_X)\nprint('gcForest accuracy : {}'.format(accuracy))\n\n\n#3.Saving Models to Disk\nfrom sklearn.externals import joblib\njoblib.dump(gcf, 'gcf_model.sav')\n\ngcf = joblib.load('gcf_model.sav')\n\n\n#4.Using mg-scanning and cascade_forest Sperately\n\ngcf = gcForest(shape_1X=[8,8], 
window=5, min_samples_mgs=10, min_samples_cascade=7)\nX_tr_mgs = gcf.mg_scanning(X_tr, y_tr)\nX_te_mgs = gcf.mg_scanning(X_te)\n\ngcf = gcForest(tolerance=0.0, min_samples_mgs=10, min_samples_cascade=7)\n_ = gcf.cascade_forest(X_tr_mgs, y_tr)\n\npred_proba = gcf.cascade_forest(X_te_mgs)\ntmp = np.mean(pred_proba, axis=0)\npreds = np.argmax(tmp, axis=1)\naccuracy_score(y_true=y_te, y_pred=preds)\n\ngcf = gcForest(tolerance=0.0, min_samples_mgs=20, min_samples_cascade=10)\n_ = gcf.cascade_forest(X_tr_mgs, y_tr)\n\npred_proba = gcf.cascade_forest(X_te_mgs)\ntmp = np.mean(pred_proba, axis=0)\npreds = np.argmax(tmp, axis=1)\naccuracy_score(y_true=y_te, y_pred=preds)\n\n#Skipping mg_scanning\ngcf = gcForest(tolerance=0.0, min_samples_cascade=20)\n_ = gcf.cascade_forest(X_tr, y_tr)\n\npred_proba = gcf.cascade_forest(X_te)\ntmp = np.mean(pred_proba, axis=0)\npreds = np.argmax(tmp, axis=1)\naccuracy_score(y_true=y_te, y_pred=preds)\n\n","sub_path":"ML/3-a.GCForest/gcForest_test.py","file_name":"gcForest_test.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39743088","text":"from django.shortcuts import render, redirect\nfrom .models import Contact\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\n\n# Create your views here.\ndef contact(request):\n if request.method == 'POST':\n listing_id = request.POST['listing_id']\n listing = request.POST['listing']\n name = request.POST['name']\n email = request.POST['email']\n phone = request.POST['phone']\n message = request.POST['message']\n user_id = request.POST['user_id']\n realtor_email = request.POST['realtor_email']\n\n if request.user.is_authenticated:\n user_id = request.user.id\n has_contacted = Contact.objects.all().filter(listing_id=listing_id,user_id=user_id)\n if has_contacted:\n messages.error(request,'you have already made an inquiry for this listing')\n return redirect('/listings/'+listing_id)\n contact = Contact(listing=listing, listing_id=listing_id,name=name,email=email,phone=phone,message=message,user_id=user_id)\n contact.save()\n send_mail(\n 'property listing inqury',\n 'Dear tang, please buy me a house and marry me as soon as possible. there has been inqury for ' + listing + 'sign into the admin panel for more info. love you so much!',\n '1093804614@qq.com',\n [realtor_email,'1093804614@qq.com','1987492924@qq.com'],\n # [realtor_email,'1987492924@qq.com'],\n fail_silently=False\n )\n messages.success(request,'your request has been submitted, the realtor will get back to you soon')\n return redirect('/listings/'+listing_id)","sub_path":"contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427555266","text":"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. 
All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom six.moves import mock\n\nimport sasctl.services.model_repository as mr\n\n\ndef test_create_model():\n    import copy\n    from sasctl import current_session\n\n    MODEL_NAME = 'Test Model'\n    PROJECT_NAME = 'Test Project'\n    PROJECT_ID = '12345'\n    USER = 'username'\n\n    with mock.patch('sasctl.core.requests.Session.request'):\n        current_session('example.com', USER, 'password')\n\n    TARGET = {'name': MODEL_NAME,\n              'projectId': PROJECT_ID,\n              'modeler': USER,\n              'description': 'model description',\n              'function': 'Classification',\n              'algorithm': 'Dummy Algorithm',\n              'tool': 'pytest',\n              'champion': True,\n              'role': 'Champion',\n              'properties': [{'name': 'custom1', 'value': 123},\n                             {'name': 'custom2', 'value': 'somevalue'}]}\n\n    # Passed params should be set correctly\n    target = copy.deepcopy(TARGET)\n    with mock.patch('sasctl.services.model_repository.get_project') as get_project:\n        with mock.patch('sasctl.services.model_repository.post') as post:\n            get_project.return_value = {'id': PROJECT_ID}\n            _ = mr.create_model(MODEL_NAME,\n                                PROJECT_NAME,\n                                description=target['description'],\n                                function=target['function'],\n                                algorithm=target['algorithm'],\n                                tool=target['tool'],\n                                is_champion=target['champion'],\n                                properties=dict(custom1=123, custom2='somevalue'))\n    assert post.call_count == 1\n    url, data = post.call_args\n\n    # dict isn't guaranteed to preserve order\n    # so k/v pairs of properties=dict() may be\n    # returned in a different order\n    assert sorted(target['properties'],\n                  key=lambda d: d['name']) \\\n        == sorted(data['json']['properties'],\n                  key=lambda d: d['name'])\n\n    target.pop('properties')\n    data['json'].pop('properties')\n    assert target == data['json']\n\n    # Model dict w/ parameters already specified should be allowed\n    # Explicit overrides should be respected.\n    target = copy.deepcopy(TARGET)\n    with mock.patch('sasctl.services.model_repository.get_project') as get_project:\n        with mock.patch('sasctl.services.model_repository.post') as post:\n            get_project.return_value = {'id': PROJECT_ID}\n            _ = mr.create_model(copy.deepcopy(target), PROJECT_NAME, description='Updated Model')\n    target['description'] = 'Updated Model'\n    assert post.call_count == 1\n    url, data = post.call_args\n\n    # dicts don't preserve order so property order may not match\n    assert target['properties'] == data['json']['properties']\n    target.pop('properties')\n    data['json'].pop('properties')\n    assert target == data['json']\n\n","sub_path":"tests/unit/test_model_repository.py","file_name":"test_model_repository.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"335466601","text":"from PIL import Image\n\nimage = Image.open(\"Bird.JPG\")\n\nimage.show()\n\n\nwidth = image.width\nheight = image.height\n\nprint(\"The image is \" + str(width) + \" pixels wide and \" + str(height) + \" pixels high\")\n\n\nprint(image.format, image.size, image.mode)","sub_path":"03-MemesGifs/Meme Format Script.py","file_name":"Meme Format Script.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"338795489","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom os.path import join as opj\nimport numpy as np\nfrom glob import glob\nfrom scipy.ndimage import label\nfrom statsmodels.stats.inter_rater import fleiss_kappa\nimport sys\n\n\n# until the first black frame\nmaxmovietime = 
7082.2\n\n\ndef load_annotations(dir):\n return [np.recfromcsv(obs) for obs in sorted(glob(opj(dir, '*')))]\n\n\ndef get_timecode_stats(data, fx):\n return fx(np.array([(fx(d['start']), fx(d['end'])) for d in data]))\n\n\ndef get_value_union(data, var):\n vals = []\n for d in data:\n vals.extend(list(d[var]))\n return set(vals)\n\n\ndef get_multivalue_union(data, var):\n vals = set()\n for d in data:\n for v in d[var]:\n vals = vals.union(v.split())\n return vals\n\n\ndef get_ioa_ts(data, props, match='contains'):\n # for every second\n ioas = np.zeros(int(maxmovietime), dtype=int)\n for obs in data:\n ioa = np.zeros(int(maxmovietime), dtype=int)\n for ev in obs:\n hit = True\n for k, v in props.items():\n if match == 'contains':\n if not v in ev[k]:\n hit = False\n break\n else:\n if not v == ev[k]:\n hit = False\n break\n if hit:\n ioa[int(ev['start']):int(ev['end']) + 1] += 1\n ioa[ioa > 1] = 1\n ioas += ioa\n return ioas\n\n\ndef get_events(data, aggreement=.5):\n # events are defined as consecutive timepoints where a specified fraction\n # of observer agree on the presence of an event\n # we decide to only consider annotations that overlap in time and share the\n # same 'sender'\n actors = get_value_union(data, 'actor')\n global_ioa = None\n for actor in actors:\n ioa = get_ioa_ts(data, dict(actor=actor), match='exact')\n rel_ioa = ioa.astype(float) / len(data)\n if rel_ioa.max() > 1.0:\n print(\"% WARNING: broken timestamps!!!\")\n ioa = rel_ioa >= aggreement\n if global_ioa is None:\n global_ioa = ioa.astype(int)\n else:\n global_ioa += ioa\n\n events = []\n segments, nsegments = label(global_ioa)\n for i in range(nsegments):\n segment = segments == i + 1\n events.append((segment.argmax(),\n len(segment) - segment[::-1].argmax()))\n return global_ioa, events\n\n\ndef get_rater_counts(data, events, prop, categories):\n counts = []\n for ev_start, ev_end in events:\n # add one column for \"found nothing\"\n ev_count = [0] * (len(categories) + 1)\n # for all observers\n for i, d in enumerate(data):\n # avoid double-counting\n counted = False\n # find the right annotation\n for annot in d:\n # start before event's end and end after event start\n if annot['start'] <= ev_end and annot['end'] >= ev_start:\n for c, cat in enumerate(categories):\n if isinstance(cat, int):\n if cat == annot[prop]:\n ev_count[c] += 1\n counted = True\n elif cat in annot[prop]:\n ev_count[c] += 1\n counted = True\n if counted:\n break\n if sum(ev_count) < len(data):\n # fill the \"who said nothing category\n ev_count[-1] = len(data) - sum(ev_count)\n #print(ev_count)\n assert len(data) == sum(ev_count)\n counts.append(ev_count)\n return counts\n\n\n# tex format help\ndef _ft(key, value, fmt='s'):\n key = key.replace('_', '')\n val_tmpl = '{{{{value:{}}}}}'.format(fmt)\n tex = '\\\\newcommand{{{{\\\\{{key}}}}}}{{{val_tmpl}}}'.format(val_tmpl=val_tmpl)\n return tex.format(key=key, value=value)\n\n\ndef _stats_helper(data, events, prop, categories, label, aggstr):\n counts = get_rater_counts(data, events, prop, categories)\n # events where more observers are in favor of a property than not\n print(_ft('Agg{}N{}'.format(aggstr, label),\n sum([e[0] > e[1] for e in counts]),\n '.0f'))\n print(_ft('Agg{}FK{}'.format(aggstr, label),\n fleiss_kappa(counts),\n '.2f'))\n\n\ndef print_descriptive_stats_as_tex(data):\n nevents = [len(o) for o in data]\n print(_ft('NEventsMin', min(nevents), 'd'))\n print(_ft('NEventsMax', max(nevents), 'd'))\n print(_ft('NEventsMedian', int(np.median(nevents)), 'd'))\n # do per 
aggreement-level\n print(_ft('UniqueActors', ', '.join([c.decode('utf-8') for c in sorted(get_value_union(data, 'actor'))])))\n print(_ft('UniqueRecipients', ', '.join([c.decode('utf-8') for c in sorted(get_value_union(data, 'recipient')) if c])))\n for aggreement, aggstr in ((.2, 'Twenty'), (.6, \"Sixty\"), (1.0, 'Hundred')):\n ts, events = get_events(data, aggreement=aggreement)\n print(_ft('Agg{}NEvents'.format(aggstr), len(events), 'd'))\n event_durations = [ev[1] - ev[0] for ev in events]\n event_distances = [ev[0] - events[i - 1][1] for i, ev in enumerate(events) if i]\n print(_ft('Agg{}MeanEventDuration'.format(aggstr), np.mean(event_durations), '.1f'))\n print(_ft('Agg{}MeanEventDistance'.format(aggstr), np.mean(event_distances), '.1f'))\n _stats_helper(data, events, 'intensity_of_body_contact', [1], 'IntenseStrong', aggstr)\n _stats_helper(data, events, 'intensity_of_body_contact', [0], 'IntenseWeak', aggstr)\n _stats_helper(data, events, 'intention', [1], 'Intention', aggstr)\n\n for var in ('valence_actor', 'valence_recipient', 'bodypart_actor', 'bodypart_recipient'):\n for value in get_multivalue_union(data, var):\n print(var, value, file=sys.stderr)\n _stats_helper(data, events, var, [value], var + value, aggstr)\n\nif __name__ == '__main__':\n data = load_annotations(sys.argv[1])\n print_descriptive_stats_as_tex(data)\n\n #import pylab as pl\n #pl.plot(get_ioa_ts(data, dict(sender=b'FORREST'), match='exact'))\n #for i in (.5, .75, 1.0):\n # ts, events = get_events(data, aggreement=i)\n # pl.plot(ts, label=str(i))\n # print(events)\n #pl.legend()\n #pl.show()\n","sub_path":"code/descriptive_stats.py","file_name":"descriptive_stats.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"111115473","text":"'''\nThis script collects all student repositories for a given assignment in a given organization at a given deadline\nRun this script from inside a directory that contains/will contain all the repos for this assignment\n'''\n\nfrom github3 import login\nimport getpass\nimport subprocess\nimport csv\nimport argparse\nimport os\n\ndef run(username, tokenFile, organization, deadline,assignment):\n\twith open(tokenFile, 'r') as file:\n\t\t\ttoken = file.readline().strip()\n\tgithub = login(username,token=token)\n\tmemberships = github.organization_memberships()\n\tcourse = None\n\n\tfor membership in memberships:\n\t\tif membership.organization.login.lower() == organization.lower():\n\t\t\tcourse = membership.organization\n\t\t\tbreak\n\n\tcurrentDir = os.getcwd()\n\n\tfor repo in course.repositories():\n\t\tif(repo.name.startswith(assignment)):\n\t\t\tif (os.path.exists(repo.name)): #check first that we don't already have a local copy of this repo\n\t\t\t\tos.chdir(currentDir + \"/\" + repo.name)\n\t\t\t\tsubprocess.call(\"git pull\", shell=True)\n\t\t\t\tsubprocess.call(\"git checkout \\\"`git rev-list --all -n 1 --first-parent --before=\\\"\" + deadline + \"\\\"`\\\"\", shell=True);\n\t\t\t\tos.chdir(currentDir)\n\t\t\telse:\n\t\t\t\tsubprocess.call(\"git clone \" + repo.ssh_url, shell=True)\n\t\t\t\tos.chdir(currentDir + \"/\" + repo.name)\n\t\t\t\tsubprocess.call(\"git checkout \\\"`git rev-list --all -n 1 --first-parent --before=\\\"\" + deadline + \"\\\"`\\\"\", shell=True);\n\t\t\t\tos.chdir(currentDir)\n\n\nparser = argparse.ArgumentParser(description='Collect repos')\nparser.add_argument('--username', help='Github username')\nparser.add_argument('--tokenFile', help='File 
containing your github token')\nparser.add_argument('--deadline', help=\"Deadline of the given assignment in format \\\"YYYY-MM-DD HH:MM:SS -0700\\\" where -0700 indicates the MST time zone\")\nparser.add_argument('--organization', help=\"Course github organization\")\nparser.add_argument('--assignment', help=\"Assignment name\")\n\nargs = parser.parse_args()\nrun(username=args.username, tokenFile=args.tokenFile, organization=args.organization, deadline=args.deadline, assignment=args.assignment)\n","sub_path":"collect_assignment_at_deadline.py","file_name":"collect_assignment_at_deadline.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"648621059","text":"import gym\nfrom ai_traineree.types import StateType, TaskType\n\nfrom typing import Callable, Optional, Tuple\n\n\nclass GymTask(TaskType):\n def __init__(self, env_name: str, state_transform: Optional[Callable]=None, reward_transform: Optional[Callable]=None, can_render=True):\n \n self.name = env_name\n self.env = gym.make(env_name)\n self.can_render = can_render\n self.is_discrete = \"Discrete\" in str(type(self.env.action_space))\n\n self.state_size = self.env.observation_space.shape[0]\n self.action_size = self.__determine_action_size(self.env.action_space)\n self.state_transform = state_transform\n self.reward_transform = reward_transform\n\n @staticmethod\n def __determine_action_size(action_space):\n if \"Discrete\" in str(type(action_space)):\n return action_space.n\n else:\n return sum(action_space.shape)\n\n def reset(self) -> StateType:\n if self.state_transform is not None:\n return self.state_transform(self.env.reset())\n return self.env.reset()\n\n def render(self, mode=\"rgb_array\"):\n if self.can_render:\n # In case of OpenAI, mode can be ['human', 'rgb_array']\n return self.env.render(mode=mode)\n else:\n print(\"Can't render. 
Sorry.\") # Yes, this is for haha\n\n def step(self, actions) -> Tuple:\n \"\"\"\n Each action results in a new state, reward, done flag, and info about env.\n \"\"\"\n if self.is_discrete:\n actions = int(actions)\n state, reward, done, info = self.env.step(actions)\n if self.state_transform is not None:\n state = self.state_transform(state)\n if self.reward_transform is not None:\n reward = self.reward_transform(reward)\n return (state, reward, done, info)\n","sub_path":"ai_traineree/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"237802025","text":"from sklearn.preprocessing import StandardScaler\nfrom sklearn import datasets\nimport numpy as np\n\n# Load iris data set and apply standard scaler.\niris = datasets.load_iris()\nX = iris.data\nfeatureNames = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)',\n 'petal width (cm)']\ny = iris.target\nX_std = StandardScaler().fit_transform(X)\n\nprint(featureNames)\n\n# Generate covariance matrix to show bivariate relationships.\ncov_mat = np.cov(np.transpose(X_std))\nprint('\\nCovariance matrix: \\n%s' % cov_mat)\n\n# When data is standardized, the covariance matrix is same as the\n# correlation matrix.\ncor_mat = np.corrcoef(np.transpose(X_std))\nprint('\\nCorrelation matrix: \\n%s' % cor_mat)\n\n# Perform an Eigen decomposition on the covariance matrix:\neig_vals, eig_vecs = np.linalg.eig(cov_mat)\nprint('\\nEigenvectors \\n%s' % eig_vecs)\nprint('\\nEigenvalues \\n%s' % eig_vals)\n\n#######\n# Show the scree plot.\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA as sklearnPCA\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\nfrom sklearn import datasets\nimport numpy as np\nimport pandas as pd\n\n# Load iris data set and apply standard scaler.\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\nX_std = StandardScaler().fit_transform(X)\n\n# Split x and y into test and training.\nX_train, X_test, y_train, y_test = train_test_split(\n X_std, y, test_size=0.25, random_state=0)\n\n# Create principal components.\nsklearn_pca = sklearnPCA(n_components=2)\n\n# Transform the data.\nX_train = sklearn_pca.fit_transform(X_train)\n\n# Transform test data.\nX_test = sklearn_pca.transform(X_test)\n\n# Perform logistic regression.\nlogisticModel = LogisticRegression(fit_intercept=True, random_state=0,\n solver='liblinear')\nlogisticModel.fit(X_train, y_train)\n\n# Generate predictions.\ny_pred = logisticModel.predict(X_test)\n\n# Show model coefficients and intercept.\nprint(\"\\n*** Intercept: \")\nprint(logisticModel.intercept_)\n\nprint(\"\\n*** Model Coefficients: \")\nprint(logisticModel.coef_)\n\n# Show confusion matrix and accuracy scores.\ncm = pd.crosstab(y_test, y_pred, rownames=['Actual'],\n colnames=['Predicted'])\n\nprint(\"\\n*** Confusion Matrix\")\nprint(cm)\n\nprint(\"\\n*** Classification Report\")\nprint(classification_report(y_test, y_pred))\n\n# For each X, calculate VIF and save in dataframe\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nvif = pd.DataFrame()\nvif[\"VIF Factor for Components\"] = \\\n [variance_inflation_factor(X_train, i) for i in 
range(X_train.shape[1])]\nprint(vif)\nprint(X_train)\n","sub_path":"Labs/Lab12/exercise13.py","file_name":"exercise13.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"494600242","text":"# Dictionaries\n# This is Python!\nD = {'a':1,'b':2,'c':3}\nprint(D)\n\nK = D.keys()\nV = D.values()\n\nprint(list(K))\nprint(list(V))\n\ndel D['b']\nprint(D)\n\nprint(list(K))\nprint(list(V))\n\nK | {'x': 4}\n#V & {'x': 4}\n#V & {'x':4}.values()\n\nD = {'a':1, 'b':2, 'c':3}\nD.keys() & D.keys()\n#D.keys() & D.keys()\nD.keys() & {'b'}\nD.keys() & {'b': 1}\nD.keys() | {'b','c','b','d'}\n\n\n# Sorting dictionary keys\nD = {'a':1,'b':2,'c':3}\nprint(D)\n\nKs = D.keys()\nKs = list(Ks)\nKs.sort()\nfor k in Ks:\n    print(k, D[k])\n    # This sorts, but it only works inside the loop. To do it\n    # properly, use sorted()\n\nKs = D.keys()\nfor k in Ks:\n    print(k, D[k])\n\nprint(k)\n\nfor k in sorted(D): print(k, D[k])\n\n    # page 281\n\n    \n","sub_path":"Словари.py","file_name":"Словари.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"434271061","text":"from redis_cache import SimpleCache, logging\n\nfrom melange import settings\nfrom .singleton import Singleton\n\n\nlogger = logging.getLogger(__name__)\n\n\n@Singleton\nclass Cache:\n    def __init__(self):\n        self.cache = SimpleCache(expire=3600,\n                                 host=settings.CACHE_REDIS_HOST,\n                                 port=settings.CACHE_REDIS_PORT,\n                                 db=settings.CACHE_REDIS_DB,\n                                 password=settings.CACHE_REDIS_PASSWORD,\n                                 namespace=settings.CACHE_NAMESPACE)\n\n        if not self.cache.connection:\n            logger.warning(\"Could not establish a connection with redis. 
Message deduplication won't work\")\n\n    def store(self, key, value, expire=None):\n        if not self.cache.connection:\n            return\n\n        return self.cache.store(key, value, expire)\n\n    def get(self, key):\n        if not self.cache.connection:\n            return\n\n        return self.cache.get(key)\n\n    def __contains__(self, key):\n        return self.cache.connection and key in self.cache\n","sub_path":"melange/infrastructure/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"587439679","text":"import platform, distutils.core, distutils.extension, setuptools, sys, os\nfrom setuptools.command.install import install\ntry:\n    import Cython.Build\nexcept:\n    os.system('pip3 install Cython')\n    import Cython.Build\n\nclass CustomInstallCommand(install):\n    \"\"\"Customized setuptools install command\"\"\"\n    def run(self):\n        if sys.platform == \"darwin\":\n            os.system('brew install spatialindex')\n        elif sys.platform.startswith('linux'):\n            os.system('sudo apt install python3-rtree')\n        else:\n            exception_message = '''You are trying to install spatial_access on an unsupported \n            platform. Note: We DO NOT support Windows.'''\n\n            raise Exception(exception_message, os.system)\n        install.run(self)\n\nouff_mac = []\nextra_dependency = []\nif sys.platform == \"darwin\":\n    ouff_mac = ['-mmacosx-version-min=10.9']\n    extra_dependency = ['rtree>=0.8.3']\n\nEXTENSION = distutils.extension.Extension(\n    name = 'transitMatrixAdapter', language = 'c++',\n    sources = ['spatial_access/transitMatrixAdapter.pyx'],\n    extra_compile_args = ['-Wno-unused-function', \n                          '-std=c++11', '-Wall', '-O3'\n                          ] + ouff_mac,\n    undef_macros = [\"NDEBUG\"],\n    extra_link_args = ouff_mac\n    )\n\nEXT_MODULES=Cython.Build.cythonize([EXTENSION],\n                                   #include_path = [\"/usr/local/include/\"],\n                                   language='c++')\n\nREQUIRED_DEPENDENCIES = ['fiona>=1.7.12',\n                         'cython>=0.28.2',\n                         'matplotlib>=2.0.2',\n                         'jellyfish>=0.5.6',\n                         'geopandas>=0.3.0',\n                         'psutil>=5.4.3',\n                         'pandas>=0.19.2',\n                         'numpy>=1.12.0',\n                         'osmnet>=0.1.4',\n                         'pandana>=0.4.0',\n                         'scipy>=0.18.1',\n                         'geopy>=1.11.0',\n                         'Shapely>=1.6.1',\n                         'scikit_learn>=0.19.1',\n                         'atlas>=0.27.0',\n                         'jupyter_contrib_nbextensions>=0.5.0',\n                         'jupyter_nbextensions_configurator>=0.1.7']\n\nREQUIRED_DEPENDENCIES += extra_dependency\n\nSUBMODULE_NAMES = ['spatial_access.p2p', \n                   'spatial_access.ScoreModel', \n                   'spatial_access.CommunityAnalytics',\n                   'spatial_access.ConfigInterface',\n                   'spatial_access.NetworkInterface',\n                   'spatial_access.MatrixInterface']\n\nsetuptools.setup(\n    cmdclass = {'install':CustomInstallCommand},\n    name = 'spatial_access',\n    author = 'Logan Noel (lmnoel)',\n    url='https://github.com/GeoDaCenter/spatial_access',\n    author_email='lnoel@uchicago.edu',\n    version='0.1.1',\n    ext_modules=EXT_MODULES,\n    install_requires=REQUIRED_DEPENDENCIES,\n    py_modules=SUBMODULE_NAMES\n    )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"38845901","text":"#####################################################\n#\tPerformance-test algorithm for an RND model\t#\n#\tusing the training and test datasets\t\t#\n#####################################################\n\nimport os\nimport gc\nimport numpy\nimport h5py\nimport math\nfrom pandas import read_csv\nfrom keras.callbacks import CSVLogger\nfrom keras.models import *\nfrom keras.layers import *\nfrom sklearn.metrics import 
mean_squared_error\n\nporcentajeEntrenamiento = 0.8\nfeatures = 4\nsizeSeq = 10\nnombreModelo = 'modeloGPS001e34'\n\ndef cls():\n\tos.system('cls' if os.name=='nt' else 'clear')\n\ndef normalizarX(dataset):\n#\tdataset[:, 0] = [k / (5) for k in dataset[:, 0]]\n#\tdataset[:, 1] = [k / (7) for k in dataset[:, 1]]\n#\tdataset[:, 2] = [k / (43) for k in dataset[:, 2]]\n\tdataset[:, 3] = [k / (1.2) for k in dataset[:, 3]]\n\treturn dataset\n\ndef denormalizarX(dataset):\n#\tdataset[:, 0] = [k * (5) for k in dataset[:, 0]]\n#\tdataset[:, 1] = [k * (7) for k in dataset[:, 1]]\n#\tdataset[:, 2] = [k * (43) for k in dataset[:, 2]]\n\tdataset[:, 3] = [k * (1.2) for k in dataset[:, 3]]\n\treturn dataset\n\ndef normalizarY(dataset):\n\tdataset = [k/(1.2) for k in dataset]\n\treturn dataset\n\ndef denormalizarY(dataset):\n\tdataset = [k*(1.2) for k in dataset]\n\treturn dataset\n\n# Allows colored console output\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n\n# Trip object\nclass Viaje():\n\tdef __init__(self,id_viaje):\n\t\tself.id = id_viaje;\n\t\tself.x=numpy.array([]);\n\t\tself.y=numpy.array([]);\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n\tmape = 0.0\n\tfor i in range(len(y_true)):\n\t\tif y_true[i] == 0:\n\t\t\ty_true[i] = 0.001\n\t\tmape = mape + numpy.abs((y_true[i] - y_pred[i]) / y_true[i])[0]\n\treturn mape * 100/len(y_true)\n\n# Load the dataset\ndataframe = read_csv('../../datos/datasetDefinitivoGPSv4.tsv', engine = 'python', sep = \"\\t\")\ndataset = dataframe.values\ndataset = dataset.astype(float)\n\n# Compute per-column maximum and minimum values\nmaxColumnas = dataset.max(axis = 0)\nminColumnas = dataset.min(axis = 0)\n\n# Group the readings by distinct trip id (column 0) into a separate array\ndataset_viajes = []\nfor i in range(len(dataset)):\n\tid_lectura = dataset[i][0]\n\t# If dataset_viajes already holds trips\n\tif len(dataset_viajes) > 0:\n\t\t# Append points to the current trip\n\t\tif dataset_viajes[len(dataset_viajes)-1].id == id_lectura:\n\t\t\tdataset_viajes[len(dataset_viajes)-1].x = numpy.append(dataset_viajes[len(dataset_viajes)-1].x,[dataset[i][1:5]],axis=0)\n\t\t\tdataset_viajes[len(dataset_viajes)-1].y = numpy.append(dataset_viajes[len(dataset_viajes)-1].y,[dataset[i][5]],axis=0)\n\t\t# Start a new trip\n\t\telse:\n\t\t\tdataset_viajes.append(Viaje(id_lectura))\n\t\t\tdataset_viajes[len(dataset_viajes)-1].x = numpy.array([dataset[i][1:5]])\n\t\t\tdataset_viajes[len(dataset_viajes)-1].y = numpy.array([dataset[i][5]])\n\t# If this is the first trip\n\telse:\n\t\tdataset_viajes.append(Viaje(id_lectura))\n\t\tdataset_viajes[len(dataset_viajes)-1].x = numpy.array([dataset[i][1:5]])\n\t\tdataset_viajes[len(dataset_viajes)-1].y = numpy.array([dataset[i][5]])\n\ndataset = dataset_viajes\ndel(dataset_viajes)\ngc.collect()\n\n\n# split into training and test datasets\ntrain_size = int(len(dataset) * porcentajeEntrenamiento)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size], dataset[train_size:len(dataset)]\ndel(dataset)\ngc.collect()\n\n# Reshape the input to [samples, time steps, features]\n# Normalize the data to lie between 0 and 1 \nfor i in range(len(train)):\n\ttrain[i].x = normalizarX(train[i].x)\n\ttrain[i].x = numpy.reshape(train[i].x, (1,len(train[i].x), len(train[i].x[0])))\n\ttrain[i].y = normalizarY(train[i].y)\n\ttrain[i].y = 
numpy.reshape(train[i].y, (len(train[i].y),1))\n\nfor i in range(len(test)):\n\ttest[i].x = normalizarX(test[i].x)\n\ttest[i].x = numpy.reshape(test[i].x, (1,len(test[i].x), len(test[i].x[0])))\n\ttest[i].y = normalizarY(test[i].y)\n\ttest[i].y = numpy.reshape(test[i].y, (len(test[i].y),1))\n\n#Carga modelo\nmodel = load_model('../../resultados/modelos/{}.h5'.format(nombreModelo))\n\n# Calcula las metricas a partir de las predicciones de la red\nMSE_train = 0.0\nRMSE_train = 0.0\nMAE_train = 0.0\nMAPE_train = 0.0\nDESV_train = 0.0\n\nf = open(\"../../resultados/pruebas/prueba_{}_entrenamiento.txt\".format(nombreModelo),\"w\")\nf.write(\"Viaje\\tMSE\\tRMSE\\tMAE\\tMAPE\\tDESV\\n\")\n\n# Prueba con datos de entrenamiento\nfor i in range(len(train)):\n\n\tentrada_metadatos = numpy.array([])\n\tentrada_recurrente = numpy.array([])\n\tsalida_secuencia = numpy.array([])\n\n\tfor k in range(len(train[i].x[0])-sizeSeq):\n\t\t# Genera las secuencias para un viaje\n\t\tif k==0:\n\t\t\tentrada_metadatos = numpy.array( [train[i].x[0][k+sizeSeq][0:3]] )\n\t\t\tentrada_recurrente = numpy.array( train[i].x[:,k:k+sizeSeq,3:4] )\n\t\t\tsalida_secuencia = numpy.array( [ train[i].y[sizeSeq] ] )\n\t\telse:\n\t\t\tentrada_metadatos = numpy.append(entrada_metadatos , [ train[i].x[0][k+sizeSeq][0:3] ] , axis = 0)\n\t\t\tsecuenciaTemp = train[i].x[:,k:k+sizeSeq,3:4]\n\t\t\tentrada_recurrente = numpy.append(entrada_recurrente , secuenciaTemp , axis = 0)\n\t\t\tindice = int(k + sizeSeq)\n\t\t\tsalida_secuencia = numpy.append(salida_secuencia , [ train[i].y[indice] ] , axis = 0)\n\n\tprint(bcolors.ENDC + 'Prueba red GPS (modelo 1)\\n-Dataset: entrenamiento\\n')\n\tprint(bcolors.WARNING + 'Viaje {0}/{1}'.format(i+1,len(train)) + bcolors.ENDC)\n\t\n\tpredicciones = model.predict([entrada_metadatos,entrada_recurrente], batch_size = 1, verbose = 1)\n\tpredicciones = denormalizarY(predicciones)\n\treferencia = denormalizarY(salida_secuencia)\n\t\t\n\t# Calcula Metricas\n\terrores = [abs(referencia[k] - predicciones[k]) for k in range(len(referencia))]\n\n\tMSE = mean_squared_error(referencia, predicciones)\n\tRMSE = math.sqrt(MSE)\n\tMAE = numpy.mean(errores)\n\tMAPE = mean_absolute_percentage_error(referencia, predicciones)\n\tDESV = numpy.std(errores)\n\t\t\n\tf.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(i+1,MSE,RMSE,MAE,MAPE,DESV))\n\tMSE_train = MSE_train + MSE\n\tRMSE_train = RMSE_train + RMSE\n\tMAE_train = MAE_train + MAE\n\tMAPE_train = MAPE_train + MAPE\n\tDESV_train = DESV_train + DESV\n#\tmodel.reset_states()\n\tcls()\n\nRMSE_train = RMSE_train/len(train)\nMSE_train = MSE_train/len(train)\nMAE_train = MAE_train/len(train)\nMAPE_train = MAPE_train/len(train)\nDESV_train = DESV_train/len(train)\nf.write(\"PROMEDIO\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(MSE_train,RMSE_train,MAE_train,MAPE_train,DESV_train))\nf.close()\ndel(train)\ngc.collect()\n\nf = open(\"../../resultados/pruebas/prueba_{0}_prueba.txt\".format(nombreModelo),\"w\")\nf.write(\"Viaje\\tMSE\\tRMSE\\tMAE\\tMAPE\\tDESV\\n\")\n\n# Prueba con datos de prueba\nMSE_test = 0.0\nRMSE_test = 0.0\nMAE_test = 0.0\nMAPE_test = 0.0\nDESV_test = 0.0\n\nfor i in range(len(test)):\n\t\n\tentrada_metadatos = numpy.array([])\n\tentrada_recurrente = numpy.array([])\n\tsalida_secuencia = numpy.array([])\n\t\n\tfor j in range(len(test[i].x[0])-sizeSeq):\n\n\t\tif j==0:\n\t\t\tentrada_metadatos = numpy.array( [ test[i].x[0][j+sizeSeq][0:3] ] 
)\n\t\t\tentrada_recurrente = numpy.array( test[i].x[:,j:j+sizeSeq,3:4] )\n\t\t\tsalida_secuencia = numpy.array( [ test[i].y[sizeSeq] ] )\n\t\telse:\n\t\t\tentrada_metadatos = numpy.append(entrada_metadatos , [ test[i].x[0][j+sizeSeq][0:3] ] , axis = 0)\n\t\t\tsecuenciaTemp = test[i].x[:,j:j+sizeSeq,3:4]\n\t\t\tentrada_recurrente = numpy.append(entrada_recurrente , secuenciaTemp , axis = 0)\n\t\t\tindice = int(j + sizeSeq)\n\t\t\tsalida_secuencia = numpy.append(salida_secuencia , [ test[i].y[indice] ] , axis = 0)\n\t\t\n\tprint(bcolors.ENDC + 'Prueba red GPS (modelo 1)\\n-Dataset: prueba\\n')\n\tprint(bcolors.WARNING + 'Viaje {0}/{1}'.format(i+1,len(test)) + bcolors.ENDC)\n\t\n\tpredicciones = model.predict([entrada_metadatos,entrada_recurrente], batch_size = 1, verbose = 1)\n\tpredicciones = denormalizarY(predicciones)\n\treferencia = denormalizarY(salida_secuencia)\n\t\n\t# Calcula Metricas\n\terrores = [abs(referencia[k] - predicciones[k]) for k in range(len(referencia))]\n\n\t#Calcula el MSE y RMSE\n\tMSE = mean_squared_error(referencia, predicciones)\n\tRMSE = math.sqrt(MSE)\n\tMAE = numpy.mean(errores)\n\tMAPE = mean_absolute_percentage_error(referencia, predicciones)\n\tDESV = numpy.std(errores)\n\n\t\n\tf.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(i+1,MSE,RMSE,MAE,MAPE,DESV))\n\tMSE_test = MSE_test + MSE\n\tRMSE_test = RMSE_test + RMSE\n\tMAE_test = MAE_test + MAE\n\tMAPE_test = MAPE_test + MAPE\n\tDESV_test = DESV_test + DESV\n\tcls()\n\nRMSE_test = RMSE_test/len(test)\nMSE_test = MSE_test/len(test)\nMAE_test = MAE_test/len(test)\nMAPE_test = MAPE_test/len(test)\nDESV_test = DESV_test/len(test)\nf.write(\"PROMEDIO\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(MSE_test,RMSE_test,MAE_test,MAPE_test,DESV_test))\nf.close()\ndel(test)\ngc.collect()\n\n# Muestra RMSE Y MSE promedios para cada dataset\nprint(bcolors.FAIL +'RESULTADOS:')\nprint(\"DATASET\\tMSE\\tRMSE\\tMAE\\tMAPE\\tDESV_MAE\")\nprint(\"Entrenamiento \\t{}\\t{}\\t{}\\t{}\\t{}\".format(MSE_train,RMSE_train,MAE_train,MAPE_train,DESV_train))\nprint(\"Prueba \\t{}\\t{}\\t{}\\t{}\\t{}\".format(MSE_test,RMSE_test,MAE_test,MAPE_test,DESV_test)+bcolors.ENDC)\n","sub_path":"codigo/prueba/pruebaRND.py","file_name":"pruebaRND.py","file_ext":"py","file_size_in_byte":9192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"373322777","text":"\"\"\" Some code is adapted from a learning sample on Kai Nai's CSDN tech blog. 
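The script reads dated temperature and humidity rows from sense_data.csv and plots them on twin y-axes with matplotlib.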
\n https://blog.csdn.net/weixin_36279318/article/details/79082273\n\"\"\"\nimport csv\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\nfrom matplotlib import dates as mdates\n# plt.rcParams['font.sans-serif'] = ['SimHei']\n# plt.rcParams['axes.unicode_minus'] = False\n\nyears = mdates.YearLocator() # every year\nmonths = mdates.MonthLocator() # every month\nyearFmt = mdates.DateFormatter('%m-%d')\n\nfilename='sense_data.csv'\nwith open(filename,'r') as file:\n reader=csv.reader(file)\n header_row=next(reader)\n\n dates, temps, humids= [], [], []\n for row in reader:\n current_date = datetime.strptime(row[0],\"%Y-%m-%d\")\n dates.append(current_date)\n #4. convert the strings to integer data\n temps.append(int(row[1]))\n humids.append(int(row[2]))\n\n print(dates)\n print(temps)\n print(humids)\n #5. plot the data\n fig=plt.figure(dpi=128,figsize=(8, 5.4))\n\n #6. pass the temps list to the plot() method\n # ax1 = plt.plot()\n # format the ticks\n # plt.xaxis.set_major_locator(years)\n # plt.gca().xaxis.set_major_formatter(yearFmt)\n # plt.xaxis.set_minor_locator(months)\n\n # ax1.fmt_xdata = mdates.DateFormatter('%m-%d')\n #7. format the chart\n plt.title('Temperature and Humidity in March/April 2019',fontsize=20)\n plt.plot(dates,temps,c='red')\n plt.xlabel('',fontsize=22)\n plt.ylabel('Temperature(*C)', fontsize=14, color='r')\n plt.tick_params(axis='both',which='major',labelsize=14)\n fig.autofmt_xdate()\n\n ax2 = plt.gca().twinx()\n ax2.plot(dates,humids,c='blue')\n plt.ylabel('Humidity(%)', fontsize=14, color='b')\n \n plt.show()\n","sub_path":"analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"204615835","text":"import math\n\ndef sigmoid(z):\n return 1/(1+math.e**(-z))\n\ndef costfunction(y,a):\n return (y-a)**2\n\nx1=[]\nx2=[]\nx3=[]\ny=[]\n\nwith open('breast-cancer-train.csv', 'r') as f:\n line=f.readline()\n while line:\n line=line.strip(\"\\r\\n\")\n line=line.split(',')\n x1.append(int(line[0]))\n x2.append(int(line[1]))\n x3.append(int(line[2]))\n y.append(int(line[3]))\n line=f.readline()\n\nlength=len(x1)\nw1=0.0\nw2=0.0\nw3=0.0\nb=0.0\nJ=100\nalpha=0.1\naccuracy=0.03\nprint(\"Calculating, please wait for some time\")\n\nwhile J>accuracy:\n dw1=0.0\n dw2=0.0\n dw3=0.0\n db=0.0\n J=0.0\n for i in range(0,length):\n z=w1*x1[i]+w2*x2[i]+w3*x3[i]+b\n a=sigmoid(z)\n dz=(a-y[i])*2*a*(1-a)/length\n J+=costfunction(y[i],a)/length\n dw1+=dz*x1[i]/length\n dw2+=dz*x2[i]/length\n dw3+=dz*x3[i]/length\n db+=dz/length\n w1=w1-alpha*dw1\n w2=w2-alpha*dw2\n w3=w3-alpha*dw3\n b=b-alpha*db\n\nprint(w1,w2,w3,b)\nwhile int(input(\"Do you want to continue test?:\"))==1:\n test1=float(input(\"Input first para:\"))\n test2=float(input(\"Input second para:\"))\n test3=float(input(\"Input final para:\"))\n result=((sigmoid(w1*test1+w2*test2+w3*test3+b)))\n print(result)\n","sub_path":"Chapter2/ThreeVarForLoop.py","file_name":"ThreeVarForLoop.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293300166","text":"import unittest\nimport os\nfrom ..text_representations.bag_of_words import BagOfWords\nfrom ..convert.text_process import *\n\nclass TestNew(unittest.TestCase):\n\n def test_get_lang(self):\n self.assertEqual(Text(\"hey you\", 'en').get_language(), 'en')\n\n def test_bigrams(self):\n bigrams = get_bigrams(Text(\"Я увидел кошку.\", 'ru'))\n print(list(bigrams))\n self.assertIsInstance(list(bigrams), 
list)\n\n def test_construct_bow(self):\n with open(os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'test_data/text_1.txt')) as txt_file:\n text = ''.join(txt_file.readlines())\n bag_of_words = BagOfWords(text, 'en')\n self.assertIsInstance(construct_bow(bag_of_words.get_counter()), list)\n self.assertIsInstance(construct_bow_with_filtering(bag_of_words.get_counter()), list)\n\n def test_word_count(self):\n with open(os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'test_data/text_1.txt')) as txt_file:\n text = ''.join(txt_file.readlines())\n bag_of_words = BagOfWords(text, 'en')\n self.assertIsInstance(construct_bow(bag_of_words.get_counter()), list)\n self.assertIsInstance(construct_bow_with_filtering(bag_of_words.get_counter()), list)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_packages/text_process_tests.py","file_name":"text_process_tests.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"607686121","text":"from django.conf.urls import url, include\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login_reg$', views.login_reg, name='login_reg'),\n url(r'^wall$', views.wall, name='wall'),\n url(r'^doodle$', views.doodle, name='doodle'),\n url(r'^destroy/(?P<id>\\d+)$', views.destroy),\n url(r'^logout$', views.logout, name='logout')\n]\n","sub_path":"apps/doodle/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454810449","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Ship(Sprite):\n def __init__(self, ai_settings, screen):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Ship, self).__init__()\n self.screen = screen\n self.ai_settings=ai_settings\n \n #Load the ship image and get its bounding rect\n self.image = pygame.image.load(r\"D:\\Algorithm\\Python_code\\alien_invasion\\images\\ship.bmp\")\n self.rect = self.image.get_rect()\n self.screen_rect=screen.get_rect()\n \n #Place each ship at the bottom center of the screen\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n\n #Store a float value in the ship's center attribute\n self.center=float(self.rect.centerx)\n\n #Movement flags\n self.moving_right = False\n self.moving_left = False\n \n def update(self):\n #Update the ship's center value\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.center += self.ai_settings.ship_speed_factor\n if self.moving_left and self.rect.left > 0:\n self.center -= self.ai_settings.ship_speed_factor\n \n #Update rect from self.center\n self.rect.centerx=self.center\n\n def blitme(self):\n \"\"\"Draw the ship at its current position\"\"\"\n self.screen.blit(self.image, self.rect)\n \n def center_ship(self):\n \"\"\"Center the ship on the screen\"\"\"\n self.center=self.screen_rect.centerx","sub_path":"alien_invasion/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"354682837","text":"from django.utils.timezone import now\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nfrom math import ceil\n\nfrom stories.forms import ProposalForm\nfrom stories.models import Seed, Proposal, Vote\n\nfrom account.models import Fav\n\n\ndef detail_seed(request, pk):\n \"\"\"\n Detail seed view returns:\n -whether the user has already voted\n -whether the user has already added this story to their favorites\n -the beginning of the story and the valid proposals\n -pending proposals with the number of votes for each\n -a form if the user is logged in and wants to participate\n 
\"\"\"\n # define default values\n already_participate = False\n already_favorite = False\n proposal_vote = None\n\n seed = get_object_or_404(Seed, pk=pk)\n total_vote = Vote.objects.filter(related_seed=seed).count()\n\n valid_proposals = Proposal.objects.filter(\n is_valid=True,\n related_seed=seed\n ).order_by('created_at')\n\n proposals = list(\n Proposal.objects.filter(\n related_seed=seed, is_valid=False\n )\n )\n\n # associate each proposal with rate value in %\n for index, proposal in enumerate(proposals):\n if total_vote > 0:\n rate = ceil(\n (Vote.objects.filter(related_proposal=proposal).count() * 100) / total_vote\n )\n else:\n rate = 0\n proposals[index] = (proposal, rate)\n\n if request.user.is_authenticated:\n try:\n Proposal.objects.get(\n related_seed=seed, author=request.user, is_valid=False\n )\n already_participate = True\n except ObjectDoesNotExist:\n # let default value\n pass\n try:\n fav = Fav.objects.get(user=request.user)\n if seed in fav.stories.all():\n already_favorite = True\n except ObjectDoesNotExist:\n # let default value\n pass\n try:\n proposal_vote = Vote.objects.get(\n user=request.user,\n related_seed=seed,\n )\n except ObjectDoesNotExist:\n # let default value\n pass\n\n if len(request.POST) > 0:\n form = ProposalForm(request.POST)\n if form.is_valid():\n proposal = form.save(commit=False)\n proposal.author = request.user\n proposal.created_at = now()\n proposal.related_seed = seed\n proposal.save()\n return redirect('detail-seed', pk=seed.id)\n else:\n return render(request, 'stories/detail.html', {\n 'form': form,\n 'seed': seed,\n 'valid_proposals': valid_proposals,\n 'proposals': proposals\n })\n else:\n form = ProposalForm()\n return render(request, 'stories/detail.html', {\n 'form': form,\n 'seed': seed,\n 'valid_proposals': valid_proposals,\n 'proposals': proposals,\n 'already_participate': already_participate,\n 'already_favorite': already_favorite,\n 'proposal_vote': proposal_vote,\n })\n","sub_path":"stories/views/detail_seed.py","file_name":"detail_seed.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597098648","text":"from app.main import db\n\nclass FollowModel(db.Model):\n __tablename__ = \"follow\"\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n public_id = db.Column(db.String(100), unique=True, nullable=False)\n registered_on = db.Column(db.DateTime, nullable=False)\n following_user_username = db.Column(db.String, db.ForeignKey(\"follower.username\"), nullable=False)\n followed_pet_id = db.Column(db.String, db.ForeignKey(\"followed.public_id\"), nullable=False)\n\n def __repr__(self):\n return \"\".format(self.following_user_username, self.followed_pet_id)","sub_path":"app/main/models/follow_model.py","file_name":"follow_model.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"211864107","text":"from Field_D_SupportingClasses import *\r\n\r\nProgramID = \"DF Word Score Sonifier v1.0\"\r\nWorkTitle = \"Untitled Sonification\"\r\nLyricist = \"\"\r\n\r\nInput = DF_TextInput()\r\nWorkTitle = Input.provideTitle()\r\nLyricist = Input.provideLyricist()\r\nverses = Input.provideVerses()\r\npositions = Input.providePositions()\r\nscores = Input.provideScrabbleScores()\r\nPlanner = DF_SongPlanner(verses, positions, scores)\r\nverseKeys = 
Planner.getVerseKeys()\r\nPlanner.getBassPart(Planner.homeKey)\r\nPlanner.getTenorPart(Planner.homeKey)\r\nPlanner.getAltoPart(Planner.homeKey)\r\nPlanner.getSopPart(Planner.homeKey)\r\n\r\nX = DF_MusicXML(WorkTitle, ProgramID, Lyricist)\r\nbasNotes = Planner.bassNotes\r\nbasDurations = Planner.bassRhythms\r\nbasLyric = Planner.bassWords\r\nbasPos = Planner.bassPositions\r\nbasTies = Planner.bassTies\r\ntenNotes = Planner.tenNotes\r\ntenDurations = Planner.tenRhythms\r\ntenLyric = Planner.tenWords\r\ntenPos = Planner.tenPositions\r\ntenTies = Planner.tenTies\r\naltoNotes = Planner.altoNotes\r\naltoDurations = Planner.altoRhythms\r\naltoLyric = Planner.altoWords\r\naltoPos = Planner.altoPositions\r\naltoTies = Planner.altoTies\r\nsopNotes = Planner.sopNotes\r\nsopDurations = Planner.sopRhythms\r\nsopLyric = Planner.sopWords\r\nsopPos = Planner.sopPositions\r\nsopTies = Planner.sopTies\r\nX.writeSop(sopNotes, sopDurations, sopLyric, sopPos, sopTies)\r\nX.writeAlto(altoNotes, altoDurations, altoLyric, altoPos, altoTies)\r\nX.writeTenor(tenNotes, tenDurations, tenLyric, tenPos, tenTies)\r\nX.writeBass(basNotes, basDurations, basLyric, basPos, basTies)\r\nX.endXMLFile()","sub_path":"Field_D_main.py","file_name":"Field_D_main.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321924581","text":"# -*- coding: utf-8 -*-\n\nimport re\nimport json\nimport requests\nfrom datetime import datetime\nfrom datetime import timezone\nfrom datetime import timedelta\nfrom html.parser import HTMLParser\n\n'''\nBuild news.json from the announcements section of the Hamamatsu City coronavirus site\n[Hamamatsu City coronavirus site](https://www.city.hamamatsu.shizuoka.jp/koho2/emergency/korona.html)\n\nConvert the announcements DOM (the div.box_info_cnt block traced in the handle_starttag comments below) into JSON of the following shape:\n{\n \"newsItems\": [\n {\n \"date\": \"2021\\/03\\/12\",\n \"url\": \"https://www.city.hamamatsu.shizuoka.jp//koho2/emergency/20210312_2.html\",\n \"text\": \"新型コロナウイルス感染症による患者確認について【3例目】\"\n },\n ]\n}\n'''\n\nclass NewsParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.BASE_URL = 'https://www.city.hamamatsu.shizuoka.jp'\n self.inContents = False\n self.inDay = False\n self.ulInDay = False\n self.listInDay = False\n self.link = False\n self.news = []\n self.currentDate = ''\n self.supplement = ''\n self.starttag = ''\n self.endtag = ''\n\n def handle_starttag(self, tag, attrs):\n attrs = dict(attrs)\n self.starttag = tag\n #<div class=\"box_info_cnt\">
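 # A minimal usage sketch (hypothetical HTML, shaped like the nested structure
 # traced in the comments below; feed()/close() are the standard HTMLParser API
 # and the news list comes straight from this class):
 #   parser = NewsParser()
 #   parser.feed('<div class=\"box_info_cnt\"><li>3月12日<ul><li>'
 #               '<a href=\"/koho2/x.html\">title</a></li></ul></li></div>')
 #   parser.close()
 #   parser.news  # -> [{'date': 'YYYY/03/12', 'url': BASE_URL + '/koho2/x.html', 'text': 'title'}]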
\n if tag == \"div\" and \"class\" in attrs and attrs['class'] == \"box_info_cnt\":\n self.inContents = True\n return\n #
  • x月y日\n if tag == \"li\" and self.inContents and not self.inDay:\n self.inDay = True\n return\n #
  • x月y日
      \n if tag == \"ul\" and self.inDay:\n self.ulInDay = True\n return\n #
    • x月y日
      • \n if tag == \"li\" and self.ulInDay:\n self.listInDay = True\n return\n #
      • x月y日
        • yyyyyyyy\n if tag == \"a\" and self.listInDay:\n self.link = True\n if attrs[\"href\"].startswith(\"http\"):\n self.news.append({\"date\": self.currentDate,\"url\": attrs[\"href\"]})\n else:\n self.news.append({\"date\": self.currentDate,\"url\": self.BASE_URL + attrs[\"href\"]})\n return\n\n\n def handle_endtag(self, tag):\n self.endtag = tag\n if tag == \"a\" and self.link:\n self.link = False\n return\n if tag == \"li\" and self.listInDay:\n self.listInDay = False\n return\n if tag == \"ul\" and self.ulInDay:\n self.ulInDay = False\n return\n if tag == \"li\" and self.inDay:\n self.inDay = False\n return\n if tag == \"div\" and self.inContents:\n self.inContents = False\n return\n\n def handle_data(self, data):\n if self.listInDay and not self.link:\n data = data.strip().rstrip(\"/\")\n if data and self.lasttag == 'li':\n self.news.append({\"date\": self.currentDate,\"url\":\"\",\"text\": data})\n return\n if data:\n text = self.news[-1].get(\"text\")\n self.news[-1].update({\"text\": text + data.strip()})\n return\n if self.link:\n self.news[-1].update({\"text\": data.strip() + self.supplement})\n return\n if self.inDay and not self.ulInDay:\n data = data.strip()\n tokyo_tz = timezone(timedelta(hours=+9))\n currentTime = datetime.now(tokyo_tz)\n if data:\n m = re.match(r'([0-9]{1,2})月([0-9]{1,2})日', data)\n if m:\n month, day = m.groups()\n year = currentTime.year\n if int(month) == 12 and currentTime.month == 1:\n year = year - 1\n self.currentDate = \"{}/{}/{}\".format(year,month.zfill(2),day.zfill(2))\n else:\n m = re.match(r'([0-9]{4})年([0-9]{1,2})月([0-9]{1,2})日', data)\n year, month, day = m.groups()\n self.currentDate = \"{}/{}/{}\".format(year, month.zfill(2),day.zfill(2))\n return\n\ndef main():\n response = requests.get('https://www.city.hamamatsu.shizuoka.jp/koho2/emergency/korona.html')\n response.encoding = response.apparent_encoding\n parser = NewsParser()\n parser.feed(response.text)\n parser.close()\n\n print(json.dumps({\"newsItems\": parser.news}, indent=2, ensure_ascii=False))\nif __name__ == '__main__':\n main()\n","sub_path":"tool/create_news_json.py","file_name":"create_news_json.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"327306021","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nimport members.models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('members', '0024_auto_20160212_1303'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DonationLineItem',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('value', models.DecimalField(max_digits=6, help_text='The amount for a monetary donation or the assessed value for physical item(s) donated.', decimal_places=2)),\n ('description', models.TextField(max_length=2048, help_text='Description, if physical items are being donated. 
Blank for monetary donation.')),\n ],\n ),\n migrations.CreateModel(\n name='MembershipGiftCard',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('redemption_code', models.CharField(unique=True, max_length=20, help_text='A random string printed on the card, used during card redemption / membership activation.')),\n ('date_created', models.DateField(help_text='The date on which the gift card was created.', default=django.utils.timezone.now)),\n ('price', models.DecimalField(max_digits=6, help_text='The price to buy this gift card.', decimal_places=2)),\n ('month_duration', models.IntegerField(help_text='The number of months of membership this gift card grants when redeemed.')),\n ],\n ),\n migrations.CreateModel(\n name='MembershipGiftCardLineItem',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('card', models.OneToOneField(to='members.MembershipGiftCard', help_text='The membership gift card that was purchased.', on_delete=django.db.models.deletion.PROTECT)),\n ],\n ),\n migrations.CreateModel(\n name='PaidMembershipNudge',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('when', models.DateField(help_text='Date on which the member was reminded.', default=django.utils.timezone.now)),\n ('member', models.ForeignKey(help_text='The member we reminded.', to='members.Member')),\n ],\n ),\n migrations.CreateModel(\n name='Purchase',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('payment_date', models.DateField(help_text='The date on which the payment was made. Best guess if exact date not known.', default=django.utils.timezone.now)),\n ('payer_name', models.CharField(max_length=40, blank=True, help_text='Name of person who made the payment.')),\n ('payer_email', models.EmailField(max_length=40, blank=True, help_text='Email address of person who made the payment.')),\n ('payment_method', models.CharField(choices=[('$', 'Cash'), ('C', 'Check'), ('S', 'Square'), ('2', '2Checkout'), ('W', 'WePay'), ('P', 'PayPal')], max_length=1, help_text='The payment method used.', default='$')),\n ('total_paid_by_customer', models.DecimalField(max_digits=6, help_text='The full amount paid by the person, including payment processing fee IF CUSTOMER PAID IT.', decimal_places=2)),\n ('processing_fee', models.DecimalField(max_digits=6, help_text=\"Payment processor's fee, REGARDLESS OF WHO PAID FOR IT. Zero for cash/check.\", default=0, decimal_places=2)),\n ('ctrlid', models.CharField(max_length=40, help_text=\"Payment processor's id for this payment.\", default=members.models.next_payment_ctrlid)),\n ],\n ),\n migrations.RemoveField(\n model_name='paymentreminder',\n name='member',\n ),\n migrations.DeleteModel(\n name='PaymentReminder',\n ),\n migrations.AddField(\n model_name='membershipgiftcardlineitem',\n name='purchase',\n field=models.ForeignKey(to='members.Purchase', help_text='The payment that includes this gift card as a line item.', on_delete=django.db.models.deletion.PROTECT),\n ),\n migrations.AddField(\n model_name='donationlineitem',\n name='purchase',\n field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='members.Purchase', help_text='If donation is monetary, this is the payment that includes it as a line item. 
Else blank.', blank=True, null=True),\n ),\n ]\n","sub_path":"members/migrations/0025_auto_20160215_1529.py","file_name":"0025_auto_20160215_1529.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24843497","text":"import os \n# run this from inside processing dir\ndirs = []\nfor i in os.listdir(os.getcwd()):\n if os.path.exists(os.path.join(i, 'FASTQ')):\n dirs.append(i)\n\nall_fds = []\nfor d in dirs:\n dd = os.path.join(d, 'FASTQ')\n ddd = os.path.join(dd, \"Demux\")\n ddt = os.path.join(dd, 'Trim')\n all_fds += [(i[:-9], os.path.join(ddd, i), os.path.join(ddt, i[:-9]+\"_F_filt.fastq\")) for i in os.listdir(ddd) if i.endswith('R1.fastq')]\n\ndef blocks(files, size=65536):\n while True:\n b = files.read(size)\n if not b: break\n yield b\n\ndef count_lines2(filepath):\n with open(filepath, \"r\", encoding=\"utf-8\", errors='ignore') as f:\n count = sum(bl.count(\"\\n\") for bl in blocks(f))\n return count/4\n\ndef all_counts(arg):\n x, y, z = arg\n if os.path.exists(y):\n rc = count_lines2(y)\n else:\n rc = 'NA'\n if os.path.exists(z):\n tc = count_lines2(z)\n else:\n tc = 'NA'\n print(x, rc, tc)\n return (x, rc, tc)\n\nexisters = map(all_counts, all_fds)\n\nto_write = \"\\n\".join([\"\\t\".join([i, str(j), str(k)]) for i, j, k in existers])\n\nf_out = \"read_counts.tsv\"\n\nwith open(f_out, 'w') as foh:\n foh.write(to_write)\n","sub_path":"otu_scripts/gather_trim_info.py","file_name":"gather_trim_info.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"156892986","text":"# -*- coding: utf-8 -*-\r\n# @Time : 2019-07-11 18:27\r\n# @Author : ForestNeo\r\n# @Email : dr.forestneo@gmail.com\r\n# @Software: PyCharm\r\n\r\n#\r\n\r\nfrom mean import duchi\r\nfrom mean import piecewise\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nif __name__ == '__main__':\r\n # 产生数据并归一化\r\n data = np.random.normal(loc=0.2, scale=0.3, size=100000)\r\n data = np.clip(data, -1, 1)\r\n\r\n mean_ori = np.average(data)\r\n print(\"original mean: \", mean_ori)\r\n\r\n # 存放结果\r\n epsilon_list, error_duchi, error_piecewise = [], [], []\r\n\r\n for i in range(1, 50):\r\n epsilon = 0.02 * i\r\n epsilon_list.append(epsilon)\r\n\r\n # duchi's solution\r\n duchi_data = [duchi.encode_duchi(value, epsilon) for value in data]\r\n mean_duchi = np.average(duchi_data)\r\n err_duchi = np.fabs(mean_duchi-mean_ori)\r\n error_duchi.append(err_duchi)\r\n\r\n # piecewise solution\r\n pm_data = [piecewise.encode_piecewise_mine(value, epsilon) for value in data]\r\n mean_pm = np.average(pm_data)\r\n err_pm = np.fabs(mean_pm-mean_ori)\r\n print(epsilon, err_duchi, err_pm)\r\n\r\n error_piecewise.append(err_pm)\r\n\r\n # 画图\r\n fig = plt.figure(figsize=[12, 5])\r\n plt.plot(epsilon_list, error_duchi, label=\"duchi\")\r\n plt.plot(epsilon_list, error_piecewise, label=\"piecewise\")\r\n plt.xlabel(\"epsilon\")\r\n plt.ylabel(\"error\")\r\n plt.legend()\r\n plt.show()\r\n\r\n","sub_path":"mean/compare_different_methods.py","file_name":"compare_different_methods.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"648869959","text":"import math\r\nT = int(input())\r\nfor i in range(T):\r\n A,B = input().split()\r\n A = int(A)\r\n B = int(B)\r\n len = B-A\r\n if (len<=3):\r\n print(len)\r\n else:\r\n n = int(math.sqrt(len))\r\n if( len 
== n**2):\r\n print(2*n-1)\r\n elif(n**2 < len <= n**2+n):\r\n print(2*n)\r\n else:\r\n print(2*n+1)\r\n\r\n\r\n \r\n\r\n \r\n\r\n","sub_path":"Algorithm/Beakjoon/1011.py","file_name":"1011.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326810588","text":"import jax\nimport jax.numpy as np\nfrom jax.flatten_util import ravel_pytree\nfrom tqdm import tqdm\n\nfrom mcx import sample_forward\nfrom mcx.core import compile_to_logpdf\n\n\n__all__ = [\"sample\", \"generate\", \"sequential\"]\n\n\n# -------------------------------------------------------------------\n# == THE SAMPLING EXECUTION MODEL ==\n# -------------------------------------------------------------------\n\n\nclass sample(object):\n def __init__(\n self, rng_key, model, program, num_warmup_steps=1000, num_chains=4, **kwargs\n ):\n \"\"\" Initialize the sampling runtime.\n \"\"\"\n self.program = program\n self.num_chains = num_chains\n self.rng_key = rng_key\n\n init, warmup, build_kernel, to_trace, adapt_loglikelihood = self.program\n\n print(\"Initialize the sampler\\n\")\n\n validate_conditioning_variables(model, **kwargs)\n loglikelihood = build_loglikelihood(model, **kwargs)\n\n print(\"Find initial states...\")\n initial_position, unravel_fn = get_initial_position(\n rng_key, model, num_chains, **kwargs\n )\n loglikelihood = flatten_loglikelihood(loglikelihood, unravel_fn)\n loglikelihood = adapt_loglikelihood(loglikelihood)\n initial_state = jax.vmap(init, in_axes=(0, None))(\n initial_position, jax.value_and_grad(loglikelihood)\n )\n\n print(\"Warmup the chains...\")\n parameters, state = warmup(initial_state, loglikelihood, num_warmup_steps)\n\n print(\"Compile the log-likelihood...\")\n loglikelihood = jax.jit(loglikelihood)\n\n print(\"Build and compile the inference kernel...\")\n kernel = build_kernel(loglikelihood, parameters)\n kernel = jax.jit(kernel)\n\n self.kernel = kernel\n self.state = state\n self.to_trace = to_trace\n self.unravel_fn = unravel_fn\n\n def run(self, num_samples=1000):\n _, self.rng_key = jax.random.split(self.rng_key)\n\n @jax.jit\n def update_chains(state, rng_key):\n keys = jax.random.split(rng_key, self.num_chains)\n new_states, info = jax.vmap(self.kernel, in_axes=(0, 0))(keys, state)\n return new_states, info\n\n state = self.state\n chain = []\n\n rng_keys = jax.random.split(self.rng_key, num_samples)\n with tqdm(rng_keys, unit=\"samples\") as progress:\n progress.set_description(\n \"Collecting {:,} samples across {:,} chains\".format(\n num_samples, self.num_chains\n ),\n refresh=False,\n )\n for key in progress:\n state, info = update_chains(state, key)\n chain.append((state, info))\n self.state = state\n\n trace = self.to_trace(chain, self.unravel_fn)\n\n return trace\n\n\n# -------------------------------------------------------------------\n# == THE GENERATOR EXECUTION MODEL ==\n# -------------------------------------------------------------------\n\n\ndef generate(rng_key, model, program, num_warmup_steps=1000, num_chains=4, **kwargs):\n \"\"\" The generator runtime \"\"\"\n\n init, warmup, build_kernel, to_trace, adapt_loglikelihood = program\n\n print(\"Initialize the sampler\\n\")\n\n validate_conditioning_variables(model, **kwargs)\n loglikelihood = build_loglikelihood(model, **kwargs)\n\n print(\"Find initial states...\")\n initial_position, unravel_fn = get_initial_position(\n rng_key, model, num_chains, **kwargs\n )\n loglikelihood = flatten_loglikelihood(loglikelihood, 
unravel_fn)\n loglikelihood = adapt_loglikelihood(loglikelihood)\n initial_state = jax.vmap(init, in_axes=(0, None))(\n initial_position, jax.value_and_grad(loglikelihood)\n )\n\n print(\"Warmup the chains...\")\n parameters, state = warmup(initial_state, loglikelihood, num_warmup_steps)\n\n print(\"Compile the log-likelihood...\")\n loglikelihood = jax.jit(loglikelihood)\n\n print(\"Build and compile the inference kernel...\")\n kernel = build_kernel(loglikelihood, parameters)\n kernel = jax.jit(kernel)\n\n def run(rng_key, state):\n while True:\n _, rng_key = jax.random.split(rng_key)\n\n keys = jax.random.split(rng_key, num_chains)\n state, info = jax.vmap(kernel)(keys, state)\n\n yield (state, info)\n\n return run(rng_key, initial_state)\n\n\n# -------------------------------------------------------------------\n# == THE SEQUENTIAL EXECUTION MODEL ==\n# -------------------------------------------------------------------\n\n\nclass sequential(object):\n def __init__(\n self, rng_key, model, program, num_samples=1000, num_warmup_steps=1000\n ):\n \"\"\" Sequential Markov Chain Monte Carlo sampling.\n \"\"\"\n self.model = model\n self.program = program\n self.num_samples = num_samples\n self.num_warmup_steps = num_warmup_steps\n self.rng_key = rng_key\n\n init, warmup, build_kernel, to_trace, adapt_loglikelihood = self.program\n self.prg_init = init\n self.prg_warmup = warmup\n self.prg_build_kernel = build_kernel\n self.prg_to_trace = to_trace\n self.prg_adapt_loglikelihood = adapt_loglikelihood\n\n self.state = None\n\n def _initialize(self, **kwargs):\n loglikelihood = build_loglikelihood(self.model, **kwargs)\n initial_position, self.unravel_fn = get_initial_position(\n self.rng_key, self.model, self.num_samples, **kwargs\n )\n loglikelihood = flatten_loglikelihood(loglikelihood, self.unravel_fn)\n loglikelihood = self.prg_adapt_loglikelihood(loglikelihood)\n initial_state = jax.vmap(self.prg_init, in_axes=(0, None))(\n initial_position, jax.value_and_grad(loglikelihood)\n )\n return initial_state\n\n def _update_loglikelihood(self, **kwargs):\n loglikelihood = build_loglikelihood(self.model, **kwargs)\n loglikelihood = flatten_loglikelihood(loglikelihood, self.unravel_fn)\n loglikelihood = self.prg_adapt_loglikelihood(loglikelihood)\n loglikelihood = jax.jit(loglikelihood)\n return loglikelihood\n\n def _update_kernel(self, loglikelihood, parameters):\n kernel = self.prg_build_kernel(loglikelihood, parameters)\n kernel = jax.jit(kernel)\n return kernel\n\n def update(self, **kwargs):\n _, self.rng_key = jax.random.split(self.rng_key)\n\n validate_conditioning_variables(self.model, **kwargs)\n\n if self.state is None:\n self.state = self._initialize(**kwargs)\n\n # Since the data changes the log-likelihood, and thus the\n # kernel, need to be updated.\n #\n # Although there is no mention of this in the aforementionned\n # papers, we re-run the warmup to adapt the kernel parameters\n # to the new posterior geometry. 
Unlike the initial warmup, however,\n # we re-start the chains at the initial position.\n loglikelihood = self._update_loglikelihood(**kwargs)\n parameters, _ = self.prg_warmup(\n self.state, loglikelihood, self.num_warmup_steps\n )\n kernel = self._update_kernel(loglikelihood, parameters)\n\n @jax.jit\n def update_chains(state, rng_key):\n keys = jax.random.split(rng_key, self.num_samples)\n new_states, info = jax.vmap(kernel, in_axes=(0, 0))(keys, state)\n return new_states\n\n state = self.state\n\n rng_keys = jax.random.split(self.rng_key, self.num_samples)\n with tqdm(rng_keys, unit=\"samples\") as progress:\n progress.set_description(\n \"Collecting {:,} samples\".format(self.num_samples), refresh=False,\n )\n for key in progress:\n state = update_chains(state, key)\n self.state = state\n\n trace = self.prg_to_trace(self.state, self.unravel_fn)\n\n return trace\n\n\n#\n# SHARED UTILITIES\n#\n\n\ndef validate_conditioning_variables(model, **kwargs):\n \"\"\" Check that all variables passed as arguments to the sampler\n are random variables or arguments to the sampler. And converserly\n that all of the model definition's positional arguments are given\n a value.\n \"\"\"\n conditioning_vars = set(kwargs.keys())\n model_randvars = set(model.random_variables)\n model_args = set(model.arguments)\n available_vars = model_randvars.union(model_args)\n\n # The variables passed as an argument to the initialization (variables\n # on which the logpdf is conditionned) must be either a random variable\n # or an argument to the model definition.\n if not available_vars.issuperset(conditioning_vars):\n unknown_vars = list(conditioning_vars.difference(available_vars))\n unknown_str = \", \".join(unknown_vars)\n raise AttributeError(\n \"You passed a value for {} which are neither random variables nor arguments to the model definition.\".format(\n unknown_str\n )\n )\n\n # The user must provide a value for all of the model definition's\n # positional arguments.\n model_posargs = set(model.posargs)\n if model_posargs.difference(conditioning_vars):\n missing_vars = model_posargs.difference(conditioning_vars)\n missing_str = \", \".join(missing_vars)\n raise AttributeError(\n \"You need to specify a value for the following arguments: {}\".format(\n missing_str\n )\n )\n\n\ndef build_loglikelihood(model, **kwargs):\n artifact = compile_to_logpdf(model.graph, model.namespace)\n logpdf = artifact.compiled_fn\n loglikelihood = jax.partial(logpdf, **kwargs)\n return loglikelihood\n\n\ndef get_initial_position(rng_key, model, num_chains, **kwargs):\n conditioning_vars = set(kwargs.keys())\n model_randvars = set(model.random_variables)\n to_sample_vars = model_randvars.difference(conditioning_vars)\n\n samples = sample_forward(rng_key, model, num_samples=num_chains, **kwargs)\n initial_positions = dict((var, samples[var]) for var in to_sample_vars)\n\n # A naive way to go about flattening the positions is to transform the\n # dictionary of arrays that contain the parameter value to a list of\n # dictionaries, one per position and then unravel the dictionaries.\n # However, this approach takes more time than getting the samples in the\n # first place.\n #\n # Luckily, JAX first sorts dictionaries by keys\n # (https://github.com/google/jax/blob/master/jaxlib/pytree.cc) when\n # raveling pytrees. We can thus ravel and stack parameter values in an\n # array, sorting by key; this gives our flattened positions. 
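    # A small sketch of that key ordering (assuming jax.flatten_util.ravel_pytree,
    # imported at the top of this module):
    #   flat, unravel = ravel_pytree({\"b\": np.ones(2), \"a\": np.zeros(3)})
    #   flat is laid out as [0., 0., 0., 1., 1.] -- the \"a\" leaves come first
    #   because keys are visited in sorted order, matching the sorted(...) below.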
We then build\n # a single dictionary that contains the parameter values and use it to get\n # the unraveling function using `unravel_pytree`.\n positions = np.stack(\n [np.ravel(samples[s]) for s in sorted(initial_positions.keys())], axis=1\n )\n\n sample_position_dict = {\n parameter: values[0] for parameter, values in initial_positions.items()\n }\n _, unravel_fn = ravel_pytree(sample_position_dict)\n\n return positions, unravel_fn\n\n\ndef flatten_loglikelihood(logpdf, unravel_fn):\n def flattened_logpdf(array):\n kwargs = unravel_fn(array)\n return logpdf(**kwargs)\n\n return flattened_logpdf\n","sub_path":"mcx/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":11295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526286088","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom confs import URL, TITLE_TAG, HTML_FILTER, USER_AGENT\nfrom treffer import Treffer\n\npage = requests.get(URL, headers=USER_AGENT)\nsoup = BeautifulSoup(page.content, \"html.parser\")\n\nresults = soup.find_all(class_=HTML_FILTER)\nwork_data = []\n\nfor html in results:\n titel = html.find(TITLE_TAG).text\n work_data.append(Treffer(titel, html))\n \n\nfor i in work_data:\n print(i.titel)\n print(i.trend())","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"568257615","text":"import numpy as np\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras import models\nfrom tensorflow.keras import layers\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import keras\nfrom tensorflow.keras.utils import to_categorical\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data() # load the cifar-10 dataset\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\n# visualize sample images\nplt.figure(figsize=[10, 10])\nfor i in range(25): # images 1-25\n plt.subplot(5, 5, i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(x_train[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[y_train[i][0]])\nplt.show()\n\n# create a validation set\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train, random_state=1)\n\nprint(\"Train samples : \", x_train.shape, y_train.shape) # 40000 train samples of 32*32 with 3 channels\nprint(\"Validation samples : \", x_val.shape, y_val.shape) # 10000 validation samples of 32*32 with 3 channels\nprint(\"Test samples : \", x_test.shape, y_test.shape) # 10000 test samples of 32*32 with 3 channels\n\ngen = ImageDataGenerator(rotation_range=20, shear_range=0.2,\n width_shift_range=0.2, height_shift_range=0.2,\n horizontal_flip=True)\naugment_ratio = 1.5 # add 40000*1.5 = 60000 augmented train samples\naugment_size = int(augment_ratio * x_train.shape[0])\nrandidx = np.random.randint(x_train.shape[0], size=augment_size)\nx_augmented = x_train[randidx].copy()\ny_augmented = y_train[randidx].copy()\nx_augmented, y_augmented = gen.flow(x_augmented, y_augmented, batch_size=augment_size,\n shuffle=False).next()\nx_train = np.concatenate((x_train, x_augmented))\ny_train = np.concatenate((y_train, y_augmented))\ns = np.arange(x_train.shape[0])\nnp.random.shuffle(s)\nx_train = x_train[s]\ny_train = y_train[s]\nprint(\"after augmented (train set) : \", x_train.shape, y_train.shape)\n\n# normalize pixel values\nx_train = x_train.astype('float32')\nx_val = x_val.astype('float32')\nx_test = x_test.astype('float32')\nx_train = x_train / 255\nx_val = x_val / 255\nx_test = x_test / 255\n\n# one-hot encoding\ny_train = to_categorical(y_train, 10)\ny_val = to_categorical(y_val, 10)\ny_test = to_categorical(y_test, 10)\n\nepochs = 100\n\n# build the CNN model\nmodel = models.Sequential()\n# Conv layer 1\nmodel.add(layers.Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3)))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\n# Conv layer 2\nmodel.add(layers.Conv2D(32, (3, 3), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\n# Pooling layer 1\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Dropout(0.3))\n# Conv layer 3\nmodel.add(layers.Conv2D(64, (3, 3), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\n# Conv layer 4\nmodel.add(layers.Conv2D(64, (3, 3), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\n# Pooling layer 2\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Dropout(0.3))\n# Conv layer 5\nmodel.add(layers.Conv2D(128, (3, 3), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\n# Conv layer 6\nmodel.add(layers.Conv2D(128, (3, 3), padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\n# Pooling layer 3\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Dropout(0.3))\n# Flat\nmodel.add(layers.Flatten())\n# Dense layer 1\nmodel.add(layers.Dense(128))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Activation('relu'))\nmodel.add(layers.Dropout(0.5))\n# Dense layer 2\nmodel.add(layers.Dense(10, activation='softmax'))\n\nmodel.summary()\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nhistory = model.fit(x_train, y_train, epochs=epochs, batch_size=64, validation_data=(x_val, y_val))\ntest_loss, test_acc = model.evaluate(x_test, y_test)\nprint(\"Test accuracy : \", test_acc)\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs_range = range(epochs)\n\n# visualize training history (accuracy)\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy', color='blue', linestyle='solid')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy', color='blue', linestyle='dashed')\nplt.legend(loc='lower right')\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.title('Training and Validation Accuracy')\n\n# visualize training history (loss)\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss', color='red', linestyle='solid')\nplt.plot(epochs_range, val_loss, label='Validation Loss', color='red', linestyle='dashed')\nplt.legend(loc='upper right')\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.title('Training and Validation Loss')\nplt.show()\n\n# Test\npredict = model.predict(x_test)\npredict_classes = np.argmax(predict, axis=1)\nfig, axes = plt.subplots(5, 5, figsize=(15, 15))\naxes = axes.ravel()\nfor i in np.arange(0, 25):\n axes[i].imshow(x_test[i])\n axes[i].set_title(\"True: %s \\nPredict: %s\" % (class_names[np.argmax(y_test[i])], class_names[predict_classes[i]]))\n axes[i].axis('off')\n 
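 # A possible follow-up check (a sketch, assuming scikit-learn, which this
 # script already imports train_test_split from):
 #   from sklearn.metrics import confusion_matrix
 #   cm = confusion_matrix(np.argmax(y_test, axis=1), predict_classes)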
 plt.subplots_adjust(wspace=1)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587401351","text":"# -*- coding: utf-8 -*-\nfrom . import Mongua\nfrom routes import *\n\n\nclass Blog(Mongua):\n # Subclasses must implement the _fields classmethod to define their fields\n @classmethod\n def _fields(cls):\n fields = [\n ('title', str, ''),\n ('tags', dict, {}),\n ('intro', str, ''),\n ('content', str, ''),\n ('user_id', int, 0),\n ]\n fields.extend(super()._fields())\n return fields\n\n @classmethod\n def new(cls, form, **kwargs):\n m = super().new(form)\n # handle intro and tags\n m.intro = m.content.split('\\n')[0]\n m.save()\n return m\n\n @classmethod\n def insert(cls):\n form = {\n 'user_id': 1,\n 'title': \"ceui12345\",\n \"content\": \"## 123456\",\n \"intro\": \"- li\"\n }\n cls.new(form)","sub_path":"models/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95601061","text":"import torch\nimport click\n\nfrom _visualizer import *\nfrom _settings import set_dev_location, get_dataset_path, set_numpy_precision\nfrom _telegramer import send_text\nfrom _utils_torch import *\n\nfrom utils import get_config, get_data_loader_folder\nfrom model import BicycleGAN\n\nset_numpy_precision()\n\n\ndef get_data_loaders(config, train_core_path, test_core_path, is_test_iter=True): # core_path : paths for below dataset path\n new_size = config['new_size']\n height = config['crop_image_height']\n width = config['crop_image_width']\n batch_size = config['batch_size']\n test_size = config['test_size']\n\n train_path = os.path.join(get_dataset_path(), train_core_path)\n test_path = os.path.join(get_dataset_path(), test_core_path)\n\n train_loader = get_data_loader_folder(train_path, batch_size=batch_size, train=True, new_size=new_size, height=height, width=width * 2)\n test_loader = get_data_loader_folder(test_path, batch_size=test_size, train=False, new_size=None, height=height, width=width * 2)\n if is_test_iter:\n test_iter = iter(test_loader)\n return train_loader, test_iter\n return train_loader, test_loader\n\n\n@click.command()\n@click.option('--config', type=str, default='./config/facades.yaml', help='Path to the config file.')\n@click.option('--location', type=str, default='home', help='dev location [macbook | server | home]')\ndef main(config, location):\n set_dev_location(location)\n config = get_config(config)\n\n max_iters = config['max_iter']\n dim_latent = config['gen']['dim_latent']\n\n style_size = config['style_size']\n log_iter = config['log_iter']\n image_save_epoch = config['image_save_epoch']\n model_save_epoch = config['model_save_epoch']\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n train_loader, test_iter = get_data_loaders(config, 'facades/train', 'facades/val')\n model = BicycleGAN(config, device)\n\n epoch = 1\n loader_size = len(train_loader)\n epoch_size = max_iters // loader_size\n while epoch <= epoch_size:\n epoch += 1\n for iters, (real_a, real_b) in enumerate(train_loader):\n model.update_scheduler()\n\n real_a = real_a.to(device)\n real_b = real_b.to(device)\n\n model.forward(real_a, real_b)\n\n model.update_d(real_b)\n model.update_eg(real_b)\n model.update_g_alone()\n if device.type == 'cuda':\n torch.cuda.synchronize()\n\n # logger\n if not (iters + 1) % log_iter:\n model.print_log(epoch, epoch_size, iters, loader_size)\n 
show_batch_torch(torch.cat([real_a, model.fake_b_cvae, model.fake_b_clr, real_b]))\n\n # show test set results\n _ = model.eval_mode_all()\n with torch.no_grad():\n clear_jupyter_console()\n\n try:\n test_a, test_b, _, _ = next(test_iter)\n test_a, test_b = test_a.to(device), test_b.to(device)\n except:\n _, test_iter = get_data_loaders(config, 'facades/train', 'facades/val')\n test_a, test_b, _, _ = next(test_iter)\n test_a, test_b = test_a.to(device), test_b.to(device)\n\n random_style = torch.randn(style_size, dim_latent).to(device)\n repo = []\n for test_a_i, test_b_i in zip(test_a, test_b):\n test_a_i = test_a_i.unsqueeze(0).repeat(random_style.size(0), 1, 1, 1)\n test_b_i = test_b_i.unsqueeze(0)\n\n fake_b = model.generator(test_a_i, random_style)\n sample = reshape_batch_torch(torch.cat([test_a_i[:1], fake_b, test_b_i]), -1, 1)[0]\n repo.append(sample)\n\n img_saving = denormalize(np.concatenate(repo, axis=0))\n show(img_saving, figsize=(25, 25))\n\n if not (epoch + 1) % image_save_epoch:\n save_images(img_saving, save_name='{:03}.png'.format(epoch))\n\n if not (epoch + 1) % model_save_epoch:\n model.save_mdoels('models', save_name='{:03}'.format(epoch))\n\n send_text('Bicycle GAN Learning Finished')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430827227","text":"\"\"\"Module to resolve database drivers for services\"\"\"\nfrom typing import Union\n\nimport pkg_resources\n\nfrom riptide.db.driver.abstract import AbstractDbDriver\n\n\nDB_DRIVER_ENTRYPOINT_KEY = 'riptide.db_driver'\n\n\ndef get(service: 'Service') -> Union[AbstractDbDriver, None]:\n \"\"\"Returns the db driver instance for this service, if a driver is defined.\"\"\"\n # Look up package entrypoints for db drivers\n drivers = {\n entry_point.name:\n entry_point.load() for entry_point in pkg_resources.iter_entry_points(DB_DRIVER_ENTRYPOINT_KEY)\n }\n\n if service[\"driver\"][\"name\"] in drivers:\n return drivers[service[\"driver\"][\"name\"]](service)\n return None\n","sub_path":"riptide/db/driver/db_driver_for_service.py","file_name":"db_driver_for_service.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302184583","text":"#!/usr/bin/env python\nimport fileinput\nimport argparse\nimport re\n\n# for file_name in sys.argv[1:]: # skip sys.argv[0], which is script name\n# with open(file_name) as file_in:\n# for raw_line in file_in:\n# line = raw_line.rstrip()\n# if pattern in line:\n# print(line)\n\narg_parser = argparse.ArgumentParser(\n description=\"Print lines that match specied regular expression\",\n)\n\narg_parser.add_argument('-i', dest=\"ignore_case\", help=\"Ignore case\", action=\"store_true\")\narg_parser.add_argument('-n', dest=\"show_name\", help=\"Display file name\", action=\"store_true\")\n\narg_parser.add_argument('pattern', help=\"Pattern to find\")\narg_parser.add_argument('filenames', nargs=\"*\", help=\"File names to search\")\n\nargs = arg_parser.parse_args() # defaults to sys.argv[1:]\n\nprint(args)\n\npattern = re.compile(args.pattern, re.I if args.ignore_case else 0)\n\nfor raw_line in fileinput.input(args.filenames):\n line = raw_line.rstrip()\n if pattern.search(line):\n if args.show_name:\n print(fileinput.filename(), end=' ')\n 
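 # Example invocation (hypothetical files), mirroring grep with this
 # script's own -i (ignore case) and -n (show file name) flags:
 #   python using_fileinput.py -i -n \"pattern\" notes.txt src.py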
print(line)\n","sub_path":"using_fileinput.py","file_name":"using_fileinput.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"226404189","text":"# --- Day 6: Universal Orbit Map ---\n# Part 1\n\n# Example input - Answer for this is 42\n\n# COM)B\n# C)D\n# D)E\n# E)J\n# B)G\n# G)H\n# B)C\n# D)I\n# E)F\n# J)K\n# K)L\n\nfrom input import orbitalInput\nimport time\n\nexampleInput = ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G', 'G)H', 'D)I', 'E)J', 'J)K', 'K)L']\n\n\ndef countOrbits(taskInput):\n pathLists = [['COM']]\n\n steps = 0\n paths = 0\n output = 0\n\n for i in taskInput:\n const = i.split(')')[0]\n orbit = i.split(')')[1]\n\n appended = False\n \n for path in pathLists:\n if path[len(path)-1] == const:\n path.append(orbit)\n appened = True\n else:\n for p in path:\n if p == const:\n newPath = path[:path.index(p)]\n newPath.append(p)\n pathLists.append(newPath)\n appended = True\n \n if appended == False:\n pathLists.append([const, orbit])\n\n for path in pathLists:\n steps += len(path) - 1\n paths += 1\n\n output = (steps * 2) + paths\n\n for path in pathLists:\n print(path)\n\n return output\n\n\n## New idea ## Smashed it ##\n\ndef countToCOM(taskInput):\n t0 = time.time()\n output = 0\n\n for p in taskInput:\n steps = 1\n const = p.split(')')[0]\n\n while const != 'COM':\n for i in taskInput:\n if i.split(')')[1] == const:\n const = i.split(')')[0]\n\n steps += 1\n \n output += steps\n\n t1 = time.time()\n timeTaken = t1-t0\n return output, timeTaken\n\nout, time = countToCOM(orbitalInput)\n\nprint('Result is ' + str(out))\nprint('It took ' + str(time) + ' seconds to run.')","sub_path":"Day 6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"298190405","text":"import csv\nimport pv_meter_dict\nimport geocoder as gc\nimport re\n\n# set the row numbers for the apporpriate column\nLOCATION_COL = 16\nMETER_ID_COL = 18\nLATITUDE_COL = 19\nLONGITUDE_COL = 20\nCLEAN_UP_COL = 21\nCLEAN_ADDRESS_COL = 22\n\nlist = [\"^AF\", \"^A/F\", \"^A\\\\\\F\", \"^A F \", \"^F/O\", \"^IFO\", \"^AL \", \"^AC \", \"^ACROSS FR\", \"^ACROSS FROM\", \"^ACF\", \"^ACR FR\", \"^A/O\", \"^R/O\", \"^A/O\", \"^IF \", \"^IRO \"]\n\n# takes an existing value from the lat/long column and refactors it into a valid lat/long\ndef refactorLatLong(row, outputWrite):\n row[LATITUDE_COL] = row[LATITUDE_COL][:2] + \".\" + row[LATITUDE_COL][2:]\n row[LONGITUDE_COL] = \"-\" + row[LONGITUDE_COL][:2] + \".\" + row[LONGITUDE_COL][2:]\n row[CLEAN_UP_COL] = \"pre-existing\"\n outputWrite.writerow(row)\n\n# matches the current row meter id to a meter id in the meter_id_dictionary\n# extracts the lat/long from the dictionary and writes the new row with the lat/longs from the dict\ndef matchMeterId(row, outputWrite, meter_dictionary):\n meter_id = row[METER_ID_COL].replace(\" \", \"\")\n if meter_id in meter_dictionary.keys():\n lat_long = meter_dictionary.get(meter_id)\n row[LATITUDE_COL] = lat_long[0]\n row[LONGITUDE_COL] = lat_long[1]\n row[CLEAN_UP_COL] = \"meter\"\n outputWrite.writerow(row)\n \ndef query(location):\n location = location + ' COLUMBUS OH'\n result = gc.bing(location, key=api_key)\n if result.json != None:\n arr = [result.json['lat'], result.json['lng']]\n return arr\n\ndef cleanAddress(row):\n\n global list\n\n location = row[LOCATION_COL]\n \n # removes the most common prepends from the 
def cleanAddress(row):\n\n global prepend_patterns\n\n location = row[LOCATION_COL]\n \n # removes the most common prepends from the location entry\n for item in prepend_patterns:\n location = re.sub(item, '', location)\n\n location = location.strip()\n # if a prepend is present in the location entry\n if location != row[LOCATION_COL]:\n\n row[CLEAN_ADDRESS_COL] = location\n row[CLEAN_UP_COL] = \"pre address\"\n \n if row[CLEAN_ADDRESS_COL] in bingDict:\n lats = bingDict[row[CLEAN_ADDRESS_COL]]\n else:\n lats = query(location)\n if lats != None:\n bingDict[str(row[CLEAN_ADDRESS_COL])] = lats\n\n if lats != None:\n row[LATITUDE_COL] = lats[0]\n row[LONGITUDE_COL] = lats[1]\n\n outputWrite.writerow(row)\n\n\ndef matchAddress(row):\n location = row[LOCATION_COL]\n location = location.strip()\n add_match = re.match(r\"^\\d+?[A-Za-z]*\\s\\w*\\s?\\w+?\\s\\w{2}\\w*\\s*\\w*$\",location)\n if add_match:\n row[CLEAN_UP_COL] = \"address\"\n if location in bingDict:\n lats = bingDict[location]\n else:\n lats = query(location)\n if lats != None:\n bingDict[location] = lats\n if lats != None:\n row[LATITUDE_COL] = lats[0]\n row[LONGITUDE_COL] = lats[1]\n outputWrite.writerow(row)\n else:\n # if the location doesn't match the regex pattern, write that row to the secondary output file\n output2Write.writerow(row)\n\n\n# API key for email: scosDataCleaner1@outlook.com\napi_key = 'AuXSgXwz1axgZDxXpwQUxYn6PkqMG2UZjll027z8cAF4ZXG_lYeULxb3YSVWCr31'\n\n# This is the Bing Lookup Dictionary\nbingDict = {}\ni = 0\n\n# create meter dictionary\nmeter_dictionary = pv_meter_dict.meter_dict()\n\n# input and output files should be updated accordingly\n# input file can not have headers\ninputFile = 'needs_geocoded.csv'\noutputFile = 'output_needs_geocoded.csv'\noutput2File = 'output_leftovers.csv'\n\nwith open(inputFile, 'r') as csvFile:\n with open(outputFile, 'w') as outFile:\n with open (output2File, 'w') as out2File:\n\n inputRead = csv.reader(csvFile)\n outputWrite = csv.writer(outFile)\n output2Write = csv.writer(out2File)\n\n for row in inputRead:\n\n # tracks progress for large files\n i = i + 1\n if i % 20000 == 0:\n print(i)\n\n # if the row contains a lat/long, refactor it and write it to the output csv\n if int(row[LATITUDE_COL]) != 0 or int(row[LONGITUDE_COL]) != 0:\n refactorLatLong(row, outputWrite)\n continue\n \n # if the row contains a meter id, extract the lat/long from the meter_dict, write the row to the output csv\n meter_id = row[METER_ID_COL].replace(\" \", \"\")\n \n # if there is a value in the meter id column match that meter id to a lat/long from the dictionary\n if meter_id != '':\n matchMeterId(row, outputWrite, meter_dictionary)\n continue\n\n # remove the surrounding whitespace from the location entry\n row[LOCATION_COL] = str(row[LOCATION_COL]).strip()\n\n # remove prepends from the location entry (if applicable), and writes the row to the primary output\n cleanAddress(row)\n\n # if the location doesn't contain a prepend, this method looks for an \"address\" pattern\n # if the location entry matches the pattern, writes the row to primary output\n # otherwise the row is written to the secondary output\n if row[LATITUDE_COL] == '0':\n matchAddress(row)","sub_path":"ADD-107/geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":5324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"213648239","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 7 11:46:56 2021\n\n@author: mugdhapolimera\n\"\"\"\n\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 18 09:41:34 2020\n\n@author: mugdhapolimera\n\nSelect Gemini observing sample to quantify starburst-AGN-merger 
connection in\ndwarfs\n\n1. Compact \n2. Extended\n3. Starbursting\n4. Moderately SFing\n5. Strong Emission lines?\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.io.idl import readsav\nimport matplotlib as mpl\nlabel_size = 12\nmpl.rcParams['xtick.labelsize'] = label_size \nmpl.rcParams['ytick.labelsize'] = label_size \n\n\nthreshold = 10\nresfull = pd.read_csv(\"ECO_inobssample.csv\")\nresfull.index = resfull.name\n#inspring = (resfull.radeg > 8.75*15.) & (resfull.radeg < 15.75*15.)\ndwarf = (resfull.logmstar < threshold)\n\nrescat = readsav(\"eco_wresa_032918.dat\")\nrescat['name'] = rescat['econames']\n#rescatphot = readsav(\"resolvecatalogphot.dat\")\nresphot = pd.read_csv(\"ECO_barysample_photometrics.csv\")\nresphot.index = resphot.name\nresndx = [x for x in range(len(rescat.name)) if rescat.name[x] in list(resfull.name)]\nresphotndx = [x for x in range(len(resphot.name)) if resphot.name.iloc[x] in list(resfull.name)]\n\nresfull['logmbary'] = np.log10(10**resfull.logmstar + 10**resfull.logmgas)\nresfull['r50'] = rescat.r50[resndx]\nresfull['mur90'] = rescat.mur90[resndx]\nresfull['DEL_MU50'] = resphot.DEL_MU50[resphotndx]\nresfull['DEL_SFR'] = resphot.DEL_SFR[resphotndx]\nresfull['sfr_nuv_wise'] = resphot.SFR[resphotndx]\nresfull['logssfr'] = np.log10(resfull['sfr_nuv_wise']) + 9 - resfull.logmstar\nresfull['logsfr'] = np.log10(resfull['sfr_nuv_wise']) + 9 \n#resfull['BN'] = resphot.BN[resphotndx].astype('bool')\n\n#gem_red = ['rf0266','rf0284','rf0250','rf0045','rf0272','rf0370']\n#sami = list(pd.read_csv(\"SAMI_RESOLVE.csv\").name_1)\n#manga = list(pd.read_csv(\"MANGA_RESOLVE.csv\").name_1)\n#trans= ['rs1036','rs0022','rs1287','rs0014','rs0320','rs0537']\n#resfull = resfull[resfull.del_mu > 0]\n#resfull.loc['rs1456']['BN'] = 0\n\nres = resfull[dwarf & (resfull.logmbary > 9.2)] #inspring & inspring & \ndwarf = (res.logmstar < threshold)\n\nresflag = pd.read_csv(\"eco_emlineclass_dext_snr5_jhu.csv\")\nresflag.index = resflag.galname\nselagn = list(resflag.galname[resflag.sftoagn | resflag.agntosf | \\\n resflag.composite | resflag.defagn])\nsels = list(resflag.galname)\nressel = resfull.loc[sels]#[inspring]\nseldwarf = list(ressel[ressel.logmstar < 9.5].name)\nseldwarfagn = list(np.intersect1d(selagn, seldwarf))\nselim = list(ressel[(ressel.logmstar > 9.5) & (ressel.logmstar < 10)].name)\nselimagn = list(np.intersect1d(selagn, selim))\n#seldwarf = list(ressel[ressel.logmstar < threshold].name)\n#sel_trans = np.intersect1d(sels,trans)\n\n#barro = pd.read_csv(\"Barro_inobssample.csv\")\n#barro.index = barro.name \n#barro = barro.loc[sels]\n\n#sel_sami = np.intersect1d(sels,sami)\n#sel_manga = np.intersect1d(sels,manga)\n#sel_gem = np.intersect1d(sels,gem_red)\n\nhighsfr = list(ressel.name[ressel.logssfr > -0.5])\nbounds = [7.5,8.0,8.5,9., 9.5, 10.0]#np.arange(8.4,10.1,0.2)\n#cmap = plt.get_cmap('Spectral',6)#int(np.max(r)/0.5))\ncmap = mpl.colors.ListedColormap(['navy','navy','blue','mediumturquoise',\\\n 'darkgreen'])\nboundaries = bounds#np.array([0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5])#, 4.0])#, 4.5, 5.0])\nnorm = mpl.colors.BoundaryNorm(boundaries, cmap.N, clip=True)\n\n#plt.figure()\n#plt.ylabel('log($\\Delta$SFR$_{MS}$)')\n#plt.xlabel('$\\Delta \\mu_\\Delta$')\n#plt.plot(barro.DEL_MUDEL.loc[sels], \\\n# barro.DEL_SFR.loc[sels], '.', color = 'gray', alpha = 0.5,\n# label = 'SEL')\n#plt.plot(barro.DEL_MUDEL.loc[seldwarf], \\\n# barro.DEL_SFR.loc[seldwarf], '.', color = 'blue', label = 'Dwarf SELs')\n##plt.plot(res.DEL_MUDEL[res.BN], 
res.DEL_SFR[res.BN], 'o', color = 'blue', \\\n## label = 'Blue Nuggets')\n#plt.plot(barro.DEL_MUDEL.loc[seldwarfagn], \\\n# barro.DEL_SFR.loc[seldwarfagn], 'o', color = 'blue', ms = 10, \n# label = 'Optical Dwarf SEL AGN')\n#plt.plot(barro.DEL_MUDEL.loc[selim], \\\n# barro.DEL_SFR.loc[selim], '.', color = 'green', \n# label = 'Intermediate Mass SEL')\n#plt.plot(barro.DEL_MUDEL.loc[selimagn], \\\n# barro.DEL_SFR.loc[selimagn], 'o', color = 'green', ms = 10,\n# label = 'Intermediate Mass SEL AGN')\n##plt.plot(barro.DEL_MUDEL.loc[highsfr], \\\n## barro.DEL_SFR.loc[highsfr], 'o', color = 'lime', \n## label = 'High SSFR SEL')\n#plt.legend(fontsize = 10)\n\n#temp = rs1081,'rs0337',''rs0933'\n#dwarfagntargets = ['rs0010','rs1047','rs1143','rs0909','rs1038','rs0472'] #'rs0421',\n#dwarfsftargets = ['rs1163','rs1014','rs1200','rs0432',\\\n# 'rs1091','rs1178']\n\n#imagntargets = ['rs1150','rs1004','rs0978','rs1036']\n#imsftargets = ['rs0087','rs0237','rs0096','rs0320']\n\nanno = 0\n#transition = ['rs0022', 'rs0320', 'rs1036']\n#targets = dwarfagntargets + dwarfsftargets + imagntargets + imsftargets #+ list(sel_trans)\npottarg = np.intersect1d(resfull.name,seldwarf+selim)\nfig,ax = plt.subplots()\n#plt.plot(barro.DEL_MU50.loc[targets], \\\n# barro.DEL_SFR.loc[targets], 'o', ms = 15, mec = 'r', mfc = 'none',\n# mew = 2, label = 'This Proposal Targets')\n#plt.legend(fontsize = 15)\ncax = ax.scatter(resfull.DEL_MU50.loc[seldwarf+selim],\n resfull.DEL_SFR.loc[seldwarf+selim], marker = 'o', s=10, \n c = ressel.logmstar.loc[seldwarf+selim], cmap = cmap, norm = norm,\n label = 'SEL')\ncax2 = ax.scatter(resfull.DEL_MU50.loc[seldwarfagn+selimagn], \n resfull.DEL_SFR.loc[seldwarfagn+selimagn], marker = 'o', s=100, \n c = ressel.logmstar.loc[seldwarfagn+selimagn], cmap = cmap, norm= norm)\n#cb2 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,norm=norm)\n#cb2.set_label('stellar mass')\nfig.colorbar(cax,extend = 'min', ticks = bounds,boundaries = bounds)\nplt.ylabel('log($\\Delta$SFR$_{MS}$)',fontsize = 15)\nplt.xlabel('$\\Delta \\Sigma_e$',fontsize = 15)\n#plt.plot(barro.DEL_MU50.loc[sels], \\\n# barro.DEL_SFR.loc[sels], '.', color = 'gray', alpha = 0.5,\n# label = 'SEL')\n#plt.plot(barro.DEL_MU50.loc[seldwarf], \\\n# barro.DEL_SFR.loc[seldwarf], '.', color = 'blue', ms = 8, label = 'Dwarf SELs')\n##plt.plot(res.DEL_MU50[res.BN], res.DEL_SFR[res.BN], 'o', color = 'blue', \\\n## label = 'Blue Nuggets')\n#plt.plot(barro.DEL_MU50.loc[seldwarfagn], \\\n# barro.DEL_SFR.loc[seldwarfagn], 'o', color = 'blue', ms = 10, \n# label = 'Optical Dwarf SEL AGN')\n#plt.plot(barro.DEL_MU50.loc[selim], \\\n# barro.DEL_SFR.loc[selim], '.', color = 'green', ms = 8, \n# label = 'Intermediate Mass SEL')\n#plt.plot(barro.DEL_MU50.loc[selimagn], \\\n# barro.DEL_SFR.loc[selimagn], 'o', color = 'green', ms = 10,\n# label = 'Intermediate Mass SEL AGN')\n##plt.plot(barro.DEL_MU50.loc[sel_manga], \\\n## barro.DEL_SFR.loc[sel_manga], 'o', ms = 11, mec = 'gray', mfc = 'none',\n## mew = 2, label = 'SAMI/MaNGA/Gemini')\n#plt.plot(barro.DEL_MU50.loc[targets], \\\n# barro.DEL_SFR.loc[targets], 'o', ms = 11, mec = 'r', mfc = 'none',\n# mew = 2, label = 'This Proposal Targets')\n#plt.plot(barro.DEL_MU50.loc[trans], \\\n# barro.DEL_SFR.loc[trans], 'o', ms = 11, mec = 'magenta', mfc = 'none',\n# mew = 2, label = 'Transitional nuggets')\nif anno ==1:\n for i, txt in enumerate(pottarg):\n plt.annotate(txt, (resfull.DEL_MU50.loc[pottarg[i]], \\\n resfull.DEL_SFR.loc[pottarg[i]]), fontsize = 8)\n#plt.plot(barro.DEL_MU50.loc[highsfr], \\\n# 
barro.DEL_SFR.loc[highsfr], 'o', color = 'lime', \n# label = 'High SSFR SEL')\n\n#plt.plot(barro.DEL_MU50.loc[sel_sami], \\\n# barro.DEL_SFR.loc[sel_sami], 'o', ms = 10, mec = 'gray', mfc = 'none',\n# mew = 2)\n#plt.plot(barro.DEL_MU50.loc[sel_gem], \\\n# barro.DEL_SFR.loc[sel_gem], 'o', ms = 10, mec = 'gray', mfc = 'none', \n# mew = 2)\n\n\n#plt.figure()\n#plt.ylabel('log(M$_{bary}$/M$_\\odot$)')\n#plt.xlabel('log(M$_{halo}$/M$_\\odot$)')\n#plt.plot(ressel.logmh.loc[sels], \\\n# ressel.logmbary.loc[sels], '.', color = 'gray', label = 'SEL')\n#plt.plot(ressel.logmh.loc[seldwarf], \\\n# ressel.logmbary.loc[seldwarf], '.', color = 'blue', label = 'Dwarf SELs')\n##plt.plot(res.logmh[res.BN], res.logmbary[res.BN], 'o', color = 'blue', \\\n## label = 'Blue Nuggets')\n#plt.plot(ressel.logmh.loc[seldwarfagn], \\\n# ressel.logmbary.loc[seldwarfagn], 'o',ms = 10, color = 'blue', \n# label = 'Optical Dwarf SEL AGN')\n#plt.plot(ressel.logmh.loc[selim], \\\n# ressel.logmbary.loc[selim], '.', color = 'green', \n# label = 'Intermediate Mass SEL')\n#plt.plot(ressel.logmh.loc[selimagn], \\\n# ressel.logmbary.loc[selimagn], 'o', ms = 10,color = 'green', \n# label = 'Intermediate Mass SEL AGN')\n#plt.plot(ressel.logmh.loc[targets], ressel.logmbary.loc[targets], \\\n# 'o', ms = 11, mec = 'r', mfc = 'none', mew = 2,\n# label = 'This Proposal Targets')\n##plt.axhline(y=9.5, xmin=0.0, xmax=1.0, color='k')\n##plt.axhline(y=10, xmin=0.0, xmax=1.0, color='k', ls = '--')\n#plt.axvline(x=11.5, ymin=0.0, ymax=1.0, color='k')\n#plt.axvline(x=12.0, ymin=0.0, ymax=1.0, color='k', ls = '--')\n#plt.legend(fontsize = 10)\n\nressel['g_s'] = ressel.logmgas - ressel.logmstar\nfig,ax = plt.subplots()\n#plt.plot(ressel.logmstar.loc[targets], ressel.g_s.loc[targets], \\\n# 'o', ms = 11, mec = 'r', mfc = 'none', mew = 2,\n# label = 'This Proposal Targets')\nplt.axhline(y=0, xmin=0.0, xmax=1.0, color='k', ls = '--')\n#plt.axvline(x=9.5, ymin=0.0, ymax=1.0, color='k',ls = '--')\n#plt.legend(fontsize = 10)\ncax = ax.scatter(ressel.logmstar.loc[seldwarf+selim], ressel.g_s.loc[seldwarf+selim], marker = 'o', s=10, \n c = ressel.logmstar.loc[seldwarf+selim], cmap = cmap, norm = norm,\n label = 'SEL')\ncax2 = ax.scatter(ressel.logmstar.loc[seldwarfagn+selimagn], ressel.g_s.loc[seldwarfagn+selimagn], marker = 'o', s=100, \n c = ressel.logmstar.loc[seldwarfagn+selimagn], cmap = cmap, norm= norm)\n\nplt.ylabel('log(M$_{gas}$/M$_*$)')\nplt.xlabel('log(M$_{*}$/M$_\\odot$)')\nif anno == 1:\n for i, txt in enumerate(pottarg):\n plt.annotate(txt, (ressel.logmstar.loc[pottarg[i]], \\\n ressel.g_s.loc[pottarg[i]]), fontsize = 8)\n\n#plt.plot(ressel.logmstar.loc[sels], \\\n# ressel.g_s.loc[sels], '.', color = 'gray', label = 'SEL')\n#plt.plot(ressel.logmstar.loc[seldwarf], \\\n# ressel.g_s.loc[seldwarf], '.', color = 'blue', label = 'Dwarf SELs')\n##plt.plot(res.logmstar[res.BN], res.g_s[res.BN], 'o', color = 'blue', \\\n## label = 'Blue Nuggets')\n#plt.plot(ressel.logmstar.loc[seldwarfagn], \\\n# ressel.g_s.loc[seldwarfagn], 'o',ms = 10, color = 'blue', \n# label = 'Optical Dwarf SEL AGN')\n#plt.plot(ressel.logmstar.loc[selim], \\\n# ressel.g_s.loc[selim], '.', color = 'green', \n# label = 'Intermediate Mass SEL')\n#plt.plot(ressel.logmstar.loc[selimagn], \\\n# ressel.g_s.loc[selimagn], 'o', ms = 10,color = 'green', \n# label = 'Intermediate Mass SEL AGN')\n\n#bins = np.arange(7.7,11.5,0.2)\n#plt.figure()\n#plt.hist(ressel.logmstar.loc[sels], bins = bins, color = 'gray',label = 'SEL', density = True)\n#plt.hist(ressel.logmstar.loc[seldwarf], color 
= 'blue', label = 'Dwarf SELs', \n# density = True, histtype = 'step', linewidth = 2)\n#plt.hist(ressel.logmstar.loc[seldwarfagn], bins = bins, color = 'darkblue', \n# label = 'Optical Dwarf SEL AGN', density = True, histtype = 'step', \n# hatch = '\\\\', linewidth = 2)\n#plt.hist(ressel.logmstar.loc[selim],bins = bins, color = 'limegreen',\n# histtype = 'step', linewidth = 2,\n# label = 'Intermediate Mass SEL', density = True)\n#plt.hist(ressel.logmstar.loc[selimagn], bins = bins, color = 'darkgreen', \n# histtype = 'step', hatch = '/', linewidth = 2,\n# label = 'Intermediate Mass SEL AGN', density = True)\n#plt.hist(ressel.logmstar.loc[targets], bins = bins, color = 'red', \n# histtype = 'step', linewidth = 2,\n# label = 'This Proposal Targets', density = True)\n#plt.xlabel('log(M$_{*}$/M$_\\odot$)')\n#plt.ylabel('Relative frequency')\n#plt.legend()\n#\n#bins = np.arange(10.5,14.3,0.2)\n#plt.figure()\n#plt.hist(ressel.logmh.loc[sels], bins = bins, color = 'gray',label = 'SEL', density = True)\n#plt.hist(ressel.logmh.loc[seldwarf], color = 'blue', label = 'Dwarf SELs', \n# density = True, histtype = 'step', linewidth = 2)\n#plt.hist(ressel.logmh.loc[seldwarfagn], bins = bins, color = 'darkblue', \n# label = 'Optical Dwarf SEL AGN', density = True, histtype = 'step', \n# hatch = '\\\\', linewidth = 2)\n#plt.hist(ressel.logmh.loc[selim],bins = bins, color = 'limegreen',\n# histtype = 'step', linewidth = 2,\n# label = 'Intermediate Mass SEL', density = True)\n#plt.hist(ressel.logmh.loc[selimagn], bins = bins, color = 'darkgreen', \n# histtype = 'step', hatch = '/', linewidth = 2,\n# label = 'Intermediate Mass SEL AGN', density = True)\n#plt.hist(ressel.logmh.loc[targets], bins = bins, color = 'red', \n# histtype = 'step', linewidth = 2,\n# label = 'This Proposal Targets', density = True)\n#plt.xlabel('log(M$_{halo}$/M$_\\odot$)')\n#plt.ylabel('Relative Frequency')\n#plt.legend()\n\nssfr_st = 10**(np.log10(ressel.sfr_nuv_wise) - ressel.logmstar)\nssfr_lt = 1/(1+(1/(ressel.meanfsmgr)))\nfsmgr_st = 100*(10**6)*(ssfr_st)/(1-ssfr_st)/(0.1*1e9)\nfsmgr_lt = ressel.meanfsmgr\n\n\nfig,ax = plt.subplots()\n#plt.plot(fsmgr_st.loc[targets], fsmgr_lt.loc[targets], \\\n# 'o', ms = 15, mec = 'r', mfc = 'none', mew = 2,\n# label = 'This Proposal Targets')\n#plt.legend(fontsize = 15, loc = 'lower right')\ncax = ax.scatter(fsmgr_st.loc[seldwarf+selim], fsmgr_lt.loc[seldwarf+selim], marker = 'o', s=10, \n c = ressel.logmstar.loc[seldwarf+selim], cmap = cmap, norm = norm,\n label = 'SEL')\ncax2 = ax.scatter(fsmgr_st.loc[seldwarfagn+selimagn], fsmgr_lt.loc[seldwarfagn+selimagn], marker = 'o', s=100, \n c = ressel.logmstar.loc[seldwarfagn+selimagn], cmap = cmap, norm= norm,\n label = 'SEL')\n#cb2 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,norm=norm)\n#cb2.set_label('stellar mass')\nfig.colorbar(cax,ax=[ax],extend = 'min', ticks = bounds,boundaries = bounds)\nxaxis = np.arange(np.min(fsmgr_st)-0.05, np.max(fsmgr_st)+0.05,0.01)\nyaxis = np.ones(len(xaxis))\n#plt.plot(xaxis, yaxis, 'k--', lw = 3)\nplt.xlim(np.min(fsmgr_st), np.max(fsmgr_st))\nplt.ylim(np.min(fsmgr_lt), np.max(fsmgr_lt))\n#plt.text(0.0005, 1.25, r'Stellar Mass Doubled in last Gyr', \n# fontsize=14, color='k')\nplt.yscale('log')\nplt.xscale('log')\nplt.xlabel(r'Short Term SFH', fontsize = 18)\n# $\\left(\\frac{M_*(<100 Myr)}{M_*(>100 Myr)}\\right)$',\nplt.ylabel(r'Long Term SFH ',fontsize = 18)\nfit = np.poly1d(np.polyfit(np.log10(fsmgr_st), \\\n np.log10(fsmgr_lt), 1))\ny = np.log10(xaxis)*fit[1] + 
fit[0]\n#plt.plot(xaxis,10**y,ls='--',c='k')\n#$\\left(\\frac{M_*(<1 Gyr)}{M_*(>1 Gyr)}\\right)$'\n################################\n#\n# Define demarcation functions\n#\n\n#def o3hbcomposite(log_NII_HA):\n# return (0.61 / (log_NII_HA - 0.05)) + 1.3\n#\n#def o3hbmain(log_NII_HA):\n# return (0.61 / (log_NII_HA - 0.47)) + 1.19\n#\n#def o1hamain(log_OI_HA): #main line for OI/H-alpha from equation 3, Kewley 2006\n# return 1.33 + (0.73 / (log_OI_HA + 0.59))\n#def o1hacrit(log_OI_HA): #boundary for OI/H-alpha\n# return -0.59\n#def s2hamain(log_SII_HA): #main line for SII/H-alpha from equation 2, Kewley 2006\n# return 1.30 + (0.72 / (log_SII_HA - 0.32))\n\ndef n2hacompmin(log_NII_HA): #composite minimum line from equation 1, Kewley 2006\n return 1.3 + (0.61 / (log_NII_HA - 0.05))\ndef n2hamain(log_NII_HA): #main line for NII/H-alpha from equation 5, Kewley 2006\n return 1.19 + (0.61 / (log_NII_HA - 0.47))\ndef s2hamain(log_SII_HA): #main line for SII/H-alpha from equation 2, Kewley 2006\n return 1.30 + (0.72 / (log_SII_HA - 0.32))\ndef s2halinseyf(log_SII_HA): #liner/seyfert divider for SII/H-alpha\n return 0.76 + 1.89*log_SII_HA\ndef o1hamain(log_OI_HA): #main line for OI/H-alpha from equation 3, Kewley 2006\n return 1.33 + (0.73 / (log_OI_HA + 0.59))\ndef o1halinseyf(log_OI_HA): #liner/seyfert divider for OI/H-alpha\n return 1.3 + 1.18*log_OI_HA\ndef o1hacrit(log_OI_HA): #boundary for OI/H-alpha\n return -0.59\n\nrefn2ha = np.linspace(-3.0, 0.35)\nrefoiha = np.linspace(-2.5, -0.4)\nrefsiiha = np.linspace(-2, 0.3,100)\nmain_sii = s2hamain(refsiiha)\nmain_oi = o1hamain(refoiha)\n\nf, (sp1, sp2, sp3) = plt.subplots(1,3, sharey = True)\n#bounds = [7.8,8.4, 8.7, 9. , 9.3, 9.6, 10.0]#np.arange(8.4,10.1,0.2)\n##cmap = plt.get_cmap('Spectral',6)#int(np.max(r)/0.5))\n#cmap = mpl.colors.ListedColormap(['navy','navy','blue','lightsteelblue','mediumturquoise',\\\n# 'darkgreen'])\n#boundaries = bounds#np.array([0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5])#, 4.0])#, 4.5, 5.0])\n#norm = mpl.colors.BoundaryNorm(boundaries, cmap.N, clip=True)\ngrp_color = ['blue','blue','green','green','red']\n#grps = [seldwarf,seldwarfagn,selim,selimagn,targets]\ngrps = [seldwarf+selim,seldwarfagn+selimagn,targets]\ngrp_size = [10,100,15]\ngrp_marker = ['o','o','o']#,'o','o']\ngrp_label = ['Dwarf SELs','Dwarf SEL AGN','Proposal Targets']\na = 1\nniiha = np.log10(ressel['nii_6584_flux']/ressel['h_alpha_flux'])\nSII = ressel['sii_6731_flux'] + ressel['sii_6717_flux']\nsiiha = np.log10(SII/ressel['h_alpha_flux'])\noiha = np.log10(ressel['oi_6300_flux']/ressel['h_alpha_flux'])\noiiihb = np.log10(ressel['oiii_5007_flux']/ressel['h_beta_flux'])\n\nfor i in range(len(grps)):\n if i == 2:\n sp1.plot(niiha.loc[grps[i]],oiiihb.loc[grps[i]], 'o',c=grp_color[i], ms= grp_size[i],\n mfc = 'none',mec = 'r',mew = 2, label = grp_label[i])\n sp2.plot(siiha.loc[grps[i]],oiiihb.loc[grps[i]], 'o',c=grp_color[i], ms= grp_size[i],mfc = 'none',mec = 'r',mew = 2, \n label = grp_label[i])\n sp3.plot(oiha.loc[grps[i]],oiiihb.loc[grps[i]], 'o',c=grp_color[i], ms= grp_size[i],mfc = 'none',mec = 'r',mew = 2)\n else: \n sp1.scatter(niiha.loc[grps[i]],oiiihb.loc[grps[i]], marker = grp_marker[i], s= grp_size[i], \n alpha = a,c = ressel.logmstar.loc[grps[i]],\n cmap = cmap, norm= norm, label = grp_label[i])\n sp2.scatter(siiha.loc[grps[i]],oiiihb.loc[grps[i]], marker = grp_marker[i], s= grp_size[i], \n alpha = a,c = ressel.logmstar.loc[grps[i]],\n cmap = cmap, norm= norm)\n sp3.scatter(oiha.loc[grps[i]].loc[grps[i]],oiiihb.loc[grps[i]], marker = grp_marker[i], s= 
grp_size[i], \n alpha = a,c = ressel.logmstar.loc[grps[i]],\n cmap = cmap, norm= norm)\n#for i in range(len(grps)):\n# niiha = np.log10(ressel.loc[grps[i]]['nii_6584_flux']/ressel.loc[grps[i]]['h_alpha_flux'])\n# SII = ressel.loc[grps[i]]['sii_6731_flux'] + ressel.loc[grps[i]]['sii_6717_flux']\n# siiha = np.log10(SII/ressel.loc[grps[i]]['h_alpha_flux'])\n# oiha = np.log10(ressel.loc[grps[i]]['oi_6300_flux']/ressel.loc[grps[i]]['h_alpha_flux'])\n# oiiihb = np.log10(ressel.loc[grps[i]]['oiii_5007_flux']/ressel.loc[grps[i]]['h_beta_flux'])\n# \n# if i == 4:\n# sp1.plot(niiha,oiiihb, 'o',c=grp_color[i], ms= grp_size[i],mfc = 'none',mec = 'r',mew = 2)\n# sp2.plot(siiha,oiiihb, 'o',c=grp_color[i], ms= grp_size[i],mfc = 'none',mec = 'r',mew = 2, \n# label = grp_label[i])\n# sp3.plot(oiha,oiiihb, 'o',c=grp_color[i], ms= grp_size[i],mfc = 'none',mec = 'r',mew = 2)\n# else: \n# sp1.scatter(niiha,oiiihb, c=grp_color[i],marker = grp_marker[i], s= grp_size[i], alpha = 0.5)\n# sp2.scatter(siiha,oiiihb, c=grp_color[i],marker = grp_marker[i], s= grp_size[i], alpha = 0.5,\n# label = grp_label[i])\n# sp3.scatter(oiha,oiiihb, c=grp_color[i],marker = grp_marker[i], s= grp_size[i], alpha = 0.5)\nsp1.plot(refn2ha, n2hamain(refn2ha),'k',zorder = 0)\nsp1.plot(refn2ha[refn2ha < 0], n2hacompmin(refn2ha[refn2ha < 0]),'k--',zorder = 0)\nsp2.plot(refsiiha, s2hamain(refsiiha),'k',zorder = 0)\nsp2.plot(refsiiha[refsiiha > -0.31], s2halinseyf(refsiiha[refsiiha > -0.31]),\n 'k-.',zorder = 0)\n \nsp2.set_xlabel(r'$\\rm log([$SII$]$ 6717 + 6731/H$\\alpha)$',fontsize=22)\nsp3.plot(refoiha[refoiha < -0.7], o1hamain(refoiha[refoiha < -0.7]),'k', \n zorder = 0)\nsp3.plot(refoiha[refoiha > -1.13], o1halinseyf(refoiha[refoiha > -1.13]),\n 'k-.',zorder = 0)\n\nsp3.set_xlabel(r'$\\rm log ([$OI$]$ 6300/H$\\alpha$)',fontsize=22)\nsp1.set_xlabel(r'$\\rm log([$N II$]$ 6584 / H$\\alpha)$',fontsize=22)\nsp1.set_ylabel(r'$\\rm log([$O III$]$ 5007 / H$\\beta$)',fontsize=22)\n#sp1.legend(loc = 'lower left', fontsize = 12)\nxmin, xmax = -2.00001, 0.30001\nymin, ymax = -1.5, 1.5\nsp1.set_xlim(xmin,xmax)\nsp1.set_ylim(ymin,ymax)\nxmin, xmax = -2.00001, 0.50001\nsp2.set_xlim(xmin,xmax)\nxmin, xmax = -2.50001, -0.40001\nsp3.set_xlim(xmin,xmax)\nif anno ==1:\n for i, txt in enumerate(pottarg):\n sp1.annotate(txt, (niiha.loc[pottarg[i]], \\\n oiiihb.loc[pottarg[i]]), fontsize = 8)\n#selndx = [x for x in range(len(rescat.name)) if rescat.name[x] in sels]\n#targndx = [x for x in range(len(rescat.name)) if rescat.name[x] in targets]\n#ressel['rmag'] = 0\n#ressel.rmag.loc[sels] = np.array(resphotcat.rmag[selndx], dtype = np.float) \n#targ_table = pd.DataFrame({'Name': np.array(targets),\n# 'RA': ressel.radeg.loc[targets],\n# 'Dec': ressel.dedeg.loc[targets],\n# 'r': ressel.rmag.loc[targets],\n# 'r_sys': ['AB']*len(targets)})\n#targ_table.to_csv(\"Gemini2020A_targets.csv\",index = False)","sub_path":"starburst_AGN_sample_eco.py","file_name":"starburst_AGN_sample_eco.py","file_ext":"py","file_size_in_byte":21793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"567303368","text":"from netmiko import Netmiko\r\nfrom tkinter import *\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nfrom itertools import repeat\r\nimport netmiko.ssh_exception\r\nimport logging\r\nimport os\r\nimport time\r\nimport argparse\r\n\r\n#sets cli parameters\r\ndef cliparse():\r\n\r\n cliparser = argparse.ArgumentParser(description = 'Cisco config analyzer')\r\n cliparser.add_argument('-d', metavar='devices', type=str, 
default='hosts',\r\n help='Specify the file with list of hosts. Default filename is \"hosts\". Specifying hosts explicitly takes precedence over file')\r\n cliparser.add_argument('-t', metavar='threads', type=int, \r\n help='Specify number of threads while connecting to other devices', default=1)\r\n cliparser.add_argument('-f', metavar='file', type=str, default='commands',\r\n help='Specify the file with list of commands. Default filename is \"commands\". Specifying the key \"-c\" takes precedence over the file')\r\n cliparser.add_argument('-l', metavar='level', type=str, default='INFO',\r\n help='Specify severity level (DEBUG, INFO, WARNING, ERROR, CRITICAL)')\r\n args = cliparser.parse_args()\r\n args.hosts = [line.rstrip('\\n') for line in open(args.d)]\r\n args.c = [line.rstrip('\\n') for line in open(args.f)]\r\n return args\r\n\r\nimport re  # used by the show-command parsers and sshconnect below\r\n\r\ndef parser_sh_cdp(output) :\r\n peers = 0\r\n if output.startswith(\"% CDP is not enabled\") :\r\n cdp = \"CDP is OFF\"\r\n peers = 0\r\n else :\r\n cdp = \"CDP is ON\"\r\n peers = output.count(\"Device ID\")\r\n return(\"%s,%s peers\" % (cdp, peers))\r\ndef parser_sh_ntp(output) :\r\n ntp_list = output.split(\",\")\r\n return(\"%s-%s\" % (ntp_list[0],ntp_list[2]))\r\ndef parser_sh_inv(output, SN) :\r\n inv_list = output.split(\"\\n\\n\")\r\n invent = []\r\n PID = \"\"\r\n for inv in inv_list :\r\n search1 = re.search('NAME: (\".*\"), DESCR: (\".*\")',inv) \r\n search2 = re.search('PID: (\\S+) *, VID: (.*), SN: (\\S+)',inv) \r\n if search1 and search2 :\r\n invent = search1.groups() + search2.groups()\r\n if invent[4] == SN :\r\n PID = invent[2]\r\n break\r\n return(PID)\r\ndef parser_ver_ios(output) :\r\n SN = ver_ios = \"\"\r\n if re.search(\".*Cisco IOS Software,.*\\((\\S+)\\),\", output) :\r\n ver_ios = re.search(\".*Cisco IOS Software,.*\\((\\S+)\\),\", output).group(1)\r\n if re.search(\".*Processor board ID (\\S+)\", output) :\r\n SN = re.search(\".*Processor board ID (\\S+)\", output).group(1)\r\n return(SN,ver_ios)\r\n\r\n#connecting to the single device, using deviceIP IP, creds{'uname':username, 'pwd':password}, logger object, cli command\r\n#connects to the device, sends each command, writes output to file and returns result \r\ndef sshconnect(deviceIP, creds, logger, cli, date_string):\r\n uname=creds['uname']\r\n pwd=creds['pwd']\r\n rezult = \"\"\r\n SN = \"\"\r\n logger.info(\"connecting to %s...\", deviceIP)\r\n try:\r\n net_connect = Netmiko(\r\n ip=deviceIP,\r\n username=uname,\r\n password=pwd,\r\n device_type=\"cisco_ios\",\r\n )\r\n logger.info(\"connected to %s\", deviceIP) \r\n if net_connect:\r\n prompt = net_connect.find_prompt()\r\n rez_cdp = rez_ntp = PE_NPE = ver_ios = PID = \"\"\r\n for command in cli:\r\n command_lower = command.lower()\r\n filename_part3 = command_lower.split(\"|\")[0] \r\n logger.info(\"sending %s to %s\" % (command, deviceIP)) \r\n if re.search(\"sh\\S* run\\S*\", command_lower) :\r\n file_output=open(\"data\\%s-%s.txt\" % (prompt.replace(\"#\",\"\"), date_string), \"w\")\r\n else : \r\n file_output=open(\"data\\%s-%s - %s.txt\" % (prompt.replace(\"#\",\"\"), date_string, filename_part3), \"w\")\r\n if command_lower.startswith(\"sh\") :\r\n output = net_connect.send_command(command)\r\n else :\r\n output = net_connect.send_config_set(command)\r\n if command_lower.find(\"cdp\") > 0 :\r\n rez_cdp = parser_sh_cdp(output)\r\n elif command_lower.find(\"ntp\") > 0 :\r\n rez_ntp = parser_sh_ntp(output)\r\n elif command_lower.find(\"ver\") > 0 :\r\n PE_NPE = \"PE_NPE\"\r\n SN,ver_ios = parser_ver_ios(output)\r\n if 
ver_ios.find(\"NPE\") > 0 :\r\n PE_NPE = \"NPE\"\r\n else :\r\n PE_NPE = \"PE\"\r\n elif command_lower.find(\"inv\") > 0 :\r\n PID = parser_sh_inv(output,SN)\r\n file_output.write(output)\r\n file_output.close()\r\n rezult = \"%s|%s|%s|%s|%s|%s|%s\" % (prompt.replace(\"#\",\"\"),PID,SN,ver_ios,PE_NPE,rez_cdp,rez_ntp)\r\n net_connect.disconnect()\r\n return rezult\r\n except Exception:\r\n print (\"Cannot connect to the device %s\" % deviceIP)\r\n logger.error(\"Cannot connect to the device %s\" % deviceIP)\r\n return\r\n\r\n#connecting to several devices using multithreading \r\ndef retrieve_result(hosts, creds, logger, cli, date_string, thlimit=1):\r\n with ThreadPoolExecutor(max_workers=thlimit) as executor:\r\n result = executor.map(sshconnect, hosts, repeat(creds), repeat(logger), repeat(cli), repeat(date_string))\r\n return list(result)\r\n \r\n#checking folders\r\ndef script_init(folder): \r\n if not os.path.exists(\"log\"):\r\n os.makedirs(\"log\")\r\n if not os.path.exists(\"data\"):\r\n os.makedirs(\"data\")\r\n\r\n \r\n#setting initial logging parameters \r\ndef log_settings(loggername, filename, severity): \r\n logger = logging.getLogger(loggername)\r\n \r\n logger.setLevel(logging.INFO)\r\n logfh=logging.FileHandler(\"log/%s.txt\" % filename)\r\n logformatter = logging.Formatter('%(asctime)s: %(name)s: %(levelname)s %(message)s')\r\n logfh.setFormatter(logformatter)\r\n logger.addHandler(logfh)\r\n return logger\r\n","sub_path":"netcheck.py","file_name":"netcheck.py","file_ext":"py","file_size_in_byte":6046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"9652632","text":"import re\nfrom speak_and_listen import speak,listen\n\n\n\n\n\ndef identify_name(text):\n name = None\n patterns = [\"me llamo ([A-Za-z]+)\", \"yo soy ([A-Za-z]+)\", \"^([A-Za-z]+)$\"]\n #for pattern in patterns: \n for pattern in patterns:\n try:\n name = re.findall(pattern, text)[0]\n print(name)\n except IndexError:\n pass \n return name\n\n\n\n\ndef main():\n speak(\"Hola, ¿Cómo te llamas?, dime me llamo y tu nombre, o yo soy y tu nombre, o solo tu nombre, cuando veas ESCUCHANDO... 
da un segundo y hablas\")\n text = listen()\n name = identify_name(text)\n if name:\n speak(\"Encantado de conocerte, {}\".format(name))\n else:\n speak(\"Pues mira, la verdad no te he entenido\")\n \n\n\n\n \n\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"Ejercicios/Modulo_III/asistente_de_voz.py","file_name":"asistente_de_voz.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569765294","text":"# -*- coding: utf-8 -*-\n\nimport os.path\nfrom django.conf import settings\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\n\nURL = settings.DEPLOY_URL\nSTATIC = settings.STATIC_DIR\n\ndef index(request):\n c=RequestContext(request,{})\n return render_to_response('index.html', c)\n\ndef recordVotable(request):\n if request.method == 'POST':\n votable = request.POST.get('table', '')\n votable_id = request.POST.get('table_id')\n outfile = STATIC+'/votables'\n f = open(os.path.join(outfile,votable_id+'.xml'), 'w')\n f.write(''+votable)\n f.close()\n #return HttpResponse(settings.STATIC_URL+'votables/'+votable_id)\n return HttpResponse(URL+'votables/'+votable_id+'.xml')\n\n","sub_path":"webtools/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587859255","text":"\"\"\"\nGenerating Steganography images using Adversarial attacks and comparing impact of data loss on recovery rate\nusing different models\n\"\"\"\n\n\n\nimport sys\nsys.path.append(\"./\")\nfrom experiments import logger, RANDOM_SEED\n\nimport os\n\nimport json\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import PercentFormatter\nfrom scipy import stats\n\n\n\nkeras_values = {\"rotate_recovery\":0.5192,\"crop_recovery\":0.6217,\"upscale_recovery\":0.5513,\"downscale_recovery\":0.9872,\"color_depth_recovery\":0.4487,\"compress90_recovery\":0.8782,\"compress75_recovery\":0.5897,\"compress50_recovery\":0.35897}\nkeras_values = {**keras_values, \"ssim\":6.02e-2,\"lpips\":3.34e-3}\ndef runa(experiment_time=\"1571050357\"):\n f = json.load(open(\"./experiments/results/{}.json\".format(experiment_time)))\n \n models = []\n rotate_recovery =[]\n crop_recovery =[]\n upscale_recovery =[]\n compress_recovery =[]\n \n for i, combination in enumerate(f):\n rotate_recovery.append(combination[\"rotate_recovery\"])\n crop_recovery.append(combination[\"crop_recovery\"])\n upscale_recovery.append(combination[\"upscale_recovery\"])\n compress_recovery.append(combination[\"compress_recovery\"])\n models.append(combination[\"model\"])\n\n rotate_recovery = np.array(rotate_recovery)\n crop_recovery = np.array(crop_recovery)\n upscale_recovery = np.array(upscale_recovery)\n compress_recovery = np.array(compress_recovery)\n\n suffix = \"recovery rate ({} models)\".format(len(f))\n n_bins = 20\n m = 0\n mx = 1\n metrics = {\"rotate_recovery\":(\"Post Rotation\",rotate_recovery),\"crop_recovery\":(\"Post Cropping\",crop_recovery),\"upscale_recovery\":(\"Post Upscaling\",upscale_recovery),\"compress75_recovery\":(\"Post Compression (quality=75)\",compress_recovery)}\n \n #metrics = {\"ssim\":(\"SSIM\",ssim),\"lpips\":(\"LPIPS\",lpips),\"psnr\":(\"PSNR\",psnr),\"color_depth_recovery\":(\"Post Color Depth Reduction\",color_recovery),\"compress50_recovery\":(\"Post Compression 
(quality=50)\",compress50_recovery),\"compress90_recovery\":(\"Post Compression (quality=90)\",compress90_recovery)}\n \n plot_metrics(metrics, suffix)\n\n # plt.figure()\n # plt.title(\"Post Rotation {}\".format(suffix))\n # plt.hist(rotate_recovery, n_bins, weights=np.ones(len(rotate_recovery)) / len(rotate_recovery),rwidth=0.85)\n # plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n # plt.xlabel('Recovery rate')\n # plt.ylabel('Count')\n\n # plt.figure()\n # plt.title(\"Post Cropping {}\".format(suffix))\n # plt.hist(crop_recovery, n_bins, weights=np.ones(len(rotate_recovery)) / len(rotate_recovery))\n # plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n # plt.xlabel('Recovery rate')\n # plt.ylabel('Count')\n\n # plt.figure()\n # plt.title(\"Post Upscaling {}\".format(suffix))\n # plt.hist(upscale_recovery, n_bins, weights=np.ones(len(rotate_recovery)) / len(rotate_recovery))\n # plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n # plt.xlabel('Recovery rate')\n # plt.ylabel('Count')\n \n\n # plt.figure()\n # plt.title(\"Post Compression (quality=75) {}\".format(suffix))\n # n,bins,edges = plt.hist(compress_recovery, n_bins, weights=np.ones(len(rotate_recovery)) / len(rotate_recovery), color='#0504aa',\n # alpha=0.7, rwidth=0.9,edgecolor='k', cumulative=True)\n # plt.grid(axis='y', alpha=0.75)\n # plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n # plt.xlabel('Recovery rate')\n # plt.ylabel('Count')\n # m = min(compress_recovery)\n # mx = max(compress_recovery)\n # plt.xlim(m,mx)\n # b = bins+1/n_bins/2\n # plt.xticks(b,np.around(b,2))\n\n plt.show()\n\n\ndef boxplot(f, label, labels):\n fig, ax = plt.subplots()\n\n #f = f[:2]\n #labels = labels[:2]\n adv_map = np.transpose(np.array(f))\n adv_map = adv_map/adv_map.max()\n ax.boxplot(adv_map, labels=labels)\n ax.set_title(\"{} inputs\".format(label))\n \n fig.tight_layout()\n\n\ndef plot_metrics(metrics, suffix):\n\n n_bins = 20\n m = 0\n mx = 1\n\n for k, (title, tbl) in metrics.items():\n plt.figure()\n\n indices = np.arange(len(tbl))\n #plt.scatter(indices,tbl)\n\n n,bins,edges = plt.hist(tbl, np.arange(0,n_bins+1)/n_bins, weights=np.ones(len(tbl)) / len(tbl), color='#0504aa',\n alpha=0.7, rwidth=0.9,edgecolor='k', cumulative=True)\n\n\n plt.grid(axis='y', alpha=0.75)\n\n if k==\"ssim\" or k==\"lpips\" or k==\"psnr\":\n plt.title(\"{}\".format(title))\n plt.ylabel('Loss value')\n plt.xlabel('Model')\n plt.xticks(indices, [\"\"]*len(tbl))\n\n b = bins\n lbls = np.around(b,2)\n plt.xticks(b,lbls)\n \n else:\n plt.title(\"{} {}\".format(title,suffix))\n plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n plt.xlabel('Recovery rate')\n plt.xlim(m,mx)\n plt.ylabel('Count')\n \n \n\n keras_value = keras_values.get(k)\n if keras_value:\n plt.axvline(x=keras_value, c='r')\n \n \n\n plt.show()\n\ndef runb(experiment_time=\"1572371338\"):\n f = json.load(open(\"./experiments/results/{}.json\".format(experiment_time)))\n \n models = []\n color_recovery =[]\n downscale_recovery =[]\n compress50_recovery =[]\n compress90_recovery =[]\n lpips = []\n ssim = []\n psnr = []\n \n\n for i, combination in enumerate(f):\n color_recovery.append(combination[\"color_depth_recovery\"])\n downscale_recovery.append(combination[\"downscale_recovery\"])\n compress50_recovery.append(combination[\"compress50_recovery\"])\n compress90_recovery.append(combination[\"compress90_recovery\"])\n\n lpips.append(combination[\"lpips\"])\n ssim.append(combination[\"ssim\"])\n psnr.append(combination[\"psnr\"])\n\n 
models.append(combination[\"model\"])\n\n color_recovery = np.array(color_recovery)\n downscale_recovery = np.array(downscale_recovery)\n compress50_recovery = np.array(compress50_recovery)\n compress90_recovery = np.array(compress90_recovery)\n\n tbl = lpips\n indices = np.arange(len(tbl))\n plt.scatter(indices,tbl,s=45)\n keras_value = keras_values.get(\"lpips\")\n if keras_value:\n plt.axhline(y=keras_value, c='r')\n plt.title(\"LPIPS\")\n plt.ylabel('Loss value')\n plt.xlabel('Model')\n plt.xticks(indices, [\"\"]*len(tbl))\n plt.show()\n return \n # boxplot([np.array(psnr)/100,np.array(lpips), np.array(ssim)],\"Similarity metrics\", (\"psnr\",\"lpips\",\"ssim\"))\n # plt.show()\n #return\n\n suffix = \"recovery rate ({} models)\".format(len(f))\n n_bins = 20\n m = 0\n mx = 1\n metrics = {\"color_depth_recovery\":(\"Post Color Depth Reduction\",color_recovery),\"compress50_recovery\":(\"Post Compression (quality=50)\",compress50_recovery),\"compress90_recovery\":(\"Post Compression (quality=90)\",compress90_recovery)}\n #metrics = {\"ssim\":(\"SSIM\",ssim),\"lpips\":(\"LPIPS\",lpips),\"psnr\":(\"PSNR\",psnr),**metrics}\n \n #metrics = {\"compress90_recovery\":(\"Post Compression (quality=90)\",compress90_recovery)}\n plot_metrics(metrics, suffix)\n return \n\n\n\n\ndef runc(experiment_time=\"1572371338\"):\n f = json.load(open(\"./experiments/results/experimentSP9c/{}.json\".format(experiment_time)))\n \n models = []\n decoding_recovery =[e[\"decoding_recovery\"] for e in f][:100]\n\n plt.figure()\n title = \"Decoding rate by third party models ({} models)\".format(len(decoding_recovery))\n plt.title(title)\n n_bins = 20\n n,bins,edges = plt.hist(decoding_recovery, n_bins, weights=np.ones(len(decoding_recovery)) / len(decoding_recovery), color='#0504aa',\n alpha=0.7, rwidth=0.9,edgecolor='k', cumulative=True)\n plt.grid(axis='y', alpha=0.75)\n plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n plt.xlabel('Decoding rate')\n plt.ylabel('Count')\n m = min(decoding_recovery)\n mx = max(decoding_recovery)\n plt.xlim(m,mx)\n b = bins+1/n_bins/2\n plt.xticks(b,np.around(b,2))\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n # runa(experiment_time=\"experimentSP9/1571050357 - Copie\")\n # runa(\"experimentSP9/merged\")\n # runb(experiment_time=\"experimentSP9b/merged\")\n runc()","sub_path":"experiments/experiment9_analysis.py","file_name":"experiment9_analysis.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390402384","text":"import pygame\r\n\r\nfrom affects.invincibility import Invincibility\r\nfrom entities import CollidableEntity\r\nfrom imaging import Animated, Animation\r\nfrom vectors import VectorXY\r\nfrom weapons.pistol import Pistol\r\n\r\nclass Player(CollidableEntity, Animated):\r\n def __init__(self, x, y, length, height):\r\n self.startingPosition = (x, y)\r\n\r\n CollidableEntity.__init__(self, x, y, length, height)\r\n self.adjustHitBox(17, 11, -(length - 29), -(height - 52))\r\n\r\n self.standingImage = pygame.image.load(\"assets/images/player/standing.png\")\r\n\r\n Animated.__init__(self)\r\n\r\n # Walking information and visuals.\r\n self.walkingVelocity = 5\r\n self.walkingLeftAnimation = Animation.fromDirectory(\"assets/images/player/walkingLeft\", 3)\r\n self.walkingRightAnimation = Animation.fromDirectory(\"assets/images/player/walkingRight\", 3)\r\n\r\n # TODO: Convert jumping into an affect.\r\n self.isJump = False\r\n self.jumpCount = 10\r\n\r\n 
self.weapon = Pistol(3)\r\n self.weapon.pointRight() # Weapons initially face right, towards the enem(y|ies).\r\n\r\n self.velocity = VectorXY(0, 0)\r\n self.score = 0\r\n\r\n self.affects = list()\r\n\r\n def turnLeft(self):\r\n self.velocity.x = -self.walkingVelocity\r\n self.setAnimation(self.walkingLeftAnimation)\r\n self.weapon.pointLeft()\r\n\r\n def turnRight(self):\r\n self.velocity.x = self.walkingVelocity\r\n self.setAnimation(self.walkingRightAnimation)\r\n self.weapon.pointRight()\r\n\r\n def stop(self):\r\n self.velocity.x = 0\r\n self.setAnimation(None)\r\n\r\n def move(self):\r\n # TODO: Figure out how to do 'self.position += self.velocity'.\r\n self.x += self.velocity.x\r\n self.y += self.velocity.y\r\n\r\n if not self.minimumXPosition is None and self.x < self.minimumXPosition:\r\n self.x = self.minimumXPosition\r\n\r\n if not self.maximumXPosition is None and self.maximumXPosition < self.x:\r\n self.x = self.maximumXPosition\r\n\r\n if not self.minimumYPosition is None and self.y < self.minimumYPosition:\r\n self.y = self.minimumYPosition\r\n\r\n if not self.maximumYPosition is None and self.maximumYPosition < self.y:\r\n self.y = self.maximumYPosition\r\n\r\n def jump(self):\r\n startedNewJump = False\r\n\r\n if not self.isJump:\r\n self.isJump = True\r\n startedNewJump = True\r\n\r\n return startedNewJump\r\n\r\n def shoot(self):\r\n bulletFired = None\r\n\r\n # Only shoot if the weapon is available.\r\n if not self.weapon is None:\r\n # Shoot from the center of the player.\r\n position = self.x + self.halfLength, self.y + self.halfHeight\r\n\r\n bulletFired = self.weapon.fire(position, self)\r\n\r\n return bulletFired\r\n\r\n def update(self):\r\n self.updateAnimation()\r\n\r\n if self.isJump:\r\n if -10 <= self.jumpCount:\r\n neg = 1\r\n if self.jumpCount < 0:\r\n neg = -1\r\n self.y -= (self.jumpCount ** 2) * 0.5 * neg\r\n self.jumpCount -= 1\r\n else:\r\n self.isJump = False\r\n self.jumpCount = 10\r\n\r\n if not self.weapon is None:\r\n self.weapon.update()\r\n\r\n for affect in self.affects:\r\n if not affect.isActive():\r\n self.affects.pop(self.affects.index(affect))\r\n else:\r\n affect.update()\r\n\r\n def draw(self, win):\r\n image = self.getAnimationFrame()\r\n\r\n if image is None:\r\n image = self.standingImage\r\n\r\n win.blit(image, self.position)\r\n\r\n# self.showHitBox(win)\r\n\r\n def onAttackedBy(self, attacker=None):\r\n \"\"\"Handles an attack on the entity from another.\"\"\"\r\n attackSuccessful = True\r\n\r\n # If the player is invincible, ignore the attack.\r\n for affect in self.affects:\r\n if type(affect) is Invincibility:\r\n attackSuccessful = False\r\n break\r\n\r\n if attackSuccessful:\r\n # Reset jumping.\r\n self.isJump = False\r\n self.jumpCount = 10\r\n\r\n # \"Respawn the player\" (move them to their starting position).\r\n self.x, self.y = self.startingPosition\r\n\r\n # Stop moving\r\n self.stop()\r\n\r\n # Decrease the score:\r\n self.score -= 5\r\n\r\n # Apply temporary invincibility affect.\r\n self.affects.append(Invincibility(200))\r\n\r\n return attackSuccessful\r\n","sub_path":"source/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419801371","text":"import tensorflow as tf\nimport math\nfrom tqdm import tqdm\nfrom tensorflow.python.platform import flags\nfrom torch.utils.data import DataLoader, Dataset\nfrom models import ResNetModel, CelebAModel\nfrom utils import ReplayBuffer, 
GaussianBlur\nimport os.path as osp\nimport numpy as np\nfrom logger import TensorBoardOutputFormat\nfrom scipy.misc import imsave\nfrom torchvision import transforms\nimport os\nfrom itertools import product\nfrom PIL import Image\nimport torch\n\nflags.DEFINE_integer('batch_size', 256, 'Size of inputs')\nflags.DEFINE_integer('data_workers', 4, 'Number of workers to do things')\nflags.DEFINE_string('logdir', 'cachedir', 'directory for logging')\nflags.DEFINE_string('savedir', 'cachedir', 'location where log of experiments will be stored')\nflags.DEFINE_integer('num_filters', 64, 'number of filters for conv nets -- 32 for miniimagenet, 64 for omniglot.')\nflags.DEFINE_float('step_lr', 10.0, 'size of gradient descent size')\nflags.DEFINE_bool('cclass', True, 'not cclass')\nflags.DEFINE_bool('proj_cclass', False, 'use for backwards compatibility reasons')\nflags.DEFINE_bool('spec_norm', True, 'Whether to use spectral normalization on weights')\nflags.DEFINE_bool('use_bias', True, 'Whether to use bias in convolution')\nflags.DEFINE_bool('use_attention', False, 'Whether to use self attention in network')\nflags.DEFINE_integer('num_steps', 200, 'number of steps to optimize the label')\nflags.DEFINE_string('task', 'negation_figure', 'conceptcombine, combination_figure, negation_figure, or_figure, negation_eval')\n\nflags.DEFINE_bool('eval', False, 'Whether to quantitively evaluate models')\nflags.DEFINE_bool('latent_energy', False, 'latent energy in model')\nflags.DEFINE_bool('proj_latent', False, 'Projection of latents')\n\n\n# Whether to train for gentest\nflags.DEFINE_bool('train', False, 'whether to train on generalization into multiple different predictions')\n\nFLAGS = flags.FLAGS\n\n\ndef conceptcombine(model_list, select_idx):\n\n n = 64\n labels = []\n\n for six in select_idx:\n label_ix = np.eye(10)[six]\n label_batch = np.tile(label_ix[None, :], (n, 1))\n label = torch.Tensor(label_batch).cuda()\n labels.append(label)\n\n im = torch.rand(n, 3, 32, 32).cuda()\n im_noise = torch.randn_like(im).detach()\n\n def get_color_distortion(s=1.0):\n # s is the strength of color distortion.\n color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.4*s)\n rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)\n rnd_gray = transforms.RandomGrayscale(p=0.2)\n color_distort = transforms.Compose([\n rnd_color_jitter,\n rnd_gray])\n return color_distort\n\n color_transform = get_color_distortion()\n\n im_size = 32\n transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.02, 1.0)), transforms.RandomHorizontalFlip(), color_transform, GaussianBlur(kernel_size=5), transforms.ToTensor()])\n\n # First get good initializations for sampling\n for i in range(10):\n for i in range(20):\n im_noise.normal_()\n im = im + 0.001 * im_noise\n # im.requires_grad = True\n im.requires_grad_(requires_grad=True)\n energy = 0\n\n for model, label in zip(model_list, labels):\n energy = model.forward(im, label) + energy\n\n # print(\"step: \", i, energy.mean())\n im_grad = torch.autograd.grad([energy.sum()], [im])[0]\n\n im = im - FLAGS.step_lr * im_grad\n im = im.detach()\n\n im = torch.clamp(im, 0, 1)\n\n im = im.detach().cpu().numpy().transpose((0, 2, 3, 1))\n im = (im * 255).astype(np.uint8)\n\n ims = []\n for i in range(im.shape[0]):\n im_i = np.array(transform(Image.fromarray(np.array(im[i]))))\n ims.append(im_i)\n\n im = torch.Tensor(np.array(ims)).cuda()\n\n # Then refine the images\n\n for i in range(FLAGS.num_steps):\n im_noise.normal_()\n im = im + 0.001 * im_noise\n # 
im.requires_grad = True\n im.requires_grad_(requires_grad=True)\n energy = 0\n\n for model, label in zip(model_list, labels):\n energy = model.forward(im, label) + energy\n\n print(\"step: \", i, energy.mean())\n im_grad = torch.autograd.grad([energy.sum()], [im])[0]\n\n im = im - FLAGS.step_lr * im_grad\n im = im.detach()\n\n im = torch.clamp(im, 0, 1)\n\n output = im.detach().cpu().numpy()\n output = output.transpose((0, 2, 3, 1))\n output = output.reshape((-1, 8, 32, 32, 3)).transpose((0, 2, 1, 3, 4)).reshape((-1, 32 * 8, 3))\n imsave(\"debug.png\", output)\n\n\ndef combine_main(models, resume_iters, select_idx):\n\n model_list = []\n\n for model, resume_iter in zip(models, resume_iters):\n model_path = osp.join(\"cachedir\", model, \"model_{}.pth\".format(resume_iter))\n checkpoint = torch.load(model_path)\n FLAGS_model = checkpoint['FLAGS']\n model_base = ResNetModel(FLAGS_model)\n model_base.load_state_dict(checkpoint['ema_model_state_dict_0'])\n model_base = model_base.cuda()\n model_list.append(model_base)\n\n conceptcombine(model_list, select_idx)\n\n\nif __name__ == \"__main__\":\n models_orig = ['cifar10_cond_807', 'cifar10_cond_807']\n resume_iters_orig = [\"15000\", \"15000\"]\n\n models = []\n resume_iters = []\n select_idx = []\n ##################################\n # Settings for the composition_figure\n models = models + [models_orig[1]]\n resume_iters = resume_iters + [resume_iters_orig[1]]\n select_idx = select_idx + [6]\n\n models = models + [models_orig[0]]\n resume_iters = resume_iters + [resume_iters_orig[0]]\n select_idx = select_idx + [9]\n\n\n FLAGS.step_lr = FLAGS.step_lr / len(models)\n\n # List of 4 attributes that might be good\n # Young -> Female -> Smiling -> Wavy\n combine_main(models, resume_iters, select_idx)\n\n","sub_path":"cifar10_combine.py","file_name":"cifar10_combine.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"574044744","text":"# class declaration\nclass Student:\n classVar = 'Vestibulum Facilisis' # this is a class variable\n\n# creating objects of class Student\nstudent1 = Student() # first object\nstudent2 = Student() # second object\n\n# creating instance variable of class\nstudent1.name = 'Lorem Ipsum'\nstudent1.roll = 17364\nstudent1.address = 'A-17 Dolor Sit Amet, Consectetur Adipiscing, Volnovica'\nstudent1.classVar = 'Morbi Hendrerit' # only change value of variable for student1 object\n\nstudent2.name = 'Perspiciatis Unde'\nstudent2.roll = 17365\nstudent2.address = 'A-18 Dolor Sit Amet, Consectetur Adipiscing, Volnovica'\nstudent2.classVar = 'Maecenas Mattis' # only change value of variable for student2 object\n\nprint(f'Student 1 Details :-\\nName : {student1.name}\\nRoll Number : {student1.roll}\\nAddress : {student1.address}\\n')\nprint(f'Student 2 Details :-\\nName : {student2.name}\\nRoll Number : {student2.roll}\\nAddress : {student2.address}\\n')\n\nprint(f'Printing Class Variable : {Student.classVar}')\nprint(f'Printing Class Variable for student 1 object : {student1.classVar}')\nprint(f'Printing Class Variable for student 2 object : {student2.classVar}\\n')\n# Student.classVar = 'Ipsum Sollicitudin' # changes value of variable for all the objects\n# print(Student.classVar)\n\nprint(student1.__dict__) # prints out all the instance variable of object with its value in a dictionary\nprint(student2.__dict__) # prints out all the instance variable of object with its value in a 
dictionary","sub_path":"objectOrientedProgrammingUsingPython/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"12300124","text":"import psutil,time,sys,logging,os\n\nlogName = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level = logging.INFO)\npath = os.getcwd()+\"/\"+logName+\".txt\"\nhandler = logging.FileHandler(path)\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\ndef getChromeCPU():\n webRTCCPU = 0\n chromePidlist = getChromePid()\n for pid in chromePidlist:\n pro = psutil.Process(int(pid))\n pro.cpu_percent(None)\n time.sleep(2)\n pCPU = pro.cpu_percent(None)\n print(pid ,\" pid CPU is \",pCPU)\n # logger.info(pid,\" CPU is \",pCPU)\n\n webRTCCPU += pCPU\n # print(\"******************* webRTC_CPU is %.2f%% *******************\" %(webRTCCPU/2))\n return webRTCCPU/2\n\ndef getChromeMEM():\n webRTCMEM = 0\n chromePidlist = getChromePid()\n for pid in chromePidlist:\n time.sleep(2)\n virmem = psutil.virtual_memory()\n getMemP = psutil.Process(int(pid))\n Pmem = (getMemP.memory_percent() / 100) * virmem.used / 1024 / 1024\n print(\"pid:\", pid + \" mem is\", Pmem)\n # logger.info(\"pid: \",pid + \"mem is \", Pmem)\n webRTCMEM += Pmem\n # print(\"=================== webRTC_MEM is %.2fMB ===================\" % webRTCMEM)\n return webRTCMEM\n\n# def getMEM(GPUpid,Capturepid,premiumpid):\n# mem = psutil.virtual_memory()\n# GPUp = psutil.Process(GPUpid)\n# Capturep = psutil.Process(Capturepid)\n# premiump = psutil.Process(premiumpid)\n# GPUMEM = GPUp.memory_percent()\n# CaptureMEM = Capturep.memory_percent()\n# premiumMEM = premiump.memory_percent()\n# memUSED = mem.used\n# webRTCMEM = (((GPUMEM+CaptureMEM+premiumMEM)/100)*(memUSED))/1024/1024\n# print(\"GPU MEM is %dMB\"% ((((GPUMEM)/100)*(memUSED))/1024/1024))\n# print(\"Capture MEM is %sMB\"% ((((CaptureMEM)/100)*(memUSED))/1024/1024))\n# print(\"Premium MEM is %sMB\"% ((((premiumMEM)/100)*(memUSED))/1024/1024))\n# logger.info(\"GPU MEM is %dMB\"% ((((GPUMEM)/100)*(memUSED))/1024/1024))\n# logger.info(\"Capture MEM is %sMB\"% ((((CaptureMEM)/100)*(memUSED))/1024/1024))\n# logger.info(\"Premium MEM is %sMB\"% ((((premiumMEM)/100)*(memUSED))/1024/1024))\n# print(\"=================== webRTC_MEM is %.2fMB ===================\" % webRTCMEM)\n# return webRTCMEM\n\n\ndef getSafariCPU():\n safariPidList = getsafariPid()\n webRTCCPU = 0\n for pid in safariPidList:\n pro = psutil.Process(int(pid))\n pro.cpu_percent(None)\n time.sleep(2)\n proCPU = pro.cpu_percent(None)\n print(pid,\" pid CPU is %d%%\" % (proCPU))\n # logger.info(pid, \" pid CPU is %d%%\" % (proCPU))\n webRTCCPU += proCPU\n # print(\"******************* webRTC_CPU is %.2f%% *******************\" % (webRTCCPU / 2))\n return webRTCCPU / 2\n\ndef getSafariMEM():\n webRTCMEM = 0\n safariPidlist = getsafariPid()\n for pid in safariPidlist:\n time.sleep(2)\n virmem = psutil.virtual_memory()\n getMemP = psutil.Process(int(pid))\n Pmem = (getMemP.memory_percent() / 100) * virmem.used / 1024 / 1024\n print(\"pid:\", pid + \" mem is\", Pmem)\n # logger.info(\"pid: \",pid + \"mem is \", Pmem)\n webRTCMEM += Pmem\n # print(\"=================== webRTC_MEM is %.2fMB ===================\" % webRTCMEM)\n return webRTCMEM\n\ndef getChromeCPUMEM(secs,GPUpid, Capturepid, 
premiumpid):\n cpuTotal = 0\n memTotal = 0\n for i in range(0,10):\n time.sleep(secs)\n cpu = getCPU(GPUpid, Capturepid, premiumpid)\n print(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n logger.info(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n mem = getMEM(GPUpid, Capturepid, premiumpid)\n print(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n logger.info(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n cpuTotal += cpu\n memTotal += mem\n print(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n print(\"内存平均值为:%.2f\"%(memTotal/10))\n logger.info(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n logger.info(\"内存平均值为:%.2f\"%(memTotal/10))\n\ndef getChromeCPUMEM_autoGetPid(secs):\n cpuTotal = 0\n memTotal = 0\n for i in range(0,10):\n time.sleep(secs)\n cpu = getChromeCPU()\n print(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n logger.info(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n mem = getChromeMEM()\n print(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n logger.info(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n cpuTotal += cpu\n memTotal += mem\n print(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n print(\"内存平均值为:%.2f\"%(memTotal/10))\n logger.info(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n logger.info(\"内存平均值为:%.2f\"%(memTotal/10))\n\ndef getSafariCPUMEM_autoGetPid(secs):\n cpuTotal = 0\n memTotal = 0\n for i in range(0,10):\n time.sleep(secs)\n cpu = getSafariCPU()\n print(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n logger.info(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n mem = getSafariMEM()\n print(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n logger.info(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n cpuTotal += cpu\n memTotal += mem\n print(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n print(\"内存平均值为:%.2f\"%(memTotal/10))\n logger.info(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n logger.info(\"内存平均值为:%.2f\"%(memTotal/10))\n\ndef getSafariCPUMEM(secs,safariProcesspid,premiumpid):\n cpuTotal = 0\n memTotal = 0\n for i in range(0,10):\n time.sleep(secs)\n cpu = getSafariCPU(safariProcesspid,premiumpid)\n print(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n logger.info(\"*******************第%d组 webRTC_CPU is %.2f%% *******************\" % ((i+1),cpu))\n mem = getSafariMEM(safariProcesspid,premiumpid)\n print(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n logger.info(\"===================第%d组 webRTC_MEM is %.2fMB ===================\" % ((i+1),mem))\n cpuTotal += cpu\n memTotal += mem\n print(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n print(\"内存平均值为:%.2f\"%(memTotal/10))\n logger.info(\"cpu平均值为:%.2f\"%(cpuTotal/10))\n logger.info(\"内存平均值为:%.2f\"%(memTotal/10))\n\ndef getChromePid():\n renderpidinfo = os.popen(\"ps -ef | grep 'Google Chrome.app' | grep 'type=renderer' | grep 'renderer-client-id=7'| awk '{print $2}'\")\n renderpidlist = renderpidinfo.read().split(\"\\n\")[:-2]\n\n gpupidinfo = os.popen(\"ps -ef | grep 'Google Chrome.app' | grep 'gpu-preferences' | awk '{print $2}'\")\n gpuuidlist = gpupidinfo.read().split(\"\\n\")[:-2]\n\n mediapidinfo = os.popen(\"ps -ef | grep 'Google Chrome.app' | grep 'type=utility' | grep 
'message-loop-type-ui' | awk '{print $2}'\")\n\n mediauidlist = mediapidinfo.read().split(\"\\n\")[:-2]\n\n chromepidlist = renderpidlist + gpuuidlist + mediauidlist\n print(\"chromePidList is :\",chromepidlist)\n return chromepidlist\n\ndef getsafariPid():\n safaripidInfo = os.popen(\"ps -ef | grep 'Safari.app' | awk '{print $2}'\")\n safaripidlist = safaripidInfo.read().split(\"\\n\")[:-3]\n\n premiumpidinfo = os.popen(\"ps -ef | grep 'XPCServices/com.apple.WebKit.WebContent.xpc' | awk '{print $2}'\")\n premiumpidlist = premiumpidinfo.read().split(\"\\n\")[:-3]\n\n safaripidlist = safaripidlist + premiumpidlist\n print(\"safaripidlist is :\",safaripidlist)\n return safaripidlist\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('''run as: python3 macBrowserCpuMem.py browserType secs \n browserType:chrome or safari\n secs:sleep time\n ''')\n exit()\n browserType = sys.argv[1]\n if browserType == \"chrome\":\n secs = int(sys.argv[2])\n getChromeCPUMEM_autoGetPid(secs)\n elif browserType == \"safari\":\n secs = int(sys.argv[2])\n getSafariCPUMEM_autoGetPid(secs)\n\n","sub_path":"autoTest/macBrowserCpuMem_autoGetPid.py","file_name":"macBrowserCpuMem_autoGetPid.py","file_ext":"py","file_size_in_byte":8639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"545198604","text":"'''\nuse within-cluster connection COUNT algorithm.\n\nLast edited: May 28\n'''\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\nfrom sklearn.decomposition import PCA\nfrom sklearn.externals import joblib\nimport sys\nimport numpy as np\nimport pandas as pd\nimport random as rd\nimport json\nimport arrow as ar\nimport scipy.sparse as scs\n\n#########\nStart_p = 2000\nEnd_p = 5000\n#########\n\npca = joblib.load('output/PCA_model.pkl')\nkm_pca = joblib.load('output/km_pca_model.pkl')\nSimM1 = np.load('output/SimilaryMatrix.npy')\nSimM_ind = np.load('output/SM_index.npy')\n# target_list = ['0000841919', '0000536923','0000501372','0000407808', ##3C\n# '0001532950','0001516417','0001521830', ##fresh\n# '0000693220', '0000608357', '0000579436'] ##health\nwith open('May-June/namelist.txt', 'r') as f:\n target_list = json.load(f)[Start_p:End_p]\nwith open('May-June/flag_tr_te.txt', 'r') as f:\n tr_te = json.load(f)\nRec5000_dict = {}\nRec_dict_len = 1000\n# target = '0000187738' #SimM_ind[0] ##\"0000\"\nn_mark = len(SimM_ind)\nfv = pd.read_table('May-June/FeatureVector.txt', sep = '\\t', dtype= {'CMark': object}, index_col = 0)\nfv = fv.set_index('CMark')\n# if target in SimM_ind:\n# fv = fv.loc[SimM_ind]\n# else:\n# fv = fv.loc[np.append(SimM_ind, target)]\n\nrd.seed(19920804)\nwith open('May-June/NeighborSend_list_True.txt', 'r') as f:\n NBS= json.load(f)\nwith open('May-June/NeighborRec_list_True.txt', 'r') as f:\n NBR= json.load(f)\nwith open('output/SimPair.json', 'r', encoding = 'utf8') as f:\n SimPair = json.load(f)\nglobal w, indlistlen\nw = {'oversea': 1, 'indust1': 8, 'indust2': 5, 'area': 1, 'DayAverage': 3, 'Variation': 3, 'toInd': 15, 'fromInd': 10, 'SoverR': 10, 'toIndSet': 15, 'fromIndSet':10}\nw = pd.Series(w)\nw = w/sum(w)\nindlistlen = 12\n\n\ndef overlap(x):\n a = x.iloc[0]\n b = x.iloc[1]\n a = set(a[1:-1].replace(\"'\", '').replace(\" \",'').split(','))\n b = set(b[1:-1].replace(\"'\", '').replace(\" \",'').split(','))\n if ('' in a) or('' in b):\n ol = 0\n else:\n ol = len(a & b) / len(a|b)\n return(ol)\n\ndef sim_com(a, b):\n sim = 0\n sim += w['oversea'] * int(a[0] == b[0]) #oversea\n sim += w['indust1'] * int(a[1] 
== b[1]) #indust1\n sim += w['indust2'] * int(a[2] == b[2])\n sim += w['area'] * int(a[3] == b[3]) #area\n sim += w['DayAverage'] * (1 - abs(a[4] - b[4])/(a[4] + b[4])) #DayAverage\n sim += w['Variation'] * (1 - abs(a[5] - b[5])/(a[5]+b[5])) #Variation\n r = 1 #control the influence of main industry, eg: for less influence can choose r = 0.5\n sim += w['toInd'] * (1 - np.linalg.norm((abs(a[6:(6 + indlistlen)] - b[6:(6 + indlistlen)]) * (pd.concat([a[6:(6+indlistlen)], b[6:(6+indlistlen)]], axis = 1).max(axis = 1) ** r)),\n ord = 1)/2) #toIndust\n sim += w['fromInd'] * (1 - np.linalg.norm((abs(a[(6 + indlistlen):(6+ 2* indlistlen)] - b[(6 + indlistlen):(6+ 2* indlistlen)]) *\n (pd.concat([a[(6 + indlistlen):(6+ 2* indlistlen)], b[(6 + indlistlen):(6+ 2* indlistlen)]], axis = 1).max(axis =1) ** r)),\n ord = 1)/2)#fromIndust\n sim += w['SoverR'] * (1 - abs(a[6 + 2 * indlistlen] - b[6 + 2 * indlistlen])/ (a[6 + 2 * indlistlen] + b[6 + 2 * indlistlen]))\n sim += w['toIndSet'] * np.linalg.norm(np.array(pd.concat([a[(7 + 2 * indlistlen):(7 + 3 * indlistlen) ], b[(7 + 2 * indlistlen): (7 + 3 * indlistlen)]], axis = 1).apply(\n axis = 1, func = lambda x: overlap(x)))*np.array((a[6:(6 + indlistlen)] + b[6:(6 + indlistlen)])/2), ord = 1)\n sim += w['fromIndSet'] * np.linalg.norm(np.array(pd.concat([a[7 + 3 * indlistlen: ], b[7 + 3 * indlistlen:]], axis = 1).apply(\n axis = 1, func = lambda x: overlap(x))) * np.array((a[(6 + indlistlen):(6+ 2 * indlistlen)] + b[(6 + indlistlen):(6+ 2 * indlistlen)])/2), ord = 1)\n return(sim)\nMark2Ind = {}\nfor i in range(n_mark):\n Mark2Ind[SimM_ind[i]] = i\nof_D = open('May-June/DownRec_fast.txt', 'a')\nof_U = open('May-June/UpRec_fast.txt', 'a')\n\nfor cc, target in enumerate(target_list):\n print(cc)\n print(target)\n ## compute similarity vector\n print('Compute Sim.\\n')\n Rec5000_dict[target] ={}\n x = target\n time1 = ar.now()\n rdl = 150\n sim_v = np.array([])\n\n ## depth 2 similarity , to change to depth 1 similarity, can only compute\n ## sim_v = [sim(fv.loc[target], fv.loc[x]) for x in SimM_ind]\n for j in range(n_mark):\n # if (j>1):\n # break\n y = SimM_ind[j]\n t = 1\n D_send = 0\n D_rec = 0\n if (x in NBS) and( y in NBS):\n nn = len(NBS[x])\n mm = len(NBS[y])\n if (nn>rdl):\n NBS[x] = [NBS[x][ind] for ind in rd.sample(range(nn), rdl)]\n nn = rdl\n if (mm>rdl):\n NBS[y] = [NBS[y][ind] for ind in rd.sample(range(mm), rdl)]\n mm = rdl\n t = t +1\n alist = []\n blist = []\n for k in NBS[x]:\n alist = alist + [k] * mm\n blist = NBS[y] * nn\n D = []\n # print('%d, %d'%(nn,mm))\n for k in range(nn*mm):\n # if (k%(rdl*10)==0):\n # print('k = %d'%k)\n if (alist[k] in Mark2Ind.keys()) and (blist[k] in Mark2Ind.keys()):\n D.append(SimM1[Mark2Ind[alist[k]], Mark2Ind[blist[k]]])\n elif (alist[k]+'\\t'+ blist[k] in SimPair):\n D.append(SimPair[alist[k]+'\\t'+ blist[k]])\n elif (blist[k]+'\\t'+ alist[k] in SimPair):\n D.append(SimPair[blist[k]+'\\t'+ alist[k]])\n else:\n SimPair[alist[k]+'\\t'+ blist[k]] = sim_com(fv.loc[alist[k]], fv.loc[blist[k]])\n D.append(SimPair[alist[k]+'\\t'+ blist[k]])\n m = np.matrix(np.reshape(D, (nn, mm)))\n D_send = max(m.max(axis = 0).mean(), m.max(axis = 1).mean())\n D_com = sim_com(fv.loc[x], fv.loc[y])\n if (x in NBR) and (y in NBR):\n nn = len(NBR[x])\n mm = len(NBR[y])\n if (nn>rdl):\n NBR[x] = [NBR[x][ind] for ind in rd.sample(range(nn), rdl)]\n nn = rdl\n if (mm>rdl):\n NBR[y] = [NBR[y][ind] for ind in rd.sample(range(mm), rdl)]\n mm = rdl\n t = t +1\n alist = []\n blist = []\n # print('%d, %d'%(nn,mm))\n for k in NBR[x]:\n 
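# Aside: a self-contained numpy sketch of the neighbor-to-neighbor aggregation
# used below -- given the pairwise similarity matrix between x's neighbors and
# y's neighbors, take the larger of the two "best match" averages (toy data):
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     pairwise = rng.random((4, 6))             # 4 neighbors of x vs. 6 of y
#     best_for_y = pairwise.max(axis=0).mean()  # each neighbor of y vs. its best match
#     best_for_x = pairwise.max(axis=1).mean()  # each neighbor of x vs. its best match
#     neighborhood_sim = max(best_for_y, best_for_x)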
alist = alist + [k] * mm\n        blist = NBR[y] * nn\n        D = []\n        for k in range(nn*mm):\n            # if (k%(rdl*10)==0):\n            #     print('k = %d'%k)\n            if (alist[k] in Mark2Ind.keys()) and (blist[k] in Mark2Ind.keys()):\n                D.append(SimM1[Mark2Ind[alist[k]], Mark2Ind[blist[k]]])\n            elif (alist[k]+'\\t'+ blist[k] in SimPair):\n                D.append(SimPair[alist[k]+'\\t'+ blist[k]])\n            elif (blist[k]+'\\t'+ alist[k] in SimPair):\n                D.append(SimPair[blist[k]+'\\t'+ alist[k]])\n            else:\n                SimPair[alist[k]+'\\t'+ blist[k]] = sim_com(fv.loc[alist[k]], fv.loc[blist[k]])\n                D.append(SimPair[alist[k]+'\\t'+ blist[k]])\n        m = np.matrix(np.reshape(D, (nn, mm)))\n        D_rec = max(m.max(axis = 0).mean(), m.max(axis = 1).mean())\n    sim_v = np.append(sim_v,(D_send + D_rec + D_com)/t)\n\n\n    print('finish computing sim vec\\n')\n    ## apply pca on sim_v\n\n    sim_v_pca = pca.transform(sim_v.reshape(1,-1))\n\n    ## predict km cluster\n\n    np_label = km_pca.predict(sim_v_pca)\n\n    cl_index = np.where(km_pca.labels_ ==np_label)\n    if target in SimM_ind:\n        cl_index = (cl_index[0][np.where(cl_index[0]!=Mark2Ind[target])],)\n    sim_v_cl = sim_v[cl_index]\n\n    ## collaborative Filtering\n    print('Collaborative Filtering')\n    path = 'May-June/'\n    fname_nl = path +'namelist.txt'\n    with open(fname_nl, 'r') as f:\n        namelist = json.load(f)\n    ## down stream recommendation\n    score_mtx = scs.load_npz('May-June/A_sparse_send.npz')\n    Re_list = np.zeros(shape = (score_mtx.shape[1],))\n    score_mtx[score_mtx>0] = 1\n    Re_list = score_mtx[cl_index[0],].transpose().dot(sim_v_cl)/sim_v_cl.sum()\n\n    # for i in range(score_mtx.shape[1]):\n    #     # if (i%10000 ==0):\n    #     #     print(i)\n    #     score_vt = score_mtx[cl_index[0],i]\n    #     ## I use some filter to de-noise, to simplify, can just\n    #     ## Re_list[i] = score_vt.transpose().dot(sim_v_cl)/sim_v_cl.sum() or even without loop\n    #     ## Re_list = score_mtx[cl_index[0],].transpose().dot(sim_v_cl)/sim_v_cl.sum()\n    #     if (score_vt.count_nonzero()>4) and(sim_v_cl[score_vt.nonzero()[0]].sum()>0.7):\n    #         temp = (score_vt>0).toarray().reshape(len(sim_v_cl),)*sim_v_cl ##\n    #         temp[np.argsort(temp)[::-1][5:]] = 0\n    #         most_similar_company = temp\n    #         Re_list[i] = score_vt.transpose().dot(most_similar_company)/most_similar_company.sum()\n    fname_As = path + 'A_s.txt'\n    with open(fname_As, 'r') as f:\n        A_s = json.load(f)\n    target_down_stream =np.zeros(shape = (score_mtx.shape[1],))\n    if target in A_s:\n        for y in A_s[target]:\n            if (tr_te[target+'-'+y]==1):\n                target_down_stream[namelist.index(y)] = A_s[target][y]\n    count = 0\n    i = -1\n    Re_list_index = np.argsort(Re_list)[::-1]\n    print('Down stream company recommendation:')\n    Ds_rl = []\n    Ds_rl2 = []\n    while (count < Rec_dict_len):\n        i += 1\n        if (i >= 5000):\n            print(target)\n            break\n        if (target_down_stream[Re_list_index[i]] >0):\n            continue\n        if (namelist[Re_list_index[i]] ==target):\n            continue\n        Ds_rl.append((namelist[Re_list_index[i]], Re_list[Re_list_index[i]]))\n        Ds_rl2.append(namelist[Re_list_index[i]])\n        # print(namelist[Re_list_index[i]])\n        count +=1\n    Rec5000_dict[target]['Down'] = Ds_rl\n    # a = np.argsort(Re_list)[::-1][0:200]\n    # b = np.argsort(target_down_stream)[::-1][0:200]\n    # set.intersection(set(a),set(b))\n    ## up stream recommendation\n\n\n    score_mtx = scs.load_npz('May-June/A_sparse_rec.npz')\n    Re_list = np.zeros(shape = (score_mtx.shape[1],))\n    score_mtx[score_mtx>0] = 1\n    Re_list = score_mtx[cl_index[0],].transpose().dot(sim_v_cl)/sim_v_cl.sum()\n    # for i in range(score_mtx.shape[1]):\n    #     # if (i%10000 ==0):\n    #     #     print(i)\n    #     score_vt = score_mtx[cl_index[0],i]\n    #     if (score_vt.count_nonzero()>4) 
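# Aside: the scoring line above reduces to one sparse matrix-vector product; a
# hedged, standalone sketch with illustrative shapes and values:
#
#     import numpy as np
#     import scipy.sparse as scs
#     interactions = scs.csr_matrix(np.array([[1, 0, 1],
#                                             [0, 1, 1],
#                                             [1, 1, 0]], dtype=float))
#     cluster_sim = np.array([0.9, 0.5, 0.2])   # similarity of the target to each cluster member
#     scores = interactions.transpose().dot(cluster_sim) / cluster_sim.sum()
#     # scores[i] is high when companies similar to the target interacted with item i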
and(sim_v_cl[score_vt.nonzero()[0]].sum()>0.7):\n    #         temp = (score_vt>0).toarray().reshape(len(sim_v_cl),)*sim_v_cl\n    #         temp[np.argsort(temp)[::-1][5:]] = 0\n    #         most_similar_company = temp\n    #         Re_list[i] = score_vt.transpose().dot(most_similar_company)/most_similar_company.sum()\n    fname_Ar = path + 'A_r.txt'\n    with open(fname_Ar, 'r') as f:\n        A_r = json.load(f)\n    target_up_stream =np.zeros(shape = (score_mtx.shape[1],))\n    if target in A_r:\n        for y in A_r[target]:\n            if (tr_te[y+'-'+target]==1):\n                target_up_stream[namelist.index(y)] = A_r[target][y]\n    count = 0\n    i = -1\n    Re_list_index = np.argsort(Re_list)[::-1]\n    print('Up stream company recommendation:')\n    us_rl = []\n    us_rl2 = []\n    while (count < Rec_dict_len):\n        i += 1\n        if (i >= 5000):\n            print(target)\n            break\n        if (target_up_stream[Re_list_index[i]] >0):\n            continue\n        if (namelist[Re_list_index[i]] ==target):\n            continue\n        us_rl.append((namelist[Re_list_index[i]], Re_list[Re_list_index[i]]))\n        us_rl2.append(namelist[Re_list_index[i]])\n        # print(namelist[Re_list_index[i]])\n        count +=1\n    Rec5000_dict[target]['Up'] = us_rl\n    of_D.write(json.dumps(target)+'##?##'+json.dumps(Ds_rl2)+'\\n')\n    of_U.write(json.dumps(target)+'##?##'+json.dumps(us_rl2)+'\\n')\n    # print(target+'\\t', file = sys.stderr)\n    # print(Rec5000_dict[target], file = sys.stderr)\nwith open('output/Rec5000_fast_dict_'+str(Start_p)+'_'+str(End_p)+'.json', 'w') as of:\n    json.dump(Rec5000_dict, of)\nlogf = open('output/Rec5000_fast_dict_log.txt', 'w', encoding = 'utf8')\nlogf.write('produced by pred_newp_Rec5000_fast.py')\nlogf.write(str(ar.now()))\nlogf.close()\nof_D.close()\nof_U.close()\ntime2 = ar.now()\nprint(time2-time1)\n","sub_path":"demo/module/algorithm/pre_rec5000_fast.py","file_name":"pre_rec5000_fast.py","file_ext":"py","file_size_in_byte":12590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"389748","text":"#!/usr/bin/env python\nimport sys\nimport os\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nimport re\n\n\nclass Session:\n    \"\"\"\n    Class for a single EDGAR access session.\n    \"\"\"\n\n    def __init__(self, ip_address: str, start_timestamp: datetime) -> None:\n        self._ip_address = ip_address\n        self._starting_timestamp = self._last_request_timestamp = start_timestamp\n        self._count_request = 1\n\n    def record_request(self, request_timestamp: datetime) -> None:\n        \"\"\"\n        Record the occurrence of a webpage request by increasing the request count by one and \n        update the most recent timestamp.\n        \"\"\"\n        self._count_request += 1\n        self._last_request_timestamp = request_timestamp\n\n    def seconds_since_last_request(self, query_timestamp: datetime) -> int:\n        \"\"\"\n        Return the number of seconds since the last request.\n        \"\"\"\n        return int((query_timestamp - self._last_request_timestamp).total_seconds())\n\n    def output_session(self) -> str:\n        \"\"\"\n        Return the string that conforms to the coding challenge's required output format.\n\n        The fields on each line must be separated by a \",\".\n\n        - IP address of the user exactly as found in log.csv\n        - date and time of the first webpage request in the session (yyyy-mm-dd hh:mm:ss)\n        - date and time of the last webpage request in the session (yyyy-mm-dd hh:mm:ss)\n        - duration of the session in seconds\n        - count of webpage requests during the session\n        \"\"\"\n        timestamp_format = '%Y-%m-%d %H:%M:%S'\n        output = (\n            self._ip_address,\n            self._starting_timestamp.strftime(timestamp_format),\n            self._last_request_timestamp.strftime(timestamp_format),\n            str(int((self._last_request_timestamp - 
self._starting_timestamp).total_seconds()) + 1),\n            str(self._count_request)\n        )\n        return(','.join(output))\n\n\nclass SessionAgent:\n    \"\"\"\n    Class for processing an EDGAR log into a list of sessions.\n    \"\"\"\n    def __init__(self, *args):\n        \"\"\"\n        Expected arguments: EDGAR log file name, inactivity period file name, output file name\n        \"\"\"\n\n        if len(args) != 3 or set(map(type, args)) != {str}:\n            raise SyntaxError(\"SessionAgent expects 3 file names as arguments: EDGAR log, inactivity period, and output\")\n        \n        self._active_sessions = dict()\n        self._log_filename = args[0]\n        self._output_filename = args[2]\n        self._output_cache = str()\n\n        # read the inactivity period\n        try:\n            with open(args[1], \"r\") as inactivity_period_file:\n                self._inactivity_period_in_seconds = int(inactivity_period_file.read().strip())\n        except OSError as e:\n            raise type(e)(f\"Failed to open the inactivity period file: {args[1]}\")\n        except Exception as e:\n            raise type(e)(f\"Failed to interpret the inactivity period file. Check the contents of {args[1]} .\")\n\n        # clear and make an empty output file: will open for append\n        try:\n            os.makedirs(os.path.dirname(self._output_filename), exist_ok=True)\n            outputfile = open(self._output_filename, \"w\")\n            outputfile.close()\n        except Exception as e:\n            raise type(e)(f\"Failed to initialize the output file for writing: {self._output_filename}\")\n\n\n    def process_log(self) -> None:\n        \"\"\"\n        The nugget of this program! Convert the log into output in accordance with the requirements.\n        \"\"\"\n        # keep the file open for the duration of the SessionAgent's life\n        try:\n            logfile = open(self._log_filename, \"r\")\n        except Exception as e:\n            raise type(e)(f\"Failed to open the log file: {self._log_filename}\")\n\n        logfile.readline() # discard the first line of the CSV\n        timestamp_of_previous_cleanup = datetime(1970, 1, 1, 0, 0, 0) # keep track of the previous iteration's timestamp\n\n        # log structure: ip,date,time,zone,cik,accession,extention,code,size,idx,norefer,noagent,find,crawler,browser\n        for logline in logfile:\n            if logline.strip() == '':\n                continue\n\n            ip_address, request_timestamp_str = re.match(\"^(.*?),(.*?,.*?),\", logline).group(1,2)\n            request_timestamp = datetime.strptime(request_timestamp_str, \"%Y-%m-%d,%H:%M:%S\")\n\n            # First check if we are dealing with a new timestamp. Flush the logs and clean up old sessions.\n            if (request_timestamp > timestamp_of_previous_cleanup):\n                self._flush(request_timestamp)\n                timestamp_of_previous_cleanup = request_timestamp # update the previous cleanup time\n\n            # This IP address does not have a session on record. Instantiate a new session.\n            if ip_address not in self._active_sessions.keys():\n                self._active_sessions[ip_address] = Session(ip_address, request_timestamp)\n\n            # This IP address has a session currently on record. Check to see if it's expired: if it is, output the session \n            # and create a new instance in its place. 
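# Aside: a minimal sketch of the expiry rule applied below; a session ends once
# the gap since its last request exceeds the inactivity period, and reported
# durations are inclusive of both endpoint seconds (hence the "+ 1" above):
#
#     from datetime import datetime
#     inactivity_period = 2  # seconds, illustrative
#     last_request = datetime(2017, 6, 30, 0, 0, 1)
#     new_request = datetime(2017, 6, 30, 0, 0, 5)
#     gap = int((new_request - last_request).total_seconds())
#     expired = gap > inactivity_period  # True here: 4 > 2, so the session is closed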
If it hasn't, add to the active session.\n            else:\n                if self._active_sessions[ip_address].seconds_since_last_request(request_timestamp) \\\n                        > self._inactivity_period_in_seconds:\n                    self._output_cache += self._active_sessions[ip_address].output_session() + \"\\n\"\n                    self._active_sessions[ip_address] = Session(ip_address, request_timestamp)\n\n                else:\n                    self._active_sessions[ip_address].record_request(request_timestamp)\n        \n        # End of file -- flush for the last time\n        self._flush()\n\n\n    def _flush(self, timestamp: datetime=datetime(2999,12,31,23,59,59)) -> None:\n        \"\"\"\n        Check all sessions and clean up those that are no longer active as of the timestamp provided as an argument.\n        Append all cached output to the output file. \n        \"\"\"\n        # open the output file for appending\n        try:\n            with open(self._output_filename, \"a\") as outputfile:\n                # iterate over all active sessions and delete if no longer active\n                sessions_to_delete = list()\n\n                for ip_address in self._active_sessions.keys():\n                    if self._active_sessions[ip_address].seconds_since_last_request(timestamp) \\\n                            > self._inactivity_period_in_seconds:\n                        self._output_cache += self._active_sessions[ip_address].output_session()\n                        self._output_cache += \"\\n\"\n                        sessions_to_delete.append(ip_address)\n                \n                for ip_address in sessions_to_delete:\n                    del self._active_sessions[ip_address]\n\n                outputfile.write(self._output_cache)\n                self._output_cache = str()\n\n        except Exception as e:\n            raise type(e)(f\"Failed to append to the output file: {self._output_filename}\")\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 4:\n        sys.stderr.write('Usage: %s <log csv> <inactivity period file> <output file>\\n' % sys.argv[0])\n        sys.exit(1)\n\n    foo = SessionAgent(sys.argv[1], sys.argv[2], sys.argv[3])\n    foo.process_log()","sub_path":"insight_testsuite/temp/src/sessionization.py","file_name":"sessionization.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"556656237","text":"\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport random\nimport math\nfrom Target import Target\nfrom General import Location\nfrom General import Threat\nfrom General import Speed\nfrom UAV_new import UAV_1125\nfrom Environmance import EnV\nfrom CS import Part\nfrom CS import CS\n#import tensorflow as tf\nfrom TPM import TPM\n# km\nimport csv\nimport datetime\nfrom General import Globalvar\nimport os\nenv = EnV(100,100)\nthreat_init = [\n    [1,10,40,3],\n    [2,30,85,4],\n    [3,60,30,6],\n    [4,30,15,3],\n    [5,70,85,3]\n]\ntarget_init_ = [\n    [1,80,70,1,3,0,0],\n    [2,15,30,1,3,0,0],\n    [3,45,50,1,2,0,0],\n    [4,70,20,1,4,0,0],\n    [5,30,40,2,3,0.03,0],\n    [6,15,90,2,1,0.02,0],\n    [7,40,10,2,3,0.04,0],\n    [8,85,30,2,5,0.05,0],\n    [9,8,15,3,2,0.01,1],\n    [10,55,20,3,3,0.02,1.7],\n    [11,60,85,3,4,0.025,4],\n    [12,25,70,3,3,0.03,5],\n    [13,65,60,4,1,0,0],\n    [14,20,30,4,2,0,0],\n    [15,40,80,4,1,0,0],\n    [16,75,40,4,2,0,0],\n    #add target\n    [17, 5, 70, 5, 3, 0, 0],\n    [18, 70, 5, 5, 3, 0, 0],\n    [19, 40, 40, 5, 2, 0, 0],\n    [20, 20, 20, 5, 4, 0, 0]\n]\n\n\nloop_max = 10 # number of simulation runs\nfind_num = 0\nuav_total = 10 # total number of UAVs\n#attack_list_250 = np.zeros(250)\n#coverage_all_250 = np.zeros(250)\n\nattack_list_250 = np.zeros(10000)\ncoverage_all_250 = np.zeros(10000)\n\ncoverage_all = 0\nfile_data_out1 = open(\"uav_num_attack\"+\".txt\",\"w\")\nfile_data_out2 = open(\"coverage\"+\".txt\",\"w\")\ndisable_time = 150\nfile_data_out3 = open(\"target_95_time.txt\",\"w\")\nfor loop_num in range(loop_max):\n    attack_num_now = 0\n    coverage_now = 0\n    time_start = datetime.datetime.now()\n    coverage_matrix = np.zeros((100,100))\n\n    #exit(0)\n    target_list = []\n    threat_list = []\n    target_track_list = []\n    target_attack_list_x = []\n    target_attack_list_y = []\n    target_attack_list_ =[]\n    for i in target_init_:\n        t = Target(i[0],i[3],i[4],Location(i[1],i[2]),i[5]*10,i[6])\n        target_track_list.append([t.location.x])\n        target_track_list.append([t.location.y])\n        target_list.append(t)\n    for i in threat_init:\n        t = Threat(Location(i[1],i[2]),i[3])\n        threat_list.append(t)\n    UAV_init_ = [\n        [1,10,10,0],\n        [2,90,90,0],\n        [3,30,30,0],\n        [4,50,50,0],\n        [5,70,70,0],\n        [6,10,90,0],\n        [7, 30, 70, 0],\n        [8, 70, 30, 0],\n        [9, 90, 10, 0],\n        [10 , 20,20,0],\n        [11, 80 ,80 ,0],\n        [12, 20,80,0],\n        [13,80,20,0],\n        [14,20,20,0],\n        [15,80,80,0],\n        [16,40,60,0],\n        [17,60,40,0],\n        [18,35,80,0],\n        [19,35,35,0],\n        [20,80,35,0],\n        [21,5,95,0],\n        [22, 15, 85, 0],\n        [23, 25, 75, 0],\n        [24, 35, 65, 0],\n        [25, 45, 55, 0],\n        [26, 55, 45, 0],\n        [27, 65, 35, 0],\n        [28, 75, 25, 0],\n        [29, 85, 15, 0],\n        [30, 95, 5, 0]\n    ]\n\n\n\n\n\n\n    #\n    '''\n    for i in range(4):\n        while(1):\n            xx = math.floor(random.random()*100)\n            yy = math.floor(random.random()*100)\n            for m in threat_init:\n                if (xx - m[1])**2 + (yy-m[2])**2 < m[3] **2:\n                    continue\n            break\n        UAV_init_.append([i+6,xx,yy,0])\n    '''\n    #\n    UAV_list = []\n    UAV_track_list = []\n    tpm_i = TPM(100,100,target_init_)\n    sr_i = 0\n    for i in UAV_init_:\n        xx = math.floor(random.random() * 100)\n        yy = math.floor(random.random() * 100)\n        #u = UAV_1125(i[0],i[3],Location(xx,yy))\n        u = UAV_1125(i[0],i[3],Location(i[1],i[2]))\n        u.tpm = tpm_i\n        UAV_list.append(u)\n        UAV_track_list.append([u.location.x])\n        UAV_track_list.append([u.location.y])\n        for j in target_init_:\n            if j[3] <3:\n                s =0\n\n            elif j[3] ==4 or j[3] == 5:\n                continue\n            else:\n                s = j[5] * 10\n            t = Target(j[0], j[3], j[4], Location(j[1], j[2]),s, j[6])\n            u.target_list_local.append(t)\n        sr_i +=1\n        if sr_i >= uav_total:\n            break\n\n    cs = CS(uav_total,100)\n    cs.uav_list = UAV_list\n    #for i in range(250):\n    for i in range(250):\n        #print(i)\n        #\n        # inject a disturbance\n\n        if i == disable_time:\n            # disable roughly one third of the UAVs\n            unable_uav_num = 0\n            #while unable_uav_num <= len(UAV_list)//3:\n            while unable_uav_num <= 2:\n                random_uav = random.random() * uav_total\n                random_uav = math.floor(random_uav)\n                if UAV_list[random_uav].type == 0:\n                    #print(UAV_list[random_uav].num,\"unable\")\n                    UAV_list[random_uav].type =1\n                    unable_uav_num +=1\n\n        cs.update_search_map()\n        target_attack_know_num = 0\n        for j in range(9):\n            if target_list[j].type ==0:\n                target_attack_know_num += 1\n        if target_attack_know_num >= 8:\n            s_map_rel = cs.search_map_check()\n        #if i %3 == 1:\n        #    cs.uav_num_check()\n\n        for j in target_list:\n            for k in UAV_list:\n                if k.type >0 :\n                    continue\n                length = j.location.loc_length(k.location)\n                #if length <= 3 & j.type > 0:\n                if abs(j.location.x - k.location.x) <=3 and abs(j.location.y - k.location.y) <=3 and j.type > 0 :\n                    j.target_attack()\n                    target_attack_list_x.append(j.location.x)\n
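# Aside: a hedged numpy sketch of the 7x7 sensor-footprint update that the
# coverage loop below performs cell by cell, using clipped slices instead
# (grid size and UAV position are illustrative):
#
#     import numpy as np
#     coverage = np.zeros((100, 100))
#     ux, uy = 2, 98                             # a UAV near the grid corner
#     x0, x1 = max(ux - 3, 0), min(ux + 4, 100)  # clip the 7-cell window to the grid
#     y0, y1 = max(uy - 3, 0), min(uy + 4, 100)
#     coverage[x0:x1, y0:y1] = 1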
target_attack_list_y.append(j.location.y)\n target_attack_list_.append([j,k,i])\n for attack_i in range(250 - i):\n attack_list_250 [249 - attack_i] +=1\n attack_num_now += 1\n #update\n\n p = 0\n for j in UAV_list:\n if j.type >0:\n continue\n j.get_swarm_info(UAV_list)\n j.update_threat_list_local(threat_list)\n j.make_decision()\n\n #j.pheromone.update_auto(j.location)\n j.update_target_list_local()\n j.update_location_auto()\n UAV_track_list[2*p].append(j.location.x)\n UAV_track_list[2 * p + 1].append(j.location.y)\n p = p +1\n loc_uav_x = j.location.x\n loc_uav_y = j.location.y\n\n for x in range(7):\n for y in range(7):\n a_x = loc_uav_x - x +3\n a_y = loc_uav_y - y +3\n if a_x >= 0 and a_x < 100 and a_y >= 0 and a_y < 100:\n coverage_matrix[a_x,a_y] = 1\n\n # j.update_moving_direction_auto()\n p = 0\n for j in target_list:\n j.update_location_auto()\n target_track_list[2 * p].append (j.location.x)\n target_track_list[2 * p + 1].append(j.location.y)\n p = p + 1\n coverage_now = coverage_matrix.sum()\n coverage_all_250[i] += coverage_now\n write_str1 = str(attack_num_now) + \" \"\n write_str2 = str(coverage_now) + \" \"\n\n file_data_out1.write(write_str1)\n file_data_out2.write(write_str2)\n '''\n if attack_num_now >= 14:\n write_str3 = str(i) + \" \"\n file_data_out3.write(write_str3)\n break\n '''\n write_str1 = os.linesep\n write_str2 = os.linesep\n file_data_out1.write(write_str1)\n file_data_out2.write(write_str2)\n find_num += len(target_attack_list_x)\n coverage_all += coverage_matrix.sum()\n time_end = datetime.datetime.now()\n if loop_num % 5 == 0:\n print(loop_num)\n print((time_end - time_start).seconds)\n\n #print(coverage_matrix.sum())\nprint(find_num / loop_max)\nprint(attack_list_250)\nprint(coverage_all / 100 /100 / loop_max)\nprint(coverage_all_250)\n\n\n\n\nplt.figure()\ncolor_list = [\n'#F0F8FF',\n'#FAEBD7',\n'#00FFFF',\n'#7FFFD4',\n'#F0FFFF',\n'#F5F5DC',\n'#FFE4C4',\n'#000000',\n'#FFEBCD',\n'#0000FF',\n'#8A2BE2',\n'#A52A2A',\n'#DEB887',\n'#5F9EA0',\n'#7FFF00',\n'#D2691E',\n'#FF7F50',\n'#6495ED',\n'#FFF8DC',\n'#DC143C',\n'#00FFFF',\n'#00008B',\n'#008B8B',\n'#B8860B',\n'#A9A9A9',\n'#006400',\n'#BDB76B',\n'#8B008B',\n'#556B2F',\n'#FF8C00',\n'#9932CC',\n'#8B0000',\n'#E9967A',\n'#8FBC8F',\n'#483D8B',\n'#2F4F4F',\n'#00CED1',\n'#9400D3',\n'#FF1493',\n'#00BFFF',\n'#696969',\n'#1E90FF',\n'#B22222',\n'#FFFAF0',\n'#228B22',\n'#FF00FF',\n'#DCDCDC',\n'#F8F8FF',\n'#FFD700',\n'#DAA520',\n'#808080',\n'#008000',\n'#ADFF2F',\n'#F0FFF0',\n'#FF69B4',\n'#CD5C5C',\n'#4B0082',\n'#FFFFF0',\n'#F0E68C',\n'#E6E6FA',\n'#FFF0F5',\n'#7CFC00',\n'#FFFACD',\n'#ADD8E6',\n'#F08080',\n'#E0FFFF',\n'#FAFAD2',\n'#90EE90',\n'#D3D3D3',\n'#FFB6C1',\n'#FFA07A',\n'#20B2AA',\n'#87CEFA',\n'#778899',\n'#B0C4DE',\n'#FFFFE0',\n'#00FF00',\n'#32CD32',\n'#FAF0E6',\n'#FF00FF',\n'#800000',\n'#66CDAA',\n'#0000CD',\n'#BA55D3',\n'#9370DB',\n'#3CB371',\n'#7B68EE',\n'#00FA9A',\n'#48D1CC',\n'#C71585',\n'#191970',\n'#F5FFFA',\n'#FFE4E1',\n'#FFE4B5',\n'#FFDEAD',\n'#000080',\n'#FDF5E6',\n'#808000',\n'#6B8E23',\n'#FFA500',\n'#FF4500',\n'#DA70D6',\n'#EEE8AA',\n'#98FB98',\n'#AFEEEE',\n'#DB7093',\n'#FFEFD5',\n'#FFDAB9',\n'#CD853F',\n'#FFC0CB',\n'#DDA0DD',\n'#B0E0E6',\n'#800080',\n'#FF0000',\n'#BC8F8F',\n'#4169E1',\n'#8B4513',\n'#FA8072',\n'#FAA460',\n'#2E8B57',\n'#FFF5EE',\n'#A0522D',\n'#C0C0C0',\n'#87CEEB',\n'#6A5ACD',\n'#708090',\n'#FFFAFA',\n'#00FF7F',\n'#4682B4',\n'#D2B48C',\n'#008080',\n'#D8BFD8',\n'#FF6347',\n'#40E0D0',\n'#EE82EE',\n'#F5DEB3',\n'#FFFFFF',\n'#F5F5F5',\n'#FFFF00'\n]\nfor i in threat_list:\n r = i.r\n theta = 
np.arange(0,2*np.pi,0.01)\n    x = i.loc.x+r*np.cos(theta)\n    y = i.loc.y + r*np.sin(theta)\n    plt.plot(x,y)\n    plt.plot(x,-y)\nfor i in range(20):\n    plt.plot(target_track_list[2*i], target_track_list[2*i+1],'b-')\nfor i in range(uav_total):\n\n    plt.plot(UAV_track_list[2 * i], UAV_track_list[2 * i + 1],'k--', color=color_list[2*i+1])\nif target_attack_list_x :\n    plt.plot(target_attack_list_x,target_attack_list_y,'k+')\nplt.axis(\"scaled\")\nplt.axis([0,100,0,100])\n\nplt.savefig('1.png')\nfor i in target_attack_list_:\n    print ('Target',i[0].num,'in run ',i[2],'attacked by UAV',i[1].num,'in location (',i[0].location.x,',',i[0].location.y,')')\nprint(UAV_track_list)\n\nnp.savetxt(\"search_map.txt\",cs.search_map)\n\n\n","sub_path":"main_new.py","file_name":"main_new.py","file_ext":"py","file_size_in_byte":10659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"213921380","text":"import urllib.request\nimport urllib.parse # URL-encodes the search keyword\nfrom bs4 import BeautifulSoup\n\nbaseUrl = 'https://search.naver.com/search.naver?where=post&sm=tab_jum&query='\nplusUrl = input('Enter a search keyword. >> ')\nurl = baseUrl + urllib.parse.quote_plus(plusUrl)\n\nprint()\n\nhtml = urllib.request.urlopen(url).read()\nsoup = BeautifulSoup(html, 'html.parser')\n\ntitle = soup.find_all(class_='sh_blog_title')\n\nfor i in title:\n    print(i.attrs['title']) # look up the attribute\n    print(i.attrs['href'])\n    print()\n","sub_path":"naver_crawling.py","file_name":"naver_crawling.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"582940988","text":"import os\nimport numpy as np\nimport sys\nfrom menu import menu\nfrom file import LeerDatos, create_file, createJson\nfrom nodo import Nodo\nfrom visitados import isVisitado,ordenar\n\ndef sacarHijos(nodo):\n    aux= []\n    if nodo.lado:\n        for i,x in enumerate(nodo.estado[0]):\n            for j,y in enumerate(nodo.estado[0][i+1:]):\n                estadoAux= [[y for y in x] for x in nodo.estado]\n                valor1=estadoAux[0].pop(i+1+j)\n                valor2= estadoAux[0].pop(i)\n                estadoAux[1].append(valor1)\n                estadoAux[1].append(valor2)\n                if not(isVisitado(nodo,estadoAux)):\n                    #print(nodo.visitados, 'visitados')\n                    aux.append(Nodo(ordenar(estadoAux),[],nodo.visitados,nodo.valor+max(valor1,valor2 ),not(nodo.lado),nodo))\n    else:\n        for i,x in enumerate(nodo.estado[1]):\n            estadoAux= [[y for y in x] for x in nodo.estado]\n            valor = estadoAux[1].pop(i)\n            estadoAux[0].append(valor)\n            if not(isVisitado(nodo,estadoAux)):\n                aux.append(Nodo(ordenar(estadoAux),[],nodo.visitados,nodo.valor+valor,not(nodo.lado),nodo))\n    \n    return aux\ndef subirAPadre(nodo):\n    if nodo.padre != 'padre':\n        aux = nodo.padre\n        aux.visitados.append(nodo.estado)\n        return aux\n    return 'padre'\ndef arbol(nodo,final): \n    nodo.hijos = sacarHijos(nodo)\n    \n    if len(nodo.hijos) > 0 and not(np.array_equal(nodo.estado,final)):\n        for hijo in nodo.hijos:\n            # print(hijo.visitados, hijo.estado)\n            hijo.visitados = hijo.visitados + [hijo.estado]\n            arbol(hijo,final)\n        # print('=========')\n        # print('estado ',nodo.estado)\n        # print('visitados ',nodo.visitados)\n        # print('valor ',nodo.valor)\n    else:\n        nodo = subirAPadre(nodo)\n        if nodo.padre == 'padre':\n            return nodo\n    return nodo\ndef evaluarHijos(nodo):\n    return min([x.valor for x in nodo.hijos])\n\ndef dsf(tree,final,camino):\n    if len(tree.hijos) > 0 and not(np.array_equal(tree.estado,final)) and evaluarHijos(tree)<=17:\n        for hijo in tree.hijos: \n            dsf(hijo,final,camino+[hijo.estado,'===>'])\n
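# Aside: a self-contained sketch of the bounded depth-first search pattern that
# dsf/bsf implement here (expand a node only while its cheapest child stays
# within the cost bound, 17 in this script); the toy tree is illustrative:
#
#     def bounded_dfs(node, children, cost, goal, bound, path):
#         if node == goal:
#             print(path + [node], cost[node])
#         elif children.get(node) and min(cost[c] for c in children[node]) <= bound:
#             for child in children[node]:
#                 bounded_dfs(child, children, cost, goal, bound, path + [node])
#
#     children = {'a': ['b', 'c'], 'b': ['d'], 'c': []}
#     cost = {'a': 0, 'b': 5, 'c': 20, 'd': 10}
#     bounded_dfs('a', children, cost, 'd', 17, [])  # prints ['a', 'b', 'd'] 10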
elif np.array_equal(tree.estado,final):\n        print(camino, tree.valor)\ndef bsf(tree,final,camino):\n    aux =[]\n    aux_camino = []\n    for i,x in enumerate(tree):\n        if not(np.array_equal(x.estado,final)) and evaluarHijos(x)<=17:\n            aux += x.hijos\n            aux_camino += [ camino[i] + [x.estado] for y in x.hijos ]\n        elif np.array_equal(x.estado,final):\n            print(camino[i]+[x.estado], x.valor)\n    if len(aux)>0:\n        bsf(aux,final,aux_camino)\n\ndef main():\n    pathDatos = ''\n    if sys.platform == 'win32':\n        pathDatos = './../datos/puenteInicio.txt'\n    else:\n        pathDatos = '/home/paco/Documents/tsia3/python/datos/puenteInicio.txt'\n    \n    inicio = [LeerDatos(pathDatos)[0],[]]\n    final = [[],[1,2,5,10]]\n    nodo = Nodo(inicio)\n    tree = arbol(nodo,final)\n    create_file([createJson(tree)]) \n    if len(sys.argv) > 1 and sys.argv[1] == 'dsf':\n        dsf(tree,final,[tree.estado,'===>'])\n    else:\n        bsf([tree],final,[tree.estado])\nmain()","sub_path":"python/puente/puente.py","file_name":"puente.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"140315635","text":"import scrapy\nfrom scrapy.loader import ItemLoader\nfrom bs4 import BeautifulSoup\nfrom amazon.items import AmazonItem\n\n\nclass HairOilSpider(scrapy.Spider):\n    name = 'hair_oil'\n    custom_settings = {\n        'LOG_ENCODING': 'UTF-8',\n        'LOG_FILE': f'logs/{name}.log',\n        'LOG_FORMAT': '%(asctime)s [%(name)s] %(levelname)s: %(message)s',\n        'LOG_DATEFORMAT': '%Y-%m-%d %H:%M:%S',\n        'LOG_LEVEL': 'INFO',\n        'LOG_STDOUT': True\n    }\n\n    def start_requests(self):\n        urls = ['https://www.amazon.in/s?rh=n%3A3507139031&fs=true']\n\n        i = 1\n        while i < 40:\n            i += 1\n            urls.append(f'https://www.amazon.in/s?i=beauty&rh=n%3A3507139031&page={i}')\n\n        for url in urls: yield scrapy.Request(url=url, callback=self.parse)\n\n    def parse(self, response):\n        for href in response.xpath('//span[@data-component-type= \"s-product-image\"]//a[@class=\"a-link-normal s-no-outline\"]/@href').getall():\n            self.logger.info(href)\n            yield scrapy.Request('https://www.amazon.in' + href, callback=self.parse_product)\n\n    def parse_product(self, response):\n        l = ItemLoader(item=AmazonItem(), response=response)\n        row = {}\n        try:\n            row['Product Name'] = response.xpath('//span[@id=\"productTitle\"]/text()').extract_first().replace('\\n\\n\\n\\n\\n\\n\\n\\n', '').strip()\n        except:\n            row['Product Name'] = 'NA'\n        row['Product Url'] = response.url\n        try:\n            row['MRP'] = response.xpath('//span[@class=\"priceBlockStrikePriceString a-text-strike\"]/text()').extract_first().strip()\n        except:\n            row['MRP'] = 'NA'\n        try:\n            row['Sale'] = response.xpath('//span[@id=\"priceblock_ourprice\"]/text()').extract_first().strip()\n        except:\n            row['Sale'] = 'NA'\n        try:\n            row['Total Customer Reviews'] = response.xpath('//span[@id=\"acrCustomerReviewText\"]/text()').extract_first().strip()\n        except:\n            row['Total Customer Reviews'] = 'NA'\n        try:\n            description = ''\n            for des in response.xpath('//div[@id=\"productDescription\"]//p/text()').getall():\n                description = description + '\\n' + des.replace('\\n\\n\\n\\n\\n\\n\\n\\n\\n', '').strip()\n            row['Description'] = description\n        except:\n            row['Description'] = 'NA'\n        try:\n            for feature in response.xpath('//div[@id=\"detailBullets_feature_div\"]//ul//li'):\n                if 'Customer Reviews:' not in feature.xpath('.//span[@class=\"a-text-bold\"]/text()').extract_first():\n                    if 'Best Sellers Rank' not in feature.xpath('.//span[@class=\"a-text-bold\"]/text()').extract_first():\n                        row[feature.xpath('.//span[@class=\"a-text-bold\"]/text()').extract_first().replace(\n                            '\\n\\n\\n\\n:\\n\\n\\n', 
'')] = feature.xpath(\n                            './/span[@class=\"a-text-bold\"]//following::span/text()').extract_first()\n        except:\n            pass\n        soup = BeautifulSoup(response.text, \"html.parser\")\n        heading = \"Best Sellers Rank\"\n        get_text = soup.find('div', attrs={'id': 'detailBullets_feature_div'}).find_next('ul').find_next('ul').text\n        row[heading] = get_text.replace(heading, '').replace(':', '').strip()\n        try:\n            for feature_rating in response.xpath('//div[@data-hook=\"cr-summarization-attributes-list\"]//div[@data-hook=\"cr-summarization-attribute\"]'):\n                row[feature_rating.xpath('.//span/text()').extract_first().replace('\\n\\n\\n\\n:\\n\\n\\n', '')] = feature_rating.xpath('.//span//following::span/text()').extract_first()\n        except:\n            pass\n        try:\n            tag = ''\n            for tags in response.xpath('//div[@class=\"cr-lighthouse-terms\"]//span[@class=\"cr-lighthouse-term \"]/text()').getall():\n                tag = tag + ', ' + tags.replace(' \\n ', '').strip()\n\n            row['Product Tags'] = tag\n        except:\n            row['Product Tags'] = 'NA'\n        self.logger.info(row)\n        l.add_value('row', row)\n\n        yield l.load_item()\n","sub_path":"amazon/spiders/hair_oils.py","file_name":"hair_oils.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"350663705","text":"from peewee import *\r\n\r\ndb=SqliteDatabase('students.db')\r\n\r\nclass Student(Model):\r\n    username=CharField(max_length=255,unique=True)\r\n    points=IntegerField(default=0)\r\n\r\n    class Meta:\r\n        database = db\r\nstudents=[\r\n    {'username':'Rogger','points':10},\r\n    {'username':'Andres','points':11},\r\n    {'username':'Michael','points':12},\r\n    {'username':'Kripton','points':13}\r\n    ]\r\n\r\ndef add_student():\r\n    for i in students:\r\n        Student.create(username=i['username'],points=i['points'])\r\n\r\nif __name__ == '__main__':\r\n    db.connect() # connect to the database\r\n    db.create_tables([Student],safe=True)\r\n    add_student()\r\n","sub_path":"students.py","file_name":"students.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"360733538","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport requests\n\n\n# Proxy a request to the address that stores the app cover image and return the image it sends back\ndef get_image(request):\n    url = request.GET.get('url', None)\n\n    if not url:\n        response = HttpResponse('')\n        response.status_code = 404\n        return response\n\n    headers = {\n        \"Host\": \"imgs.gamersky.com\",\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n    }\n\n    remote_response = requests.get(url, headers=headers)\n\n    ext = url.split('.')[-1]\n\n    response = HttpResponse(remote_response.content, content_type=\"image/\" + ext)\n    response[\"Access-Control-Allow-Origin\"] = \"*\"\n\n    return response\n","sub_path":"360/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"416682098","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\n\n\nclass Encoder(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.model = models.resnet18(pretrained=True)\n        self.layers = list(self.model.children())[:-1]\n\n    def forward(self, x):\n        features=[x]\n        for layer in self.layers:\n            # print(features[-1].size())\n            features.append(layer(features[-1]))\n        return features\n\n\nclass UpSample(nn.Module):\n    def 
__init__(self, in_size, out_size):\n        super().__init__()\n        self.conv1 = nn.Conv2d(in_size, out_size, kernel_size=3, stride=1, padding=1)\n        self.act1 = nn.LeakyReLU(0.2)\n        self.conv2 = nn.Conv2d(out_size, out_size, kernel_size=3, stride=1, padding=1)\n        self.act2 = nn.LeakyReLU(0.2)\n\n    def forward(self, x, concat_with):\n        up_x = nn.functional.interpolate(x, size=(concat_with.shape[2], concat_with.shape[3]),\n                                         mode='bilinear', align_corners=True)\n        out = torch.cat((up_x, concat_with), dim=1)\n        out = self.conv1(out)\n        out = self.act1(out)\n        out = self.conv2(out)\n        out = self.act2(out)\n        return out\n\n\nclass DeconvNet(nn.Module):\n    def __init__(self, num_features=512, decoder_width=0.5):\n        super().__init__()\n        decoder_features = int(num_features * decoder_width)\n\n        self.conv1 = nn.Conv2d(num_features, decoder_features, kernel_size=1, stride=1, padding=1)\n\n        self.up1 = UpSample(decoder_features//1 + 256, decoder_features//2)\n        self.up2 = UpSample(decoder_features//2 + 128, decoder_features//4)\n        self.up3 = UpSample(decoder_features//4 + 64, decoder_features//8)\n        self.up4 = UpSample(decoder_features//8 + 64, decoder_features//16)\n        self.up5 = UpSample(decoder_features//16 + 3, decoder_features//32)\n\n        self.conv2 = nn.Conv2d(decoder_features//32, 1, kernel_size=3, stride=1, padding=1)\n\n    def forward(self, features):\n        x, x_0, x_1, x_2, x_3, x_4 = features[0], features[1], features[5], features[6], features[7], features[8]\n        out = self.conv1(x_4)\n        out = self.up1(out, x_3)\n        out = self.up2(out, x_2)\n        out = self.up3(out, x_1)\n        out = self.up4(out, x_0)\n        out = self.up5(out, x)\n        out = self.conv2(out)\n\n        return out\n\n\nclass Model(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.encoder = Encoder()\n        self.decoder = DeconvNet()\n\n    def forward(self, x):\n        return self.decoder(self.encoder(x))\n","sub_path":"Depth_estimation/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"578717623","text":"\"\"\"Quick Sort in Python\n\n=== Module description ===\nThis module contains an implementation of quicksort.\n\n=== Authorship info ===\n - Date: November 25, 2018\n - Author: Larry Shi\n\"\"\"\nfrom typing import List\nfrom typing import Any\nfrom typing import Tuple\n\n\ndef quicksort(obj: List) -> List:\n    \"\"\"Return a sorted list with the same elements as obj.\n\n    >>> lst = [3, 4, 1, 0, 8, 7]\n    >>> quicksort(lst)\n    [0, 1, 3, 4, 7, 8]\n    \"\"\"\n    if len(obj) < 2:\n        return obj[:]\n    else:\n        # Pick pivot to be first element.\n        pivot = obj[0]\n\n        # Partition rest of list into two halves\n        smaller, bigger = _partition(obj[1:], pivot)\n\n        # Recurse on each partition\n        smaller_sorted = quicksort(smaller)\n        bigger_sorted = quicksort(bigger)\n\n        return smaller_sorted + [pivot] + bigger_sorted\n\n\ndef _partition(obj: List, pivot: Any) -> Tuple[List, List]:\n    \"\"\"Return a partition of obj with the chosen pivot.\n\n    Return two lists, where the first contains the items in obj\n    that are <= pivot, and the second is the items in obj that are > pivot.\n    \"\"\"\n    smaller = []\n    bigger = []\n\n    for item in obj:\n        if item <= pivot:\n            smaller.append(item)\n        else:\n            bigger.append(item)\n\n    return smaller, bigger\n\n\nif __name__ == '__main__':\n    import doctest\n\n
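    # Aside: a small usage check before the doctests run -- quicksort should
    # agree with the built-in sorted() on arbitrary data (illustrative input).
    import random

    sample = [random.randint(0, 99) for _ in range(20)]
    assert quicksort(sample) == sorted(sample)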
doctest.testmod()\n","sub_path":"pyguide/implementations/csc148/sorting/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"474066847","text":"\"\"\"\nCheck whether the word lengths in each level match the pattern number preconfigured in the level table.\nThe function input is one row of the level table.\n\"\"\"\n\n\ndef checkpattern(levellist: list, targetpattern)->None:\n    levelnum = int(levellist[0])\n    targetpatternnumber = int(levellist[targetpattern])\n    wordstartwith= targetpattern+1\n    purelevel = levellist[wordstartwith::]\n    purelevel=sorted(purelevel, key=lambda x : len(x))\n    patternnum = 0\n    for word in purelevel:\n        patternnum = patternnum * 10 + len(word)\n    if targetpatternnumber != patternnum:\n        print(\"level\", levelnum, \"does not match the pattern! Please check this level!\")\n    # print(\"Check pattern done!\")\n\n\ndef main() -> None:\n    \"\"\"Tests.\"\"\"\n    level16 = [\"16\",\"\",\"\",\"23334455\", \"ab\", \"abc\", \"abd\", \"edcd\", \"rtyu\", \"asdfg\", \"qwert\",\"abd\"]\n    level17 = [\"17\",\"\",\"2\",\"55667\", \"werty\", \"asdfg\", \"qwertr\", \"sdsdsd\", \"tttttt\"]\n    checkpattern(level16, 3)  # column 3 holds the expected length pattern\n    checkpattern(level17, 3)\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"checkpattern.py","file_name":"checkpattern.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"473881739","text":"# Implement me.\nimport re\n\ndef histo(filename):\n    data = None\n    try:\n        with open(filename, 'r') as f:\n            data = f.read()\n    except Exception as e:\n        return None\n    \n    print(\"regexing\")\n\n    data = re.sub(\"[\\\"\\:;,\\.\\-\\+=/\\\\\\|\\[\\]\\{\\}\\(\\)\\*\\^&]\", \"\", data)\n\n    print(\"printing\")\n    data = data.split()\n    \n    counts = {}\n    for x in data:\n        x = x.lower()\n        if x not in counts:\n            counts[x] = 0\n        counts[x] += 1\n\n    counts = sorted(counts.items(), key=lambda x : x[1], reverse=True)\n    counts = {x[0]:x[1] for x in counts[:20]}\n\n    for key, value in counts.items():\n        print(key, end='\\t\\t')\n        for x in range(0,value):\n            print('#', end='')\n\n        print()\n\nfilename = 'robin.txt'\nhisto(filename)\n","sub_path":"applications/histo/histo.py","file_name":"histo.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"256783706","text":"'''\n# Derived from: https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenetv2.py (modified to make it mobilenetv1)\n\n==============================================================================\nTexas Instruments (C) 2018-2019\nAll Rights Reserved\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 
PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n==============================================================================\nSome parts of the code are borrowed from: https://github.com/pytorch/vision\nwith the following license:\n\nBSD 3-Clause License\n\nCopyright (c) Soumith Chintala 2016,\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''\n\nimport torch\nimport logging\nimport numpy as np\nimport warnings\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.utils import get_root_logger\nfrom mmdet.models.builder import BACKBONES\nfrom torchvision.edgeailite import xnn\n\n###################################################\n__all__ = ['MobileNetV1LiteBase', 'MobileNetV1Lite', 'mobilenet_v1_lite']\n\n\n###################################################\nclass ModelConfig(xnn.utils.ConfigNode):\n def __init__(self):\n super().__init__()\n self.input_channels = 3\n self.num_classes = None\n self.width_mult = 1.\n self.expand_ratio = 6\n self.strides = (2,2,2,2,2)\n self.activation = xnn.layers.DefaultAct2d\n self.use_blocks = False\n self.kernel_size = 3\n self.dropout = False\n self.linear_dw = False\n self.layer_setting = None\n self.out_indices = None\n self.shortcut_channels = (32,128,256,512,1024)\n self.frozen_stages = 0\n self.extra_channels = None\n self.act_cfg = None\n\n @property\n def shortcut_strides(self):\n encoder_stride = np.prod(self.strides)\n s_strides = (2,4,8,16,encoder_stride)\n return s_strides\n\ndef get_config():\n return ModelConfig()\n\nmodel_urls = {\n 'mobilenet_v1': None,\n}\n\n\nclass MobileNetV1LiteBase(BaseModule):\n def __init__(self, 
BlockBuilder, model_config, pretrained=None, init_cfg=None):\n \"\"\"\n MobileNet V1 main class\n \"\"\"\n super().__init__(init_cfg)\n\n self.model_config = model_config\n self.num_classes = self.model_config.num_classes\n\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be setting at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n\n # strides of various layers\n s0 = model_config.strides[0]\n s1 = model_config.strides[1]\n s2 = model_config.strides[2]\n s3 = model_config.strides[3]\n s4 = model_config.strides[4]\n\n if self.model_config.layer_setting is None:\n self.model_config.layer_setting = [\n # t, c, n, s\n [1, 32, 1, s0],\n [1, 64, 1, 1],\n [1, 128, 2, s1],\n [1, 256, 2, s2],\n [1, 512, 6, s3],\n [1,1024, 2, s4],\n ]\n\n # only check the first element, assuming user knows t,c,n,s are required\n if len(self.model_config.layer_setting) == 0 or len(self.model_config.layer_setting[0]) != 4:\n raise ValueError(f\"inverted_residual_setting should be non-empty or a 4-element list, got {self.model_config.layer_setting}\")\n\n # some params\n activation = self.model_config.activation\n width_mult = self.model_config.width_mult\n kernel_size = self.model_config.kernel_size\n\n # building first layer\n output_channels = xnn.utils.make_divisible_by8(self.model_config.layer_setting[0][1] * width_mult)\n features = [xnn.layers.ConvNormAct2d(3, output_channels, kernel_size=kernel_size, stride=s0, activation=activation)]\n channels = output_channels\n\n # building inverted residual blocks\n for t, c, n, s in self.model_config.layer_setting[1:]:\n output_channels = xnn.utils.make_divisible_by8(c * width_mult)\n for i in range(n):\n stride = s if i == 0 else 1\n block = BlockBuilder(channels, output_channels, stride=stride, kernel_size=kernel_size, activation=(activation,activation))\n features.append(block)\n channels = output_channels\n #\n #\n\n # building classifier\n if self.model_config.num_classes is not None:\n self.classifier = torch.nn.Sequential(\n torch.nn.Dropout(0.2) if self.model_config.dropout else xnn.layers.BypassBlock(),\n torch.nn.Linear(channels, self.num_classes),\n )\n #\n\n # make it sequential\n self.features = torch.nn.Sequential(*features)\n\n def forward(self, x):\n x = self.features(x)\n xnn.utils.print_once('=> feature size is: ', x.size())\n x = torch.nn.functional.adaptive_avg_pool2d(x, (1,1))\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n\n@BACKBONES.register_module\nclass MobileNetV1Lite(MobileNetV1LiteBase):\n def __init__(self, pretrained=None, init_cfg=None, **kwargs):\n model_config = get_config()\n for key, value in kwargs.items():\n if key == 'model_config':\n model_config = model_config.merge_from(value)\n elif key in ('out_indices', 'strides', 'extra_channels', 'frozen_stages', 'act_cfg'):\n setattr(model_config, key, value)\n #\n super().__init__(xnn.layers.ConvDWSepNormAct2d, model_config, pretrained=pretrained, init_cfg=init_cfg)\n\n self.extra = self._make_extra_layers(1024, self.model_config.extra_channels) \\\n if self.model_config.extra_channels else None\n\n # weights init\n xnn.utils.module_weights_init(self)\n\n # def init_weights(self, pretrained=None):\n # if pretrained is not None:\n # assert isinstance(pretrained, str), f'Make sure that the pretrained is correct. 
Got: {pretrained}'\n    #         logger = get_root_logger()\n    #         load_checkpoint(self, pretrained, strict=False, logger=logger)\n    #     else:\n    #         warnings.warn('No pretrained is provided.')\n\n    def forward(self, x):\n        if self.num_classes is not None:\n            x = super().forward(x)\n            return x\n        else:\n            in_shape = x.shape\n            x_list = []\n            for layer in self.features:\n                x = layer(x)\n                x_list.append(x)\n            #\n            out = []\n            shortcut_strides = self.model_config.shortcut_strides\n            for s_stride, short_chan in zip(shortcut_strides, self.model_config.shortcut_channels):\n                shape_s = xnn.utils.get_shape_with_stride(in_shape, s_stride)\n                shape_s[1] = short_chan\n                # do not want this to be traced by jit\n                shape_s = [int(s) for s in shape_s]\n                x_s = xnn.utils.get_blob_from_list(x_list, shape_s)\n                out.append(x_s)\n\n            if self.model_config.out_indices is not None:\n                selected_out = []\n                for i, o in enumerate(out):\n                    if i in self.model_config.out_indices:\n                        selected_out.append(o)\n                #\n            else:\n                selected_out = out\n            #\n            if self.extra:\n                for layer in self.extra:\n                    x = layer(x)\n                    selected_out.append(x)\n                #\n            #\n            if self.model_config.frozen_stages>0:\n                selected_out = [o.detach() for o in selected_out]\n            #\n            return selected_out\n        #\n\n\n    def _make_extra_layers(self, inplanes, outplanes, kernel_size=3):\n        act_cfg = self.model_config.act_cfg\n        act_dw = (act_cfg is None) or ('act_dw' not in act_cfg) or act_cfg['act_dw']\n        extra_layers = []\n        for i, out_ch in enumerate(outplanes):\n            activation = (act_dw, True)\n            layer = xnn.layers.ConvDWSepNormAct2d(inplanes, out_ch, stride=2, kernel_size=kernel_size, activation=activation)\n            extra_layers.append(layer)\n            inplanes = out_ch\n        #\n        return torch.nn.Sequential(*extra_layers)\n\n\n#######################################################################\ndef mobilenet_v1_lite(pretrained=False, progress=True, **kwargs):\n    \"\"\"\n    Constructs a MobileNetV1\n    Args:\n        pretrained (bool): If True, returns a model pre-trained on ImageNet\n        progress (bool): If True, displays a progress bar of the download to stderr\n    \"\"\"\n    model = MobileNetV1Lite(**kwargs)\n    if pretrained:\n        state_dict = xnn.utils.load_state_dict_from_url(model_urls['mobilenet_v1'], progress=progress)\n        model.load_state_dict(state_dict)\n    return model\n","sub_path":"mmdet/models/backbones/mobilenetv1_lite.py","file_name":"mobilenetv1_lite.py","file_ext":"py","file_size_in_byte":11751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"538843753","text":"import unittest\nfrom glob import glob\nfrom .OpcodeComputer import Computer\n\n\nclass OpcodeComputerTestCase(unittest.TestCase):\n    def test_day02_tests(self):\n        for fl in glob(\"day05/data/day02test*\"):\n            with open(fl, \"r\") as fh:\n                test_data = fh.read().splitlines()\n                computer = Computer(test_data[0])\n                computer.run()\n                self.assertEqual([int(val) for val in test_data[1].split(\",\")], computer.program)\n\n    def test_day05_tests(self):\n        for fl in glob(\"day05/data/day05test*\"):\n            with open(fl, \"r\") as fh:\n                test_data = fh.read().splitlines()\n                computer = Computer(test_data[0])\n                computer.run()\n                self.assertEqual([int(val) for val in test_data[1].split(\",\")], computer.program)\n\n    def test_inputoutput(self):\n        computer = Computer(\"3,0,4,0,99\")\n        computer.add_input(1337)\n        computer.run()\n        self.assertEqual(1337, computer.output)\n\n    def test_day05_part01_fullprogram(self):\n        with open(\"day05/data/full_program\", \"r\") as fh:\n            test_data = fh.read()\n        computer = Computer(test_data)\n        computer.add_input(1)\n        computer.run()\n
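        # Aside: Computer is imported from OpcodeComputer and not shown here; as a
        # rough illustration (an assumption about its internals), the day-02 opcode
        # subset reduces to a tiny dispatch loop:
        prog, ip = [1, 0, 0, 0, 99], 0
        while prog[ip] != 99:
            op, a, b, dst = prog[ip:ip + 4]
            prog[dst] = prog[a] + prog[b] if op == 1 else prog[a] * prog[b]
            ip += 4
        assert prog == [2, 0, 0, 0, 99]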
self.assertEqual(5346030, computer.output)\n\n    def test_day05_part02_equality(self):\n        with open(\"day05/data/day05equalitytest01\", \"r\") as fh:\n            for test_data in fh.read().splitlines():\n                test_cases = zip([7, 8, 9], [0, 1, 0])\n                for test_input, expected_outcome in test_cases:\n                    computer = Computer(test_data)\n                    computer.add_input(test_input)\n                    computer.run()\n                    self.assertEqual(expected_outcome, computer.output)\n\n    def test_day05_part02_inequality(self):\n        with open(\"day05/data/day05equalitytest02\", \"r\") as fh:\n            for test_data in fh.read().splitlines():\n                test_cases = zip([7, 8, 9], [1, 0, 0])\n                for test_input, expected_outcome in test_cases:\n                    computer = Computer(test_data)\n                    computer.add_input(test_input)\n                    computer.run()\n                    self.assertEqual(expected_outcome, computer.output)\n\n    def test_day05_part02_jumps(self):\n        with open(\"day05/data/day05jumptests\", \"r\") as fh:\n            for test_data in fh.read().splitlines():\n                test_cases = zip([0, 1, -1], [0, 1, 1])\n                for test_input, expected_outcome in test_cases:\n                    computer = Computer(test_data)\n                    computer.add_input(test_input)\n                    computer.run()\n                    self.assertEqual(expected_outcome, computer.output)\n\n    def test_day05_part02_bigtest(self):\n        with open(\"day05/data/day05part02test\", \"r\") as fh:\n            test_data = fh.read()\n        test_cases = zip([7, 8, 9], [999, 1000, 1001])\n        for test_input, expected_outcome in test_cases:\n            computer = Computer(test_data)\n            computer.add_input(test_input)\n            computer.run()\n            self.assertEqual(expected_outcome, computer.output)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"day05/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"178563465","text":"from PIL import ImageFile\nimport os\nimport numpy as np\nimport pickle\ndef get_img_size(file_path):\n    with open(file_path, \"rb\") as f:\n        ImPar=ImageFile.Parser()\n        chunk = f.read(2048)\n        count=2048\n        while chunk != b\"\":\n            ImPar.feed(chunk)\n            if ImPar.image:\n                break\n            chunk = f.read(2048)\n            count+=2048\n        #print(ImPar.image.size)\n        #print(count)\n        return ImPar.image.size\n\n\nfile_name_list = list()\nimg_sizes = np.zeros([18038,2])\nif __name__ == \"__main__\":\n    count = 0\n    for root, dirs, files in os.walk(\"../dataset_copy\"):\n        path = root.split(os.sep)\n        for file in files:\n            if '.jpg' in str(file).lower():\n                print(len(path) * '---', file)\n                file_path = os.path.join(root,file)\n                img_size = get_img_size(file_path)\n                img_sizes[count] = np.array(img_size)\n                file_name_list.append(file_path)\n                count += 1\n                print(count)\n    #pickle.dump(img_sizes, open('../data/img_sizes.p','wb'))\n    #pickle.dump(file_name_list, open('../data/file_name_list.p','wb'))\n\n\n    #img_sizes = pickle.load(open(\"../data/img_sizes.p\", 'rb'))\n    #file_name_list = pickle.load(open('../data/file_name_list.p','rb'))\n","sub_path":"code/imagesize.py","file_name":"imagesize.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"627478145","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport time\n\nfrom helpers import BrightnessAndContrastAuto, MorphClose\n\n\n\n\n\ndef onStonesTb(self):\n    thStone = cv2.getTrackbarPos('thStone', 'Stones')\n    thShadow = cv2.getTrackbarPos('thShadow', 'Stones')\n    minSizeMM = cv2.getTrackbarPos('Size(mm)', 'Stones')\n\n    h, w = image.shape[:2]\n\n    blur = cv2.GaussianBlur(image, (3, 3), 0)\n    # 
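    # Aside: a hedged, standalone sketch of the threshold -> findContours ->
    # area-filter pipeline this callback builds up below (values illustrative):
    #
    #     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #     _, bw = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
    #     contours, _ = cv2.findContours(bw, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    #     big = [c for c in contours if cv2.contourArea(c) > 2500]
    #     cv2.drawContours(img, big, -1, (0, 255, 0), 2)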
cv2.imshow(\"blur\", blur)\n\n # imgray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)\n\n\n\n saturation = MorphClose(blur, 1)\n saturation = BrightnessAndContrastAuto(saturation, 0)\n cv2.imshow(\"BrightnessAndContrastAuto\", saturation)\n\n thres = 100\n cont = []\n while not cont:\n ret, bwStones = cv2.threshold(saturation, thres, 255, cv2.THRESH_BINARY_INV)\n\n k = 6\n\n bwStones[:k, :] = 255\n bwStones[bwStones.shape[0] - k:, k:] = 255\n bwStones[:, :k] = 255\n bwStones[k:, bwStones.shape[1] - k:] = 255\n\n # cv2.imshow(\"threshold\", bwStones)\n\n contours_stone, hierarchy = cv2.findContours(bwStones, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n # c = max(contours_stone, key=cv2.contourArea)\n\n for c in contours_stone:\n if cv2.contourArea(c) > 2500:\n ex = False\n for x_y in c:\n if x_y[0][0] == h or x_y[0][1] == w or x_y[0][0] == 0 or x_y[0][1] == 0:\n ex = True\n if not ex:\n cont.append(c)\n thres += 1\n\n if thres > 255:\n break\n\n cv2.drawContours(saturation, cont, -1, (0, 0, 255), 2)\n # cv2.imshow(\"bwStones\", saturation)\n\n # blur2 = cv2.GaussianBlur(image, (0, 0), 2, 2)\n\n # hsv_image = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n #\n # saturation = hsv_image[..., 1]\n # brightness = hsv_image[..., 2]\n #\n # minSizePx = round(minSizeMM / pix2mm)\n # closerSize = minSizePx / 2.0\n #\n # # saturation = MorphClose(saturation, closerSize)\n # saturation = BrightnessAndContrastAuto(saturation, 1)\n #\n # ret, bwStones = cv2.threshold(saturation, thStone, 255, cv2.THRESH_BINARY_INV)\n # # cv2.imshow(\"threshold\", bwStones)\n #\n # # contours_stone, hierarchy = cv2.findContours(bwStones, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n # # c = max(contours_stone, key=cv2.contourArea)\n # cv2.drawContours(saturation, contours_stone, -1, (0, 0, 255), 2)\n # # cv2.imshow(\"Threshold Stones+Shadow on Saturation\", saturation)\n #\n # # if removeShadow:\n # # brightness = MorphClose(brightness, closerSize)\n # brightness = BrightnessAndContrastAuto(brightness, 1)\n #\n # ret, bwShadow = cv2.threshold(brightness, thShadow, 255, cv2.THRESH_BINARY)\n # # contours.clear()\n #\n # bwShadow[:k, :] = 255\n # bwShadow[bwStones.shape[0] - k:, k:] = 255\n # bwShadow[:, :k] = 255\n # bwShadow[k:, bwShadow.shape[1] - k:] = 255\n #\n #\n # contours_shadow, hierarchy = cv2.findContours(bwShadow, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n # cv2.drawContours(brightness, contours_shadow, -1, (0, 0, 255), 2)\n # # cv2.imshow(\"Threshold Shadow on Brightness\", brightness)\n #\n # cv2.bitwise_not(bwShadow, bwStones, bwStones)\n # # cv2.imshow(\"bwStones\", bwStones)\n\n dst = image.copy()\n # contours, hierarchy = cv2.findContours(bwStones, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # cv2.drawContours(dst, contours_stone + contours_shadow, -1, (255, 255, 255), 2)\n # c = max(cont, key=cv2.contourArea)\n cv2.drawContours(dst, cont, -1, (0, 255, 0), 2)\n\n # for i in range(len(contours)):\n # cv2.polylines(dst, contours[i], True, (0, 255, 0), 2)\n\n cv2.imshow(winName, dst)\n cv2.imwrite('result/1.jpg', dst)\n\n\nif __name__ == '__main__':\n knowDistancePX = 170\n knowDistanceMM = 150\n pix2mm = knowDistancePX / knowDistanceMM\n\n image = cv2.imread('data/stone2.jpg')\n # pad_size = 8\n # image = cv2.copyMakeBorder(\n # image, pad_size, pad_size, pad_size, pad_size, borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])\n\n # set defaults\n minSizeMM = 80 # width in millimetre\n thStone = 100 # max saturation for stones\n thShadow = 128 # max brightness for shadow\n removeShadow = 1 # try to remove shadows (1=Yes 
0=No)\n\n winName = 'Stones'\n cv2.imshow(winName, image)\n # cv2.createTrackbar(\"Size(mm)\", winName, minSizeMM, 100, onStonesTb)\n # cv2.createTrackbar(\"thStone\", winName, thStone, 255, onStonesTb)\n # cv2.createTrackbar(\"thShadow\", winName, thShadow, 255, onStonesTb)\n # cv2.createTrackbar(\"remove Shadow\", winName, removeShadow, 1, onStonesTb)\n\n\n\n\n\n\n onStonesTb(1)\n cv2.waitKey(0)\n","sub_path":"src/draw_contours4.py","file_name":"draw_contours4.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"158236060","text":"import time #needed for later\n\n\n# Project euler problem 5 is:\n# 2520 is the smallest number that can be divided by\n# each of the numbers from 1 to 10 without any remainder.\n# What is the smallest positive number that is evenly\n# divisible by all of the numbers from 1 to 20?\n# (https://projecteuler.net/problem=5)\n\n# Here is one way to solve the problem:\n# 1. guess that the answer is n\n# 2a. Check if guess is divisible by 1.\n# 2b. Check if guess is divisible by 2.\n# 2... Keep checking for divisibility until reaching n\n# 3a. If at any point guess is not divisble, increase guess\n# by 1, go back to step 2.a\n# 3b. If guess is divisble up to n, return guess.\n\n# The following function follows the above algorithm:\ndef euler5(n):\n guess = n\n max_divisor = 1\n while max_divisor <= n:\n if guess % max_divisor == 0:\n max_divisor+= 1\n else:\n guess+= 1\n max_divisor = 1\n return guess\nanswer = euler5(10)\nprint(\"euler5(10): \", answer)\n\n#==================================================\n# Problem 0\n\n# We want to be able to discuss how long this function takes to run.\n# We can measure this in time, or in number of loop iterations.\n# Let us start with loop iterations\n\n# Below is the euler5 function, modify it such that before it ends\n# it prints out the total number of loops needed to get the answer.\n\ndef euler5_loop_count(n):\n guess = n\n max_divisor = 1\n count = 0 #==================\n while max_divisor <= n:\n count+= 1\n if guess % max_divisor == 0:\n max_divisor+= 1\n else:\n guess+= 1\n max_divisor = 1\n print(\"loop count: \", count) #==================\n return guess\n# To test, use this, it should take 25 loops to get the answer\n# which is 12\nanswer = euler5_loop_count(4)\nprint(\"euler5(4): \", answer)\n\n# End Problem 0\n#==================================================\n\n#==================================================\n# Problem 1\n\n# We can also talk about how long a fucntion takes measured in seconds.\n# time.time() will return the number of seconds since 1/1/1970. This is\n# known as EPOCH time.\n\n# Below are 2 calls to euler5_loop_count. 
Add code above and below\n# each call to display the number of seconds taken to run each.\n\n# YOUR CODE HERE\nprint(\"Timing slow version:\")\nstart = time.time()\nanswer = euler5_loop_count(10)\nprint(\"euler5(10): \", answer)\n# YOUR CODE HERE\nprint(\"elapsed time: \", (time.time() - start))\n\n\n# YOUR CODE HERE\n#start = time.time()\n#answer = euler5_loop_count(20)\n#print(\"euler5(20): \", answer)\n# YOUR CODE HERE\n#print(\"elapsed time: \", (time.time() - start))\n\n# End Problem 1\n#==================================================\n\n\n#==================================================\n# Problem 2\n\n# Hopefully, you noticed that the loop count and time to\n# get the answer for 20 were both quite large.\n\n# Now we can look at the algorithm from the begining and\n# ask ourselves: can we make it faster?\n\n# Below is a copy of euler_5_loop_count, first, add\n# the code to count and print the number of loops from\n# Problem 0\n\n# Then, try to decrease the number of loops run.\n# Think about what you need to do each time to\n# have to reset your guess. Can you make it better?\n\n# An initial goal should be to get the loop count for\n# 20 down to 416,181,955.\n\n# But eventually, you should be able to get the loop count\n# for 20 down to 51,473,642.\ndef euler5_better(n):\n guess = n\n max_divisor = 1\n count = 0 #==================\n while max_divisor <= n:\n count+= 1\n if guess % max_divisor == 0:\n max_divisor+= 1\n else:\n guess+= n\n max_divisor = 1\n print(\"loop count: \", count) #==================\n return guess\n\nprint(\"\\nBetter version:\")\n# Dont forget to include time code as well\n# YOUR CODE HERE\nstart = time.time()\nanswer = euler5_better(20)\nprint(\"euler5(20): \", answer)\n# YOUR CODE HERE\nprint(\"elapsed time: \", (time.time() - start))\n\n\n#==================================================\n# An advanced approach\n\n# Perhaps the number above is not good enough for you.\n# Well, try this on for size!\n\n# This function will find the least common multiple of\n# 2 integers.\ndef lcm(max_divisor, guess):\n m = guess\n while m % max_divisor != 0:\n m+= guess\n return m\n\n# Can you use this to make your solution for euler5\n# even better?\n# Write a new version, and test it, measuring\n# both time and loop counts.\n# To keep track of loop counts, you may need\n# to consolodate the lcm function into your main\n# euler5 function.\n\n\ndef euler_5_2(n):\n guess = n\n max_divisor = 1\n while max_divisor<= n:\n guess = lcm(max_divisor, guess)\n max_divisor+= 1\n return guess\n#print(euler_5_2(2500))\n\n\n\n#lcm loop built into function\ndef euler_5_3(n):\n guess = n\n max_divisor = 1\n count = 0\n while max_divisor<= n:\n count+= 1\n new_guess = guess\n while new_guess % max_divisor != 0:\n count+= 1\n new_guess+= guess\n guess = new_guess\n max_divisor+= 1\n print(\"loop count: \", count)\n return guess\n\nprint(\"\\nBest version:\")\nstart = time.time()\nprint(euler_5_3(20))\nprint(\"elapsed time: \", (time.time() - start))\n\n\n\n# End advanced approach\n#==================================================\n","sub_path":"efficiency_lab_solved.py","file_name":"efficiency_lab_solved.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245375334","text":"#!/usr/bin/python3\n\"\"\"almost a circle\"\"\"\nimport json\nimport os\n\n\nclass Base:\n\n \"\"\"base class\"\"\"\n\n __nb_objects = 0\n\n def __init__(self, id=None):\n if id:\n self.id = id\n else:\n 
Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\"encode the dictionaries to json string\"\"\"\n\n if list_dictionaries is None or list_dictionaries == {}:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\"decode the json string to a list of dictionaries\"\"\"\n\n if json_string is None or json_string == '':\n return []\n else:\n return json.loads(json_string)\n\n @classmethod\n def load_from_file(cls):\n \"\"\"class method for load from from\"\"\"\n file_path = cls.__name__ + \".json\"\n if not os.path.exists(file_path):\n return []\n else:\n with open(file_path, 'r') as h:\n list = cls.from_json_string(h.read())\n list_rect = []\n for rect in list:\n list_rect.append(cls.create(**rect))\n return list_rect\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\"class method to save a file\"\"\"\n\n if list_objs is None:\n list_objs = []\n json_string_list = []\n for obj in list_objs:\n json_string_list.append(obj.to_dictionary())\n json_string = cls.to_json_string(json_string_list)\n with open(cls.__name__ + \".json\", \"w\") as f:\n f.write(json_string)\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"creat a dummy instance\"\"\"\n\n if cls.__name__ == 'Rectangle':\n dummy = cls(1, 1)\n dummy.update(**dictionary)\n if cls.__name__ == 'Square':\n dummy = cls(1)\n dummy.update(**dictionary)\n return dummy\n","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"551263539","text":"import numpy as np\nimport cv2 as cv\nfrom objloader_simple import *\n# import matplotlib.pyplot as plt\n\n# Load previously saved data\nwith np.load('R1.npz') as X:\n mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')]\n\n\n\ndef draw(img, corners, imgpts):\n corner = tuple(corners[0].ravel())\n img = cv.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)\n img = cv.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)\n img = cv.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)\n return img\ndef draw2(img, corners, imgpts):\n imgpts = np.int32(imgpts).reshape(-1,2)\n # draw ground floor in green\n img = cv.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3)\n # draw pillars in blue color\n for i,j in zip(range(4),range(4,8)):\n img = cv.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3)\n # draw top layer in red color\n img = cv.drawContours(img, [imgpts[4:]],-1,(0,0,255),3)\n return img\n\ncriteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\nobjp = np.zeros((6*7,3), np.float32)\nobjp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)\n# axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)\naxis = np.float32([[0,0,0], [0,3,0], [3,3,0], [3,0,0],[0,0,-3],[0,3,-3],[3,3,-3],[3,0,-3] ])\n\n\ncap = cv.VideoCapture(2)\nimg1 = cv.imread('fotos/box1.jpg',cv.IMREAD_GRAYSCALE) # queryImage\n\nwhile(True):\n # Capture frame-by-frame\n ret, img2 = cap.read()\n img2Gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n\n\n # Display the resulting frame\n \n if cv.waitKey(1) & 0xFF == ord('q'):\n break\n \n \n \n # Initiate ORB detector\n orb = cv.ORB_create()\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(img1,None)\n kp2, des2 = orb.detectAndCompute(img2Gray,None)\n pts = cv.KeyPoint_convert(kp1)\n pts3d = np.insert(pts, 2, 
1, axis=1)\n # print(\"kp1.x\")\n # print(len(pts))\n\n pts2d = cv.KeyPoint_convert(kp2)\n\n\n # create BFMatcher object\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n # Match descriptors.\n matches = bf.match(des1,des2)\n # Sort them in the order of their distance.\n matches = sorted(matches, key = lambda x:x.distance)\n # for m,n in matches:\n # if m.distance < 0.7*n.distance:\n # good.append(m)\n\n\n \n limite = 0\n distanciaMaxima = 40\n a = []\n b = []\n for i in range(len(matches)):\n a.append(matches[i].trainIdx)\n b.append(matches[i].queryIdx)\n # print(matches[i].distance)\n if(matches[i].distance < distanciaMaxima):\n limite = i-1\n\n # print(\"b:\")\n # print(b)\n\n pts2dd = np.asarray(pts2d) \n pts3dd = np.asarray(pts3d) \n\n pts2dd = pts2dd[a]\n pts3dd = pts3dd[b]\n\n\n\n # Draw first 10 matches.\n img3 = cv.drawMatches(img1,kp1,img2Gray,kp2,matches[:20],None,flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n print(limite)\n if limite > 5:\n print(\"RA\")\n ret,rvecs, tvecs = cv.solvePnP(pts3dd[:limite], pts2dd[:limite], mtx, dist)\n width = img1.shape[0]\n heigth = img1.shape[1]\n axis1 = np.float32([[0,0,1],[0,width,1],[heigth,0,1],[heigth,width,1]])\n # axis1 = np.float32([[0,0,1],[0,img1.width,1]])\n imgpts, jac = cv.projectPoints(axis1, rvecs, tvecs, mtx, dist)\n # print(imgpts[1][0][1])\n # print(imgpts[0][0][1])\n largura = int(imgpts[1][0][1] - imgpts[0][0][1])\n altura = int(imgpts[3][0][1] - imgpts[2][0][1])\n # print(largura)\n # print(altura)\n # cv.rectangle(img3,(imgpts[0][0][0],imgpts[0][0][1]),(imgpts[3][0][0],imgpts[3][0][1]),(0,255,0),3)\n if(int(imgpts[0][0][1]) > -10 and int(imgpts[0][0][1]) < 1000):\n print(int(imgpts[0][0][1]))\n # cv.rectangle(img2,(imgpts[0][0][0],imgpts[0][0][1]),(imgpts[3][0][0],imgpts[3][0][1]),(0,255,0),3)\n # project 3D points to image plane\n imgpts, jac = cv.projectPoints(10*axis, rvecs, tvecs, mtx, dist) \n img2 = draw2(img2,pts2d,imgpts)\n cv.imwrite('frame.png',img2)\n cv.imwrite('frameSaida.png',img3)\n print(\"frame\")\n cv.imshow('frame',img2)\n\n# When everything done, release the capture\ncap.release()\ncv.destroyAllWindows()\n\n\n\n","sub_path":"opencv/Realidade-Aumentada-master/src/webcam2.py","file_name":"webcam2.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"296045307","text":"def run_bd_series(code_type=\"Arepo_ENERGY\",res=\"256_20Mpc\",sub_id=234):\n\timport numpy as np\n\timport readsubf\n\timport os\n\timport bd\n\treload(bd)\n\n\timport glob\n\n\t#############\n\t#Control Parameters\n\tbase_folder = \"/n/hernquistfs1/mvogelsberger/ComparisonProject/\"\n\tout_folder = \"/output\"\n\t\n\tsnap_num = 314\n\n\t# Interactive mode?\n\tinteract = 0\n\n\t# Restrict analysis to only primary subhalos in given group?\n\tfirst_subs_only = 1\n\n\t# Analyze only subhalo matches?\n\tmatching = 0\n\n\t# Overwrite past analysis?\n\toverw = 1\n\n\n\t############################################################################################\n\tresnap = code_type+\"_\"+res+\"_resnap\"+str(snap_num).zfill(3)+'.dat'\n\tsnap_type = 1\n\n\tout_folder = \"/output\"\n\tbase = base_folder + res + \"/\" + code_type + out_folder\n\n\tsave_dir = code_type[0]+res+\"_test\"+\"/\"\n\tif not os.path.exists(save_dir):\n\t\tos.system('mkdir '+save_dir)\n\n\thalo = bd.calc_diskfrac(code_type=code_type,res=res,snap_num=snap_num,snap_type=snap_type, 
sub_id=sub_id,\\\n\tsave_dir=save_dir,snap_name=resnap,base=base,interact=interact)\n\n\treturn halo\n\t\n\n\nif __name__ == '__main__':\n run_bd_series()\n","sub_path":"Arepo-Gadget Disk Comparisons/bd_check.py","file_name":"bd_check.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47702679","text":"from 全局变量辅助 import *\nfrom 基础函数 import *\nfrom 变量 import *\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n#参考原C++军旗代码,python3.7复刻框架\n#概率表是完全复刻的,包括刷新机制和初始值与原来的一样\n#框架绝大部分采用中文编程,方便快速入门与维护(未来将写成维护手册),整个程序就是大一个注释\n#构建时间 19.7.24 -> ?\n#最后维护 19.8.2\n\n###本来除了行棋之外的东西在第2天结束的时候就写完了,项目迟迟没有发布一直拖到现在原因是:\n# Upper Confidence Bound Apply to Tree 真难\n# 本以为UCT和最大最小搜索差不多\n# 在粗略看完wls给的论文后夸下海口,\"看我在三天内让他跑起来”\n# 没想到在后续的工作进程中遇到了巨大的坎坷\n# 从0开始学习MCsl、UCB耗费了不少时间,(主要是因为一年多没学数学)\n# 然后在开源社区找了大量的UCT板子\n# 都是基于类围棋那种布子类的,局势比较单调\n# 反复研读资料,终于,及众家之所长,造了一个板子出来,\n# 虽然还不能跑,不过至少离成功又近了一步\n# 假期还有其他项目要写,上面监督的比较紧,UCT的研究进度要暂停一段时间了\n\n\n# 任何人可以随意修改框架(本体在github),希望有有志人士可以帮我提前完成他\n# github的本体是实时同步更新,如果取得了较大的进展,会尽可能第一时间同步到coding上(如果有机会的话)\n# 本项目基于GLWTPL许可证开源(https://github.com/me-shaon/GLWTPL)\n# 项目本体于github私人仓库(https://github.com/DOEMsy/JunQi),进不去因为没有开放\n\n#��译: python 3.7\n#编辑:vscode\n#不是很推荐用vs2017/2019修改本项目,无厘头的报错\n#本项目的全局变量采用全局dict,在引用的时候一定要相信自己的操作,不要盲目的相信IDE补全\n#除行棋以外的功能均已测试过了,可以跑,而且跑下来的问题均已修复\n\n#def main:\n输入消息 = val('输入消息',input().split())\nwhile 输入消息[0]!='END':\n if 输入消息[0] == 'INFO':\n 信息处理INFO()\n print(val('输出消息'))\n elif 输入消息[0] == 'START':\n 反馈布局START()\n print(val('输出消息'))\n #log######\n 概率表log()\n elif 输入消息[0] == 'GO':\n 信息处理GO指令()\n #行棋 : 复刻当前棋局和概率表 ->>{ 抛针得到完备棋局 -> UTC算法得到最优解([起点,终点]) } -> 选择重复最多的最优解 \n 行棋函数驱动()\n print(val('输出消息'))\n #log#####\n 概率表log()\n val('棋盘').display()\n elif 输入消息[0] == 'RESULT':\n 信息处理RESULT指令()\n elif 输入消息[0] == 'END':\n break\n else:\n break\n \n 输入消息 = val('输入消息',input().split())\n\n","sub_path":"基于UCT的军棋博弈/codes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126135507","text":"# -*- coding:utf-8 -*-\nimport datetime\nimport math\nimport time\nfrom math import *\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\nimport random\nimport glob\nimport numpy as np\nimport os\nimport cv2\nimport alphabet\n\nalphabet_str = alphabet.alphabet\n\n\n# 从文字库中随机选择n个字符\n\ndef sto_choice_from_info_str(quantity=7):\n random_str = \"\"\n chinese = \"皖沪津渝冀晋蒙辽吉黑苏浙京闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新港澳\"\n english = \"ABCDEFGHJKLMNPQRSTUVWXYZ\"\n numbers = \"0123456789\"\n random_str += random.choice(alphabet_str)\n random_str += random.choice(alphabet_str)\n\n\n for i in range(2, quantity):\n if random.random() > 0.5:\n random_str += random.choice(alphabet_str)\n else:\n random_str += random.choice(alphabet_str)\n\n # print(random_str)\n return random_str\n\n\ndef random_word_color(random_back_color):\n if random_back_color == 0 or random_back_color == 1:\n font_color = (255, 255, 255)\n noise = np.array([random.randint(0, 50), random.randint(0, 50), random.randint(0, 50)])\n font_color = (np.array(font_color) - noise).tolist()\n\n else:\n font_color = (0, 0, 0)\n noise = np.array([random.randint(0, 50), random.randint(0, 50), random.randint(0, 50)])\n font_color = (np.array(font_color) + noise).tolist()\n return tuple(font_color)\n\n\ndef cut_img(img, x, y):\n \"\"\"\n 函数功能:进行图片裁剪(从中心点出发)\n :param img: 要裁剪的图片\n :param x: 需要裁剪的宽度\n :param y: 需要裁剪的高\n :return: 返回裁剪后的图片\n \"\"\"\n 
x_center = random.randint(x // 2, img.size[0] - x // 2)\n y_center = random.randint(y // 2, img.size[1] - y // 2)\n new_x1 = x_center - x//2\n new_y1 = y_center - y//2\n new_x2 = x_center + x//2\n new_y2 = y_center + y//2\n new_img = img.crop((new_x1, new_y1, new_x2, new_y2))\n return new_img\n\n\n# 生成一张图\ndef create_an_image(bground_path, random_back_color):\n bground_list = os.listdir(bground_path)\n bground_list.remove(\"noise\")\n noise_path = bground_path + \"noise/\"\n noise_list = os.listdir(noise_path)\n noise_choice = random.choice(noise_list)\n bground = Image.open(bground_path + bground_list[random_back_color]).convert(\"RGB\")\n noise = Image.open(noise_path + noise_choice).convert(\"RGB\")\n bground = bground.resize((140, 32))\n noise1 = cut_img(noise, 140, 32)\n bground1 = cut_img(bground, 140, 32)\n final_img2 = Image.blend(bground1, noise1, 0.3)\n return final_img2\n\n\n# 选取作用函数\ndef random_choice_in_process_func():\n pass\n\n\n# 模糊函数\ndef darken_func(image):\n # .SMOOTH\n # .SMOOTH_MORE\n # .GaussianBlur(radius=2 or 1)\n # .MedianFilter(size=3)\n # 随机选取模糊参数\n\n filter_ = random.choice(\n [\n ImageFilter.BLUR,\n ImageFilter.MedianFilter(size=3),\n ImageFilter.SMOOTH,\n ImageFilter.SMOOTH_MORE]\n )\n\n image = image.filter(filter_)\n\n return image\n\n\n# 随机选取文字贴合起始的坐标 根据背景的尺寸和字体的大小选择\ndef random_x_y_1(bground_size, font_size):\n width, height = bground_size\n # 为防止文字溢出图片,x,y要预留宽\n x = random.randint(0, max(int(width - font_size * 7), 0))\n y = random.randint(0, int((height - font_size) / 2))\n return x, y\n\n\ndef random_font_size():\n font_size = random.randint(17, 22)\n return font_size\n\n\ndef random_font():\n font_path = './font/Chinese/'\n\n font_list = os.listdir(font_path)\n random_font = random.choice(font_list)\n\n return font_path + random_font\n\n\ndef rotate_img(img, angle_range=10):\n height, width = img.shape[:2]\n heightNew = int(width * fabs(sin(radians(angle_range))) + height * fabs(cos(radians(angle_range))))\n widthNew = int(height * fabs(sin(radians(angle_range))) + width * fabs(cos(radians(angle_range))))\n\n matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), angle_range, 1)\n\n matRotation[0, 2] += (widthNew - width) / 2\n matRotation[1, 2] += (heightNew - height) / 2\n imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(0, 0, 0))\n return imgRotation\n\n\ndef sp_noise(image,prob):\n '''\n 添加椒盐噪声\n prob:噪声比例\n '''\n output = np.zeros(image.shape,np.uint8)\n thres = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n output[i][j] = 0\n elif rdn > thres:\n output[i][j] = 255\n else:\n output[i][j] = image[i][j]\n return output\n\n\ndef crop_img(img):\n pts1 = np.float32([[0, 0], [140, 0], [0, 32], [140, 32]])\n x1 = random.randint(-3, 10)\n y1 = random.randint(-3, 3)\n x2 = random.randint(-3, 10)\n y2 = random.randint(-3, 3)\n x3 = random.randint(-3, 10)\n y3 = random.randint(-3, 3)\n x4 = random.randint(-3, 10)\n y4 = random.randint(-3, 3)\n pts2 = np.float32([[0 + x1, 0 + y1], [140 - x2, 0 + y2], [0 + x3, 32 - y3], [140 - x4, 32 - y4]])\n M = cv2.getPerspectiveTransform(pts1, pts2)\n img_crop = cv2.warpPerspective(img, M, (140, 32))\n return img_crop\n\ndef main(save_path, num):\n random_word = sto_choice_from_info_str()\n # print(random_word)\n # 生成一张背景图片,已经剪裁好,宽高32*280\n random_back_color = random.randint(0, 4)\n # 4 黄色\n # 3 绿色\n # 2 绿色\n # 1 蓝色\n # 0 黑色\n # print(random_back_color)\n raw_image = create_an_image('./background/', 
random_back_color)\n\n # 随机选取字体大小\n font_size = random_font_size()\n # print(font_size)\n # 随机选取字体\n font_name = random_font()\n font_color = random_word_color(random_back_color)\n\n # 随机选取文字贴合的坐�?x,y\n draw_x, draw_y = random_x_y_1(raw_image.size, font_size)\n # 将文本贴到背景图\n font = ImageFont.truetype(font_name, font_size)\n draw = ImageDraw.Draw(raw_image)\n draw.text((draw_x, draw_y), random_word, fill=font_color, font=font)\n\n # 随机选取作用函数和数量作用于图片\n\n if draw_x < 40 and font_size < 15 and random.randint(1, 10) < 10:\n params = [1 - float(random.randint(1, 2)) / 51,\n 0,\n 0,\n 0,\n 1 - float(random.randint(1, 5)) / 50,\n float(random.randint(1, 4)) / 1200,\n 0.00005,\n float(random.randint(1, 4)) / 1150\n ]\n raw_image = raw_image.transform((140, 32), Image.PERSPECTIVE, params)\n\n if draw_x < 40 and font_size < 20 and random.randint(1, 10) < 7:\n params = [1 - float(random.randint(1, 2)) / 51,\n 0,\n 0,\n 0,\n 1 - float(random.randint(1, 4)) / 50,\n float(random.randint(1, 4)) / 1150,\n 0.00005,\n float(random.randint(1, 4)) / 1150\n ]\n raw_image = raw_image.transform((140, 32), Image.PERSPECTIVE, params)\n\n # raw_image = raw_image.rotate(0.3)\n # 保存文本信息和对应图片名 #with open(save_path[:-1]+'.txt', 'a+', encoding='utf-8') as file:\n img = cv2.cvtColor(np.asarray(raw_image), cv2.COLOR_RGB2BGR)\n if random.random() < 0.4:\n rotate_angle = random.randint(-5, 5)\n img = rotate_img(img, rotate_angle)\n if random.random() < 0.4:\n img = crop_img(img)\n if random.random() < 0.4:\n img = sp_noise(img, 0.01)\n image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n if random.random() < 0.4:\n random_p = random.random() + 0.5\n image = image.point(lambda p: p * random_p)\n if random.random() < 0.7:\n random_Gauss = random.random() + 0.5\n image = image.filter(ImageFilter.GaussianBlur(radius=random_Gauss))\n if random.randint(1, 10) < 5:\n image = darken_func(image)\n # file.write('train_set/' + str(num) + '.png ' + random_word + '\\n')\n image.save(save_path + '/img/img_{}.jpg'.format(num))\n savename = save_path + '/txt/img_{}.txt'.format(num)\n savef = open(savename, 'w', encoding=\"utf-8\")\n savef.write(random_word + \"\\n\" + str(random_back_color))\n savef.close()\n\n\nif __name__ == '__main__':\n\n # 图片标签\n total = 100000\n prev_time = time.time()\n for num in range(0, total):\n # print(num)\n main('./train_set', num)\n time_left = datetime.timedelta(seconds=(total - num) * (time.time() - prev_time) / (num + 1))\n if num % 1000 == 0:\n print('[%d/%d], [time_left: %s]' % (num, total, time_left))\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":8671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"353216611","text":"from tkinter import *\nimport random\nfrom tkinter import messagebox\n\n# pady or padxm is used for spacing between 2 widgets\n# ipadx, ipady for internal padding(like in entry widget)\n\nwords = [\"nptoyh\", 'avja', 'twisf', 'diani', 'cdanaa', 'nehimac', 'phees', 'hopne',\n 'yubr', 'anir', 'nhica', 'rhaic', 'dlhei', 'mircaea', 'eeslp', 'belitutgh',\n 'buce', 'emony', 'pytma', 'koobeton', 'tolapp', 'ssraui',\n ]\n\nanswers = ['python', 'java', 'swift', 'india', 'canada', 'machine', 'sheep', 'phone',\n 'ruby', 'iran', 'china', 'chair', 'delhi', 'america', 'sleep', 'tubelight',\n 'cube', 'money', 'paytm', 'notebook', 'laptop', 'russia',\n ]\n\nnum = random.randrange(0, 22, 1)\nscore = 0\n# wrong = 0\n\ndef default():\n global words, answers, num\n 
lbl.config(text=words[num])\n\n\ndef check():\n global words, score, wrong, answers, num\n var = entry.get() # get the word from entry widget\n\n if var == answers[num]:\n score = score + 1\n scorelbl.config(text=\"Current Score : \" + str(score))\n messagebox.showinfo(\"Success\", \"Hurrrey ! Right answer\")\n reset()\n\n elif len(var) == 0:\n messagebox.showwarning(\"NULL\", \"Please enter something\")\n\n else:\n messagebox.showerror(\"Error\", \"Oooops ! Wrong answer\")\n entry.delete(0, END) # automatically delete the wrong word so that we can re enter\n\n\ndef reset():\n global words, answers, num\n num = random.randrange(0, 22, 1)\n lbl.config(text=words[num])\n entry.delete(0, END)\n\n\nroot = Tk()\n\nroot.title(\"Game\")\nroot.geometry(\"400x400+250+150\")\nroot.configure(background=\"black\")\nroot.resizable(0,0)\n\nscorelbl = Label(root, text=\"Current Score : 0\", font=(\"comic sans ms\", 17), fg=\"yellow\", bg=\"black\")\nscorelbl.pack()\n\nlbl = Label(root, text=\"Your here\", font=(\"verdana\", 18), bg=\"black\", fg=\"white\")\nlbl.pack(pady=20)\n\nentry = Entry(root, font=(\"verdana\", 16), bd=6, bg=\"powder blue\", justify=CENTER)\nentry.pack(ipady=5, ipadx=5)\n\nbtncheck = Button(root, text=\"Check\", font=(\"comic sans ms\", 16), width=14, bg=\"grey\", relief=GROOVE, command=check)\nbtncheck.pack(pady=35)\n\nbtnreset = Button(root, text=\"Reset\", font=(\"comic sans ms\", 16), width=14, bg=\"grey\", relief=GROOVE, command=reset)\nbtnreset.pack()\n\ndefault()\n\nroot.mainloop()\n","sub_path":"Project1(JumbledWords).py","file_name":"Project1(JumbledWords).py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"536302943","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n# This file contains functions used for multi-layered encryption\nfrom os import urandom\nfrom base64 import b64decode, b64encode\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import AES\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.backends import default_backend\n\nbackend = default_backend()\nBLOCK_SIZE: int = 16\nPADDING: str = '=' # war } vorher\n\ndef pad(msg: str) -> str:\n \"\"\"Pad message in order to have 16 bytes blocks\n\n :return: The padded message\n :rtype: bytes\n \"\"\"\n assert isinstance(msg,str), \"The variable msg must be a string\"\n # (BLOCK_SIZE - len(msg) % BLOCK_SIZE) * PADDING\n msg = msg + (BLOCK_SIZE - len(msg) % BLOCK_SIZE) * PADDING \n #print(len(msg))\n #print(msg)\n #print(type(msg))\n return msg\n \n\ndef gen_aes_key() -> bytes:\n \"\"\"Create new key usable by AES\n\n Generate a random secret key using urandom\n\n :return: The key encoded in base 64\n :rtype: bytes\n \"\"\"\n secret: bytes = urandom(BLOCK_SIZE)\n return b64encode(secret)\n\ndef gen_rsa_key():\n \"\"\"Create new keypair usable by RSA\n\n Returns a tuple with public key as the first value and private key as the second\n\n :return: The private and public keys in PEM\n :rtype: bytes, bytes\n \"\"\"\n\n new_key = RSA.generate(2048, e=65537)\n public_key: bytes = new_key.publickey().exportKey('PEM')\n private_key: bytes = new_key.exportKey('PEM')\n\n return public_key, private_key\n\ndef aes_encrypt(key: bytes, msg: str) -> str:\n \"\"\"Encrypt msg in AES with key\n\n :param key: The AES key encoded in base 64\n :param msg: The message to encrypt\n :type key: bytes\n :type msg: str\n :return: The encrypted message in base 64\n :rtype: bytes\n \"\"\"\n\n assert isinstance(msg,str), 
\"The variable msg must be a string\"\n assert isinstance(key,bytes), \"The variable key must be bytes\"\n\n padded_msg: str = pad(msg)\n keydigest = hashes.Hash(hashes.SHA256(),backend=backend)\n keydigest.update(key)\n cipher = AES.new(keydigest.finalize())\n encrypted: str = cipher.encrypt(padded_msg)\n encoded = b64encode(encrypted)\n print(encoded)\n return str(encoded, 'utf-8')\n\ndef aes_decrypt(key: bytes, msg: str) -> bytes:\n \"\"\"Decrypt msg using AES with key\n\n :param key: The AES key encoded in base 64\n :param msg: The message to decrypt encoded in base 64\n :type key: bytes\n :type msg: bytes\n :return: The cleartext\n :rtype: bytes\n \"\"\"\n\n assert isinstance(msg,str), \"The variable msg must be bytes\"\n assert isinstance(key,bytes), \"The variable key must be bytes\"\n keydigest = hashes.Hash(hashes.SHA256(),backend=backend)\n #print(msg)\n keydigest.update(key)\n uncipher = AES.new(keydigest.finalize())\n # Get the string representation\n #paddedmsg = pad(msg.decode('utf8','ignore'))\n #b64_msg = b64decode(paddedmsg)\n #print(b64_msg)\n decoded = b64decode(msg)\n decrypted: str = uncipher.decrypt(decoded)\n #print(decrypted)\n # Remove the padding put before\n #decrypted = decrypted.decode()\n padding = PADDING.encode()\n #decrypted = decrypted.rstrip(padding)\n return str(decrypted, 'utf-8')\n\ndef rsa_encrypt(pub_key: bytes, msg: str) -> str:\n \"\"\"Encrypts using RSA public key\n\n :param priv_key: The RSA private key\n :param msg: The encrypted message\n :type pub_key: bytes\n :type msg: str\n :return: The encrypted message\n :rtype: bytes\n \"\"\"\n\n #assert isinstance(msg,str), \"The variable msg must be a string\"\n assert isinstance(pub_key,bytes), \"The public key must be bytes\"\n #(msg.decode())\n pub_key_obj = RSA.importKey(pub_key)\n encrypted: str = pub_key_obj.encrypt(msg, \"\")[0]\n #print(encrypted)\n return encrypted\n\ndef rsa_decrypt(priv_key: bytes, msg: bytes) -> bytes:\n \"\"\"Decrypts using RSA private key\n\n :param priv_key: The RSA private key\n :param msg: The encrypted message\n :type priv_key: bytes\n :type msg: bytes\n :return: The cleartext\n :rtype: bytes\n \"\"\"\n\n assert isinstance(msg,bytes), \"The variable msg must be bytes\"\n assert isinstance(priv_key,bytes), \"The variable key must be bytes\"\n\n priv_key_obj = RSA.importKey(priv_key)\n decrypted: bytes = priv_key_obj.decrypt(msg)\n\n return decrypted\n\ndef aes_rsa_encrypt(aes_key: bytes, rsa_key: bytes, msg: str):\n \"\"\"Encrypts msg using both AES and RSA\n\n :param aes_key: The AES key\n :param rsa_key: The RSA public key\n :param msg: The message\n :type aes_key: bytes\n :type rsa_key: bytes\n :type msg: str\n :return: The encrypted AES key, the encrypted message\n :rtype: bytes, bytes\n \"\"\"\n\n assert isinstance(msg,str), \"The variable msg must be a string\"\n assert isinstance(rsa_key,bytes), \"The variable rsa_key must be bytes\"\n assert isinstance(aes_key,bytes), \"The variable aes_key must be bytes\"\n\n encrypted_msg: bytes = aes_encrypt(aes_key, msg)\n encrypted_key: bytes = rsa_encrypt(rsa_key, aes_key)\n return encrypted_key, encrypted_msg\n\ndef aes_rsa_decrypt(aes_key: bytes, rsa_key: bytes, msg: bytes) -> bytes:\n \"\"\"Decrypts using both AES and RSA\n\n :param aes_key: The encrypted AES key\n :param rsa_key: The RSA private key\n :param msg: The encrypted message\n :type aes_key: bytes\n :type rsa_key: bytes\n :type msg: bytes\n :return: The decrypted message\n :rtype: str\n \"\"\"\n\n assert isinstance(msg,bytes), \"The variable msg must be 
bytes\"\n assert isinstance(rsa_key,bytes), \"The variable rsa_key must be bytes\"\n assert isinstance(aes_key,bytes), \"The variable aes_key must be bytes\"\n\n decrypted_key: bytes = rsa_decrypt(rsa_key, aes_key)\n decrypted_msg = aes_decrypt(decrypted_key, msg)\n return decrypted_msg\n\ndef easy_encrypt(rsa_key: bytes, msg: bytes):\n \"\"\"Encrypts using both AES and RSA after generating the AES key itself\n\n :param rsa_key: The RSA public key\n :param msg: The message to encrypt\n :return: The encrypted AES key, the encrypted message\n :rtype: bytes, bytes\n \"\"\"\n\n assert isinstance(msg,bytes), \"The variable msg must be a string\"\n assert isinstance(rsa_key,bytes), \"The variable key must be bytes\"\n\n return aes_rsa_encrypt(gen_aes_key(), rsa_key, msg)\n","sub_path":"src/aes_rsa.py","file_name":"aes_rsa.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390415629","text":"class Solution:\n def shortestPalindrome(self, s):\n T = s + \"#\" + s[::-1]\n p = [0] * len(T)\n for i in range(1, len(T)):\n j = p[i-1]\n while j > 0 and T[j] != T[i]:\n j = p[j-1]\n if T[i] == T[j]:\n p[i] = j + 1\n return s[p[-1]:][::-1] + s","sub_path":"python/leetcode/214.py","file_name":"214.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456892444","text":"#dont forget to change key, modulus and string length.\n\nimport string\nalphabet = list(' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')\ndef encryptOrdecrypt():\n print('________________________')\n print('|Please select a choice:|')\n print('|1.decrypt--------------|')\n print('|2.encrypt--------------|')\n print('|3.bruteforce-----------|')\n print('|_______________________|')\n mode = int(input())\n\n return mode\ndef MessageToEncryptOrDecrypt():\n print('Please enter your message:')\n message = str.lower(input())\n return message\ndef bruteforce():\n num = 0\n if mode == 3:\n for shift in range(0,95):\n decryptedmessage = ''\n ls = list(message)\n newmessage = []\n\n for letter in ls:\n\n\n\n index = alphabet.index(letter)\n try:\n newIndex = index - shift\n except IndexError:\n newIndex = 95 - index - shift\n newmessage.append(alphabet[newIndex])\n num + 1\n print(\"your decrypted message is:\" + \"\".join(newmessage) + ' with a shift of:' + str(shift))\n\n if mode != 3:\n pass\ndef GetShift():\n if mode < 3 and mode > 0:\n shift = 0\n while True:\n print(\"enter your shift:\")\n shift = int(input())\n if shift >= 0 and shift <= 25:\n return shift\n if mode == '2' or mode == '1':\n EncryptedOrDecryptedMessage()\n\n if mode == 3:\n pass\n\n# just needed to add modulus to handle when the number went out of the range\n\ndef EncryptedOrDecryptedMessage(message, mode, shift):\n if mode == 2:\n Encryptedmessage = ''\n ls = list(message)\n newmessage = []\n for letter in ls:\n\n index = alphabet.index(letter)\n newIndex = index + shift\n newmessage.append(alphabet[newIndex%95])\n #return new encryoted message\n print(\"your encrypted message is:\" + \"\".join(newmessage))\n if mode == 1:\n decryptedmessage = ''\n ls = list(message)\n newmessage = []\n for letter in ls:\n\n\n index = alphabet.index(letter)\n newIndex = index - shift\n newmessage.append(alphabet[newIndex%95])\n #return new decrypted message\n print(\"your decrypted message is:\" + \"\".join(newmessage))\n\nmode = 
encryptOrdecrypt()\n\nmessage = MessageToEncryptOrDecrypt()\n\n\nshift = GetShift()\nbruteforce()\nEncryptedOrDecryptedMessage(message, mode, shift)\n","sub_path":"ceaserchipercustomlist.py","file_name":"ceaserchipercustomlist.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"546722021","text":"# https://www.reddit.com/r/explainlikeimfive/comments/2shh95/eli5markov_chain_monte_carlo_i_honestly_want_to/\nfrom random import randint\nimport numpy\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatrix = []\n\n\ndef randomTry(aDice, dDice, battleCount, aWin, dWin):\n    aList = []\n    dList = []\n    aDub = aWin\n    dDub = dWin\n    while aDice > 0:\n        aList.append(randint(1, 6))\n        aDice -= 1\n\n    while dDice > 0:\n        dList.append(randint(1, 6))\n        dDice -= 1\n    aList = sorted(aList)[aDice - battleCount:]\n    dList = sorted(dList)[dDice - battleCount:]\n\n    while battleCount > 0:\n        if aList[battleCount - 1] < dList[battleCount - 1]:\n            return aDub, 1 + dDub\n            break\n        battleCount -= 1\n    return 1 + aDub, dDub\n\n\n\ndef getWinRate(aDice, dDice, battleCount, aWin, dWin, tryCount):\n    while tryCount > 0:\n        aWin, dWin = randomTry(aDice, dDice, battleCount, aWin, dWin)\n        tryCount -= 1\n    print(aWin)\n    print(dWin)\n    return (aWin / (aWin + dWin))\n'''\na = int(input())\nb = int(input())\naWin = 0\ndWin = 0\nprint(getWinRate(a,b,min(a,b), aWin, dWin, 500000))\n'''\nfor a in range(1, 11):\n    list = []\n    for b in range(1, 11):\n        aWin = 0\n        dWin = 0\n        list.append(getWinRate(a, b, min(a,b), aWin, dWin, 5000000))\n    print(list)\n    matrix.append(list)\nprint(matrix)","sub_path":"riskMatrixForTerritorytake.py","file_name":"riskMatrixForTerritorytake.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"509187943","text":"\n\n#create file\ndef def_text_file(name, text):\n    file_path = 'G://file/python/test/'\n    full_path = file_path + name + '.txt'\n    file = open(full_path, 'w')\n    file.write(text)\n    file.close()\n    print('done! The text has been filtered')\n\n#test def_text_file()\n#def_text_file('test', 'hello world')\n\n\n#create filter\nfilter_word = 'cao'\nchang_word = '***'\ndef def_text_filter(word, censored_word = filter_word, changed_word = chang_word):\n    return word.replace(censored_word,changed_word)\n\n#test def_text_filter()\n#print(def_text_filter('wo cao cao coa co aoc ao coa co aoc aoc oa'))\n\n\n\n#save file\ndef def_save_file(name, text):\n    clean_text = def_text_filter(text)\n    def_text_file(name,clean_text)\n\n#input text\nname_file = input('输入你的你昵称:')\ntext_file = input('输入你的评论:')\n#print(name_file)\n#print(def_text_filter(text_file))\ndef_save_file(name_file, text_file)","sub_path":"PycharmProjects/test/def_text_censored.py","file_name":"def_text_censored.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"204872839","text":"#\n# Django field type for a Bitcoin Address\n#\n\nfrom django.forms.util import ValidationError\nfrom django import forms\nfrom . 
import utils\nimport re\n\n\nclass BCAddressField(forms.CharField):\n    default_error_messages = {\n        'invalid': 'Invalid Bitcoin address.',\n    }\n\n    def __init__(self, *args, **kwargs):\n        super(BCAddressField, self).__init__(*args, **kwargs)\n\n    def clean(self, value):\n        if not value and not self.required:\n            return None\n\n        if not value.startswith(u\"1\") and not value.startswith(u\"3\"):\n            raise ValidationError(self.error_messages['invalid'])\n        value = value.strip()\n\n        if \"\\n\" in value:\n            raise ValidationError(u\"Multiple lines in the bitcoin address\")\n\n        if \" \" in value:\n            raise ValidationError(u\"Spaces in the bitcoin address\")\n\n        if re.match(r\"[a-zA-Z1-9]{27,35}$\", value) is None:\n            raise ValidationError(self.error_messages['invalid'])\n        version = utils.get_bcaddress_version(value)\n        if version is None:\n            raise ValidationError(self.error_messages['invalid'])\n        return value\n\n","sub_path":"django_bitcoin/fields/BCAddressField.py","file_name":"BCAddressField.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"220147854","text":"\"\"\"Simple bank.\"\"\"\n\n\nclass Account:\n    \"\"\"Represent a bank account.\"\"\"\n\n    def __init__(self, name, balance):\n        \"\"\"\n        Class constructor. Each account has owner's name and starting balance.\n\n        :param name: account owner name. String.\n        :param balance: starting balance of account. Integer.\n        \"\"\"\n        self.name = name\n        self.balance = balance\n\n    def withdraw(self, amount):\n        \"\"\"\n        Withdraw money from account.\n\n        :param amount: amount to withdraw from account, has to be positive\n        and the balance can't go below 0.\n        \"\"\"\n        if amount > 0:\n            self.balance -= amount\n        if amount > self.balance:\n            self.balance = 0\n\n    def deposit(self, amount):\n        \"\"\"\n        Deposit money to account.\n\n        :param amount: amount to deposit to account, has to be positive\n        \"\"\"\n        if amount > 0:\n            self.balance += amount\n\n\n    def get_balance(self):\n        \"\"\"\n        Get account balance.\n\n        :return: balance in double form\n        \"\"\"\n        return self.balance\n\n    def get_name(self):\n        \"\"\"\n        Get account owner name.\n\n        :return: owner name in string form\n        \"\"\"\n        return self.name\n\n\npaul_account = Account(\"Paul\", 100.00)\njakob_account = Account(\"Jakob\", 500.00)\n\n\nprint(\"Initial balance\")\nprint(paul_account.get_balance())  # 100.0\nprint(jakob_account.get_balance())  # 500.0\nassert paul_account.get_balance() == 100\nassert jakob_account.get_balance() == 500\n\njakob_account.withdraw(250.00)\nassert jakob_account.get_balance() == 250\nprint(\"Jakob's balance is now \", jakob_account.get_balance())  # Jakob's balance is now 250.0\npaul_account.deposit(250.00)\nassert paul_account.get_balance() == 350\nprint(\"Paul's balance is now\", paul_account.get_balance())  # Paul's balance is now 350.0\n\nprint(\"Final state\")\nprint(paul_account.get_balance())  # 350.0\nprint(jakob_account.get_balance())  # 250.0\n","sub_path":"EX10A/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"138479086","text":"# -*- coding: utf-8 -*-\n# pragma pylint: disable=unused-argument, no-self-use\n\"\"\"Function implementation\"\"\"\n\nimport logging\nfrom resilient_lib import validate_fields, ResultPayload\nfrom resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n\nPACKAGE_NAME = \"pt_integration_c\"\n\n\nclass 
FunctionComponent(ResilientComponent):\n    \"\"\"Component that implements Resilient function 'pt_integration_c_process_added_artifact\"\"\"\n\n    def __init__(self, opts):\n        \"\"\"constructor provides access to the configuration options\"\"\"\n        super(FunctionComponent, self).__init__(opts)\n        self.options = opts.get(PACKAGE_NAME, {})\n\n    @handler(\"reload\")\n    def _reload(self, event, opts):\n        \"\"\"Configuration options have changed, save new values\"\"\"\n        self.options = opts.get(PACKAGE_NAME, {})\n\n    @function(\"pt_integration_c_process_added_artifact\")\n    def _pt_integration_c_process_added_artifact_function(self, event, *args, **kwargs):\n        \"\"\"Function: Processes the Artifact added. Just returns a success = True\"\"\"\n        try:\n            log = logging.getLogger(__name__)\n\n            # Instansiate ResultPayload\n            rp = ResultPayload(PACKAGE_NAME, **kwargs)\n\n            mandatory_fields = [\n                \"pt_int_artifact_id\",\n                \"pt_int_artifact_description\",\n                \"pt_int_artifact_value\"\n            ]\n\n            # Get the function inputs:\n            fn_inputs = validate_fields(mandatory_fields, kwargs)\n\n            log.info(\"Processing Artifact: %s\", fn_inputs.get(\"pt_int_artifact_id\"))\n\n            results_content = {\n                \"artifact_description\": fn_inputs.get(\"pt_int_artifact_description\")\n            }\n\n            results = rp.done(True, results_content)\n\n            log.info(\"Returning results to post-process script\")\n\n            # Produce a FunctionResult with the results\n            yield FunctionResult(results)\n        except Exception:\n            yield FunctionError()","sub_path":"resilient-circuits-performance-testing/pt_integration_c/pt_integration_c/components/funct_pt_integration_c_process_added_artifact.py","file_name":"funct_pt_integration_c_process_added_artifact.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"186859548","text":"#!/usr/bin/env python\n\"\"\"\nScript to get the mapping of SAML source name to GH login name\nUsed in part for ID, also auditing who's clicked the auth button.\n\"\"\"\n\nimport argparse\nfrom getpass import getpass\nimport sys\nimport requests\nfrom github3 import login\n\ndef parse_arguments():\n    \"\"\"\n    Look at the first arg and handoff to the arg parser for that specific\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Get SAML account mappings out of a GitHub org\")\n    parser.add_argument('org', type=str, help=\"The org to work on\",\n                        action='store')\n    parser.add_argument('--url', type=str, help='the graphql URL',\n                        action='store', default=\"https://api.github.com/graphql\")\n    parser.add_argument('--token', help='github token with perms to examine your org',\n                        action='store')\n    parser.add_argument('-f', type=str, help='File to store CSV to',\n                        action='store', default=None, dest='output')\n    args = parser.parse_args()\n    if args.token is None:\n        args.token = getpass('Please enter your GitHub token: ')\n    return args\n\ndef make_query(org, cursor=None):\n    \"\"\"\n    Make the org query for SAML ID's --- handling pagination\n    org --- the organization to query\n    cursor --- any previous query run to handle - default to null, assuming first run\n    return - the query with org and cursor embedded\n    \"\"\"\n    query = f'''\n{{\norganization(login: \\\"{org}\\\") {{\nsamlIdentityProvider {{\n  ssoUrl,\n  externalIdentities(first: 100, after: AFTER) {{\n    edges {{\n      node {{\n        guid,\n        samlIdentity {{\n          nameId\n        }}\n        user {{\n          login\n        }}\n      }}\n    }}\n    pageInfo {{\n      hasNextPage\n      endCursor\n    }}\n  }}\n}}\n}}\n}}'''.replace(\"AFTER\", f'\"{cursor}\"' if cursor else \"null\")\n    return query\n\ndef 
run_query(org, headers, url):\n    \"\"\"\n    Run a query through github's graphql API\n    And handling pagination... Note, the query has to have\n    a stanza like this to work:\n    pageInfo {{\n    hasNextPage\n    endCursor\n    }}\n\n    org -- the org to query\n    headers -- string - any headers needed for auth.\n    url -- graphql engpoint\n    return - either the JSON return, or an exception.\n    \"\"\"\n\n    cursor = None\n    has_next_page = True\n    results = {}\n    while has_next_page:\n        query = make_query(org, cursor)\n        # print(f'Query: {query}\\n\\n\\n')\n        request = requests.post(url=url, json={'query': query}, headers=headers)\n        jsonified = request.json()\n        # print(f'Result of this loop - {request.json()}')\n        if request.status_code == 200:\n            results.update(jsonified)\n        else:\n            raise Exception(f'Query failed to run by returning code of'\n                            f' {request.status_code}. {query}')\n        has_next_page = jsonified['data']['organization']\\\n            ['samlIdentityProvider']['externalIdentities']['pageInfo']['hasNextPage']\n        cursor = jsonified['data']['organization']\\\n            ['samlIdentityProvider']['externalIdentities']['pageInfo']['endCursor']\n\n    return results\n\ndef main():\n    \"\"\"\n    Query github org and return the mapping of the SAML to GH login\n    \"\"\"\n    args = parse_arguments()\n    if args.token is None:\n        args.token = getpass(\"Enter your PAT: \")\n\n    headers = {\"content-type\": \"application/json\", \"Authorization\": \"Bearer \" + args.token}\n\n\n\n    saml_dict = run_query(args.org, headers, args.url)\n    #Get rid of the overarching structures we don't care about in the results\n    saml_dict = saml_dict['data']['organization']\\\n        ['samlIdentityProvider']['externalIdentities']['edges']\n\n    # Have the SAML mapping - now let's get the whole list of users for the org\n    user_mapping = {}\n    gh_sess = login(token=args.token)\n    org = gh_sess.organization(args.org)\n    memberlist = org.members()\n    for user in memberlist:\n        user_mapping[user.login] = 'None'\n\n    # Now we have the users for the org, with None in the field for SAML name\n    # Go through saml, and match up the login to SAML id --- anyone without a\n    # SAML will keep \"None\" in the SAML field.\n    for line in saml_dict:\n        saml_name = line[\"node\"][\"samlIdentity\"][\"nameId\"]\n        user_mapping[line[\"node\"][\"user\"][\"login\"]] = saml_name\n\n    output = sys.stdout\n    if args.output is not None:\n        output = open(args.output, 'w')\n\n    print('SAML,GH Login', file = output)\n\n    for gh_name, ldap in user_mapping.items():\n        print(f'{ldap},{gh_name}', file = output)\n\n    if args.output is not None:\n        output.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"samlreport.py","file_name":"samlreport.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"589529157","text":"#! 
/usr/bin/env python\n\nfrom os import path\nimport sys\nimport subprocess\nimport argparse\nimport io\nimport platform\n\nVERSION = '1.4.0'\n\n\ndef is_valid_file(parser, arg):\n    if not path.isfile(arg):\n        parser.error(\"the file %s does not exist!\"%arg)\n    else:\n        return open(arg, 'r')\n\n\ndef llvm2bplParser():\n    parser = argparse.ArgumentParser(add_help=False)\n    parser.add_argument('-v', '--version', action='version', version='SMACK version ' + VERSION)\n    parser.add_argument('infile', metavar='',\n                        type=lambda x: is_valid_file(parser,x),\n                        help='input LLVM file')\n    parser.add_argument('-o', '--output', dest='outfile', metavar='', default='a.bpl',\n                        type=argparse.FileType('w'),\n                        help='output Boogie file (default: %(default)s)')\n    parser.add_argument('-d', '--debug', dest='debug', action=\"store_true\", default=False,\n                        help='turn on debug info')\n    parser.add_argument('--mem-mod', dest='memmod', choices=['flat', 'twodim'], default='flat',\n                        help='set the memory model (flat=flat memory model, twodim=two dimensional memory model)')\n    parser.add_argument('--mem-impls', dest='memimpls', action=\"store_true\", default=False,\n                        help='use procedure implementations for memory allocation')\n    return parser\n\n\ndef llvm2bpl(infile, debugFlag, memmod, memImpls):\n    \n    cmd = ['smack', '-source-loc-syms', '-mem-mod=' + memmod, infile.name]\n    if debugFlag: cmd.append('-debug')\n    if memImpls: cmd.append('-mem-mod-impls')\n    p = subprocess.Popen(cmd)\n\n    p.wait()\n    if p.returncode != 0:\n        print >> sys.stderr, \"SMACK encountered an error:\"\n        print >> sys.stderr, output[0:1000], \"... (output truncated)\"\n        sys.exit(\"SMACK returned exit status %s\" % p.returncode)\n\n    with open('a.bpl', 'r') as outputFile:\n        output = outputFile.read()\n\n    bplStartIndex = output.find('// SMACK-PRELUDE-BEGIN')\n    bpl = output[bplStartIndex:]\n    return bpl\n    \n\nif __name__ == '__main__':\n\n    # parse command line arguments\n    parser = argparse.ArgumentParser(description='Outputs a plain Boogie file generated from the input LLVM file.', parents=[llvm2bplParser()])\n    args = parser.parse_args()\n\n    bpl = llvm2bpl(args.infile, args.debug, args.memmod, args.memimpls)\n\n    # write final output\n    args.outfile.write(bpl)\n    args.outfile.close()\n\n","sub_path":"bin/llvm2bpl.py","file_name":"llvm2bpl.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"168151462","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 21 21:29:57 2018\n\n@author: fukui\n\"\"\"\n\nimport urllib.request\nimport json\nfrom lib import message\n\ndef _readLicense():\n    \"\"\"\n    Lambda 関数起動して有効なライセンスかどうかチェックする\n    \"\"\"\n    ## 失敗したらライセンス認証失敗\n    try:\n        ## monohiro.license ファイルの読み込み\n        f = open(\"monohiro.license\")\n        s = json.load(f)\n        user_id = s[\"userId\"]\n        return(user_id)\n    \n    except:\n        message.msgAuthErr(\"missfile\")\n        raise Exception('Error!')\n\n    \ndef monoAuth():\n    user_id = _readLicense()\n    url = \"https://13gwgepmkk.execute-api.ap-northeast-1.amazonaws.com/prod/monoauth\"\n    \n    params = {\n        'user_id': user_id\n        #'user_id': \"AB12CD34\"\n    }\n    \n    ## Get Request\n    req = urllib.request.Request('{}?{}'.format(url, urllib.parse.urlencode(params)))\n    with urllib.request.urlopen(req) as res:\n        body = res.read()\n        t = body.decode()\n        result = str(json.loads(t)[\"statusCode\"])\n    \n    if result == \"200\":\n        print(\"認証成功\")\n        return True, user_id\n    else:\n        message.msgAuthErr(\"wrong_userid\")\n        return False, user_id\n\n    \nif __name__ == \"__main__\":\n    try:\n        res = 
monoAuth()\n print(res)\n except:\n print(\"miss\")\n ","sub_path":"lib/monoauth.py","file_name":"monoauth.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244093309","text":"'''\r\nModel-based features selection\r\n用cv防止过拟合\r\n'''\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.svm import LinearSVC\r\nimport preprocessing\r\ncolumns=preprocessing.columns\r\nnames=np.array(columns)\r\nx=preprocessing.x_normalized\r\ny=preprocessing.y\r\n#Embedded1 基于惩罚项(l1 or l2)的特征选择\r\n'''\r\nSelectFromModel(estimation,threshold=None)基于特征重要性,选择重要性高于threshold的特征\r\nSequentialFeatureSelection(estimator,n_features_to_select=None,direction='forward'/'backward',cv=5,scoring='')\r\n对于SVM和逻辑回归,参数C控制稀疏性:C越小,被选中的特征越少\r\n'''\r\n'''\r\n如果如果特征数远远大于样本数的情况下,使用线性核就可以了.\r\n如果特征数和样本数都很大,例如文档分类,一般使用线性核, LIBLINEAR比LIBSVM速度要快很多.\r\n如果特征数远小于样本数,这种情况一般使用RBF.但是如果一定要用线性核,则选择LIBLINEAR较好,而且使用-s 2选项\r\n'''\r\n\r\n#使用LinearSCV penalty='l2'做base model 用SelectFromModel做feature select\r\n\r\nfrom sklearn.feature_selection import SelectFromModel\r\nmodel1=LinearSVC(C=0.01,penalty='l2',dual=False,random_state=0).fit(x,y)#越小惩罚力度越大\r\nsfm=SelectFromModel(model1).fit(x,y)\r\nprint('Features selected by SelectFromModel selection l2:',names[sfm.get_support()])\r\n\r\n#使用LinearSCV penalty='l2'做base model 用SFS做feature select\r\n\r\nfrom sklearn.feature_selection import SequentialFeatureSelector\r\nmodel=LinearSVC(C=0.01,penalty='l2',dual=False,random_state=0).fit(x,y)#C越小惩罚力度越大\r\nnames=np.array(columns)\r\nsfs_forward=SequentialFeatureSelector(model,n_features_to_select=7,scoring='accuracy',cv=5).fit(x,y)\r\nsfs_backward=SequentialFeatureSelector(model,n_features_to_select=7,scoring='accuracy',direction='backward',cv=5).fit(x,y)\r\nprint('Features selected by forward SFS selection l2:',names[sfs_forward.get_support()])\r\nprint('Features selected by backward SFS selection l2:',names[sfs_backward.get_support()])\r\n\r\n'''\r\nL1惩罚项降维的原理在于保留多个对目标值具有同等相关性的特征中的一个,所以没选到的特征不代表不重要。\r\n故,可结合L2惩罚项来优化。具体操作为:若一个特征在L1中的权值为1,选择在L2中权值差别不大且在L1中权值为0的特征构成同类集合,\r\n将这一集合中的特征平分L1中的权值,故需要构建一个新的模型:\r\n'''\r\nclass LS(LinearSVC):\r\n\tdef __init__(self, threshold=0.01, dual=False, tol=1e-4, C=0.01,\r\n fit_intercept=True, intercept_scaling=1, class_weight=None,\r\n random_state=0, max_iter=100,\r\n multi_class='ovr', verbose=0):\r\n\t\tself.threshold=threshold\r\n\t\tLinearSVC.__init__(self, penalty='l1', dual=dual, tol=tol, C=C,\r\n fit_intercept=fit_intercept, intercept_scaling=intercept_scaling, class_weight=class_weight,\r\n random_state=random_state, max_iter=max_iter,\r\n multi_class=multi_class, verbose=verbose)\r\n\t\t# 使用同样的参数创建L2逻辑回归\r\n\t\tself.l2 = LinearSVC(penalty='l2', dual=dual, tol=tol, C=C, fit_intercept=fit_intercept,\r\n\t\t intercept_scaling=intercept_scaling, class_weight=class_weight,\r\n\t\t random_state=random_state, max_iter=max_iter,\r\n\t\t multi_class=multi_class, verbose=verbose)\r\n\r\n\tdef fit(self, X, y, sample_weight=None):\r\n\t\t# 训练L1逻辑回归\r\n\t\tsuper(LS, self).fit(X, y, sample_weight=sample_weight)\r\n\t\tself.coef_old_ = self.coef_.copy()\r\n\t\t# 训练L2逻辑回归\r\n\t\tself.l2.fit(X, y, sample_weight=sample_weight)\r\n\r\n\t\tcntOfRow, cntOfCol = self.coef_.shape\r\n\t\t# 权值系数矩阵的行数对应目标值的种类数目\r\n\t\tfor i in range(cntOfRow):\r\n\t\t\tfor j in range(cntOfCol):\r\n\t\t\t\tcoef = self.coef_[i][j]\r\n\t\t\t\t# L1逻辑回归的权值系数不为0\r\n\t\t\t\tif coef != 0:\r\n\t\t\t\t\tidx = [j]\r\n\t\t\t\t\t# 
the corresponding coefficient in the L2 model\r\n\t\t\t\t\tcoef1 = self.l2.coef_[i][j]\r\n\t\t\t\t\tfor k in range(cntOfCol):\r\n\t\t\t\t\t\tcoef2 = self.l2.coef_[i][k]\r\n\t\t\t\t\t\t# the L2 coefficients differ by less than the threshold, and the corresponding L1 coefficient is 0\r\n\t\t\t\t\t\tif abs(coef1 - coef2) < self.threshold and j != k and self.coef_[i][k] == 0:\r\n\t\t\t\t\t\t\tidx.append(k)\r\n\t\t\t\t\t# average the weight coefficient over this group of features\r\n\t\t\t\t\tmean = coef / len(idx)\r\n\t\t\t\t\tself.coef_[i][idx] = mean\r\n\t\treturn self\r\n# feature selection with the combined L1+L2 penalized model as the base model\r\n# the parameter threshold is the allowed difference between weight coefficients\r\nmodel2=SelectFromModel(LS(threshold=0.5, C=0.01)).fit(x,y)\r\nprint('Features selected by SelectFromModel selection linearSVC_l1+l2:',names[model2.get_support()])\r\n\r\n#Embedded2: tree-model-based feature selection\r\n'''\r\nUse the SelectFromModel class together with ETC/RF models.\r\nRF finds the best split attribute within a random subset, while ExtraTrees picks split values completely at random, which is how the decision trees are split (random splits).\r\n'''\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\netc=ExtraTreesClassifier(random_state=0)\r\netc.fit(x,y)\r\nimportance=etc.feature_importances_\r\nmodel4=SelectFromModel(etc).fit(x,y)\r\nprint('Features selected by SelectFromModel ETC:',names[model4.get_support()])\r\netc_forward=SequentialFeatureSelector(etc,n_features_to_select=7,scoring='accuracy',cv=5).fit(x,y)\r\netc_backward=SequentialFeatureSelector(etc,n_features_to_select=7,scoring='accuracy',direction='backward',cv=5).fit(x,y)\r\nprint('Features selected by forward SFS selection ETC:',names[etc_forward.get_support()])\r\nprint('Features selected by backward SFS selection ETC:',names[etc_backward.get_support()])\r\n\r\n\r\n#Random Forest + SFS\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nrf=RandomForestClassifier(random_state=0)\r\nrf_forward=SequentialFeatureSelector(rf,n_features_to_select=7,scoring='accuracy',cv=5).fit(x,y)\r\nrf_backward=SequentialFeatureSelector(rf,n_features_to_select=7,scoring='accuracy',direction='backward',cv=5).fit(x,y)\r\nprint('Features selected by forward SFS selection rf:',names[rf_forward.get_support()])\r\nprint('Features selected by backward SFS selection rf:',names[rf_backward.get_support()])\r\nrf.fit(x,y)\r\n#importance_rf=rf.feature_importances_\r\n#Random Forest + SFM\r\nrf_model=SelectFromModel(rf).fit(x,y)\r\nprint('Features selected by SelectFromModel RF:',names[rf_model.get_support()])\r\n\r\n#LogisticRegression\r\n#L2-based feature selection\r\nfrom sklearn.linear_model import LogisticRegression\r\nlr1=LogisticRegression(C=0.1,penalty='l2',dual=False,random_state=0).fit(x,y)\r\nlr=SelectFromModel(lr1).fit(x,y)\r\nprint('Features selected by SelectFromModel selection lr-l2:',names[lr.get_support()])\r\nlr_forward=SequentialFeatureSelector(lr1,n_features_to_select=7,scoring='accuracy',cv=5).fit(x,y)\r\nlr_backward=SequentialFeatureSelector(lr1,n_features_to_select=7,scoring='accuracy',direction='backward',cv=5).fit(x,y)\r\nprint('Features selected by forward SFS selection lr-l2:',names[lr_forward.get_support()])\r\nprint('Features selected by backward SFS selection lr-l2:',names[lr_backward.get_support()])\r\n\r\n#l1+l2 for logistic\r\nclass LR(LogisticRegression):\r\n    def __init__(self, threshold=0.01, dual=False, tol=1e-4, C=1.0,\r\n                 fit_intercept=True, intercept_scaling=1, class_weight=None,\r\n                 random_state=None, solver='liblinear', max_iter=100,\r\n                 multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):\r\n\r\n        # threshold for considering weight coefficients similar\r\n        self.threshold = threshold\r\n        LogisticRegression.__init__(self, penalty='l1', dual=dual, tol=tol, C=C,\r\n                 fit_intercept=fit_intercept, intercept_scaling=intercept_scaling, class_weight=class_weight,\r\n                 random_state=random_state, solver=solver, 
max_iter=max_iter,\r\n                 multi_class=multi_class, verbose=verbose, warm_start=warm_start, n_jobs=n_jobs)\r\n        # create an L2 logistic regression with the same parameters\r\n        self.l2 = LogisticRegression(penalty='l2', dual=dual, tol=tol, C=C, fit_intercept=fit_intercept, intercept_scaling=intercept_scaling, class_weight = class_weight, random_state=random_state, solver=solver, max_iter=max_iter, multi_class=multi_class, verbose=verbose, warm_start=warm_start, n_jobs=n_jobs)\r\n\r\n    def fit(self, X, y, sample_weight=None):\r\n        # train the L1 logistic regression\r\n        super(LR, self).fit(X, y, sample_weight=sample_weight)\r\n        self.coef_old_ = self.coef_.copy()\r\n        # train the L2 logistic regression\r\n        self.l2.fit(X, y, sample_weight=sample_weight)\r\n\r\n        cntOfRow, cntOfCol = self.coef_.shape\r\n        # the number of rows of the coefficient matrix equals the number of target classes\r\n        for i in range(cntOfRow):\r\n            for j in range(cntOfCol):\r\n                coef = self.coef_[i][j]\r\n                # the L1 coefficient is nonzero\r\n                if coef != 0:\r\n                    idx = [j]\r\n                    # the corresponding coefficient in the L2 model\r\n                    coef1 = self.l2.coef_[i][j]\r\n                    for k in range(cntOfCol):\r\n                        coef2 = self.l2.coef_[i][k]\r\n                        # the L2 coefficients differ by less than the threshold, and the corresponding L1 coefficient is 0\r\n                        if abs(coef1-coef2) < self.threshold and j != k and self.coef_[i][k] == 0:\r\n                            idx.append(k)\r\n                    # average the weight coefficient over this group of features\r\n                    mean = coef / len(idx)\r\n                    self.coef_[i][idx] = mean\r\n        return self\r\n# fit the selector and query it (the original queried the unrelated model2 after calling fit_transform)\r\nmodel5=SelectFromModel(LR(threshold=0.5, C=0.1)).fit(x,y)\r\nprint('Features selected by SelectFromModel selection LR_l1+l2:',names[model5.get_support()])","sub_path":"Embedded.py","file_name":"Embedded.py","file_ext":"py","file_size_in_byte":9687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"366119889","text":"#!/usr/bin/env python3\n\n# See LICENSE file for copyright and license details.\n# TUM CS Bot - https://github.com/ro-i/tumcsbot\n\n\"\"\"Wrapper around Zulip's Client class.\n\nClasses:\n--------\nClient   A wrapper around zulip.Client to be used by the plugins.\n         See the class doc for the additional attributes and methods.\n\"\"\"\n\nimport logging\nimport re\nimport time\n\nfrom collections.abc import Iterable as IterableClass\nfrom typing import cast, Any, Callable, Dict, IO, Iterable, List, Pattern, Optional, Set, Union\nfrom zulip import Client as ZulipClient\n\nfrom tumcsbot.lib import stream_names_equal, DB, Response, MessageType\n\n\nclass Client(ZulipClient):\n    \"\"\"Wrapper around zulip.Client.\n\n    Additional attributes:\n      id         direct access to get_profile()['user_id']\n      ping       string used to ping the bot \"@****\"\n      ping_len   len(ping)\n\n    Additional Methods:\n    -------------------\n    get_public_stream_names   Get the names of all public streams.\n    get_streams_from_regex    Get the names of all public streams\n                              matching a regex.\n    get_stream_name           Get stream name for provided stream id.\n    private_stream_exists     Check if there is a private stream with\n                              the given name.\n    send_response             Send one single response.\n    send_responses            Send a list of responses.\n    subscribe_all_from_stream_to_stream\n                              Try to subscribe all users from one public\n                              stream to another.\n    subscribe_users           Subscribe a list of user ids to a public\n                              stream.\n    \"\"\"\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        \"\"\"Override the constructor of the parent class.\"\"\"\n        super().__init__(*args, **kwargs)\n        self.id: int = self.get_profile()['user_id']\n        self.ping: str = '@**{}**'.format(self.get_profile()['full_name'])\n        self.ping_len: int = len(self.ping)\n        self.register_params: Dict[str, Any] = {}\n        self._db = DB()\n        self._db.checkout_table(\n            'PublicStreams', '(StreamName text primary key, Subscribed integer not null)'\n        )\n\n    def call_endpoint(\n        
self,\n        url: Optional[str] = None,\n        method: str = \"POST\",\n        request: Optional[Dict[str, Any]] = None,\n        longpolling: bool = False,\n        files: Optional[List[IO[Any]]] = None,\n        timeout: Optional[float] = None\n    ) -> Dict[str, Any]:\n        \"\"\"Override zulip.Client.call_endpoint.\n\n        This is the backend for almost all API-user facing methods.\n        Automatically resend requests if they failed because of the\n        API rate limit.\n        \"\"\"\n        result: Dict[str, Any]\n\n        while True:\n            result = super().call_endpoint(url, method, request, longpolling, files, timeout)\n            if not (result['result'] == 'error'\n                    and 'code' in result\n                    and result['code'] == 'RATE_LIMIT_HIT'):\n                break\n            secs: float = result['retry-after'] if 'retry-after' in result else 1\n            logging.warning('hit API rate limit, waiting for %f seconds...', secs)\n            time.sleep(secs)\n\n        return result\n\n    def call_on_each_event(\n        self,\n        callback: Callable[[Dict[str, Any]], None],\n        event_types: Optional[List[str]] = None,\n        narrow: Optional[List[List[str]]] = None,\n        **kwargs: Any\n    ) -> None:\n        \"\"\"Override zulip.Client.call_on_each_event.\n\n        Add additional parameters to pass to register().\n        See https://zulip.com/api/register-queue for the parameters\n        the register() method accepts.\n        \"\"\"\n        self.register_params = kwargs\n        super().call_on_each_event(callback, event_types, narrow)\n\n    def get_messages(self, message_filters: Dict[str, Any]) -> Dict[str, Any]:\n        \"\"\"Override zulip.Client.get_messages.\n\n        Defaults to 'apply_markdown' = False.\n        \"\"\"\n        message_filters['apply_markdown'] = False\n        return super().get_messages(message_filters)\n\n    def get_public_stream_names(self, use_db: bool = True) -> List[str]:\n        \"\"\"Get the names of all public streams.\n\n        Use the database in conjunction with the plugin \"autosubscriber\"\n        to avoid unnecessary network requests.\n        In case of an error, return an empty list.\n        \"\"\"\n        def without_db() -> List[str]:\n            result: Dict[str, Any] = self.get_streams(\n                include_public = True, include_subscribed = False\n            )\n            if result['result'] != 'success':\n                return []\n            return list(map(lambda d: cast(str, d['name']), result['streams']))\n\n        if not use_db:\n            return without_db()\n\n        try:\n            return list(map(\n                lambda t: cast(str, t[0]),\n                self._db.execute('select StreamName from PublicStreams')\n            ))\n        except Exception as e:\n            logging.exception(e)\n            return without_db()\n\n    def get_streams_from_regex(self, regex: str) -> List[str]:\n        \"\"\"Get the names of all public streams matching a regex.\n\n        The regex has to match the full stream name.\n        Note that Zulip handles stream names case insensitively at the\n        moment.\n\n        Return an empty list if the regex is not valid.\n        \"\"\"\n        if not regex:\n            return []\n\n        try:\n            pat: Pattern[str] = re.compile(regex, flags = re.I)\n        except re.error:\n            return []\n\n        return [\n            stream_name for stream_name in self.get_public_stream_names()\n            if pat.fullmatch(stream_name)\n        ]\n\n    def get_stream_name(self, stream_id: int) -> Optional[str]:\n        \"\"\"Get stream name for provided stream id.\n\n        Return the stream name as string or None if the stream name\n        could not be determined.\n        \"\"\"\n        result: Dict[str, Any] = self.get_streams(include_all_active = True)\n        if result['result'] != 'success':\n            return None\n\n        for stream in result['streams']:\n            if stream['stream_id'] == stream_id:\n                return cast(str, stream['name'])\n\n        return None\n\n    def get_user_ids_from_attribute(\n        self,\n        attribute: str,\n        values: Iterable[Any],\n        case_sensitive: bool = True\n    ) -> Optional[List[int]]:\n        \"\"\"Get the 
user ids from a given user attribute.\n\n        Get and return a list of user ids of all users whose profiles\n        contain the attribute \"attribute\" with a value present in\n        \"values\".\n        If case_sensitive is set to False, the values will be\n        interpreted as strings and compared case insensitively.\n        Return None on error.\n        \"\"\"\n        result: Dict[str, Any] = self.get_users()\n        if result['result'] != 'success':\n            return None\n\n        if not case_sensitive:\n            values = map(lambda x: str(x).lower(), values)\n\n        value_set: Set[Any] = set(values)\n\n        return [\n            user['user_id']\n            for user in result['members']\n            if attribute in user and (\n                user[attribute] in value_set if case_sensitive\n                else str(user[attribute]).lower() in value_set\n            )\n        ]\n\n    def get_user_ids_from_display_names(\n        self,\n        display_names: Iterable[str]\n    ) -> Optional[List[int]]:\n        \"\"\"Get the user ids from user display names.\n\n        Since there may be multiple users with the same display name,\n        the returned list of user ids may be longer than the given list\n        of user display names.\n        Return None on error.\n        \"\"\"\n        return self.get_user_ids_from_attribute('full_name', display_names)\n\n    def get_user_ids_from_emails(\n        self,\n        emails: Iterable[str]\n    ) -> Optional[List[int]]:\n        \"\"\"Get the user ids from user email addresses.\n\n        Return None on error.\n        \"\"\"\n        return self.get_user_ids_from_attribute('delivery_email', emails, case_sensitive = False)\n\n    def get_users(self, request: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:\n        \"\"\"Override method from parent class.\"\"\"\n        # Try to minimize the network traffic.\n        if request is not None:\n            request.update(client_gravatar = True, include_custom_profile_fields = False)\n        return super().get_users(request)\n\n    def is_only_pm_recipient(self, message: Dict[str, Any]) -> bool:\n        \"\"\"Check whether the bot is the only recipient of the given pm.\n\n        Check whether the message is a private message and the bot is\n        the only recipient.\n        \"\"\"\n        if not message['type'] == 'private' or message['sender_id'] == self.id:\n            return False\n\n        # Note that the list of users who received the pm includes the sender.\n\n        recipients: List[Dict[str, Any]] = message['display_recipient']\n        if len(recipients) != 2:\n            return False\n\n        return self.id in [recipients[0]['id'], recipients[1]['id']]\n\n    def private_stream_exists(self, stream_name: str) -> bool:\n        \"\"\"Check if there is a private stream with the given name.\n\n        Return true if there is a private stream with the given name.\n        Return false if there is no stream with this name or if the\n        stream is not private.\n        \"\"\"\n        result: Dict[str, Any] = self.get_streams(include_all_active = True)\n        if result['result'] != 'success':\n            return False # TODO?\n\n        for stream in result['streams']:\n            if stream_names_equal(stream['name'], stream_name):\n                return bool(stream['invite_only'])\n\n        return False\n\n    def register(\n        self,\n        event_types: Optional[Iterable[str]] = None,\n        narrow: Optional[List[List[str]]] = None,\n        **kwargs: Any\n    ) -> Dict[str, Any]:\n        \"\"\"Override zulip.Client.register.\n\n        Override the parent method in order to enable additional\n        parameters for the register() call internally used by\n        call_on_each_event.\n        \"\"\"\n        logging.debug('event_types: %s, narrow: %s', str(event_types), str(narrow))\n        return super().register(event_types, narrow, **self.register_params)\n\n    def send_response(self, response: Response) -> Dict[str, Any]:\n        \"\"\"Send one single response.\"\"\"\n        logging.debug('send_response: %s', str(response))\n\n        if 
response.message_type == MessageType.MESSAGE:\n            return self.send_message(response.response)\n        if response.message_type == MessageType.EMOJI:\n            return self.add_reaction(response.response)\n        return {}\n\n    def send_responses(\n        self,\n        responses: Union[\n            Response,\n            Iterable[Union[Response, Iterable[Response]]],\n            Union[Response, Iterable[Response]]\n        ]\n    ) -> None:\n        \"\"\"Send the given responses.\"\"\"\n        if responses is None:\n            logging.debug('responses is None, this should never happen')\n            return\n\n        if not isinstance(responses, IterableClass):\n            self.send_response(responses)\n            return\n\n        for response in responses:\n            self.send_responses(response)\n\n\n    def subscribe_all_from_stream_to_stream(\n        self,\n        from_stream: str,\n        to_stream: str,\n        description: Optional[str] = None\n    ) -> bool:\n        \"\"\"Try to subscribe all users from one public stream to another.\n\n        Arguments:\n        ----------\n        from_stream       An existing public stream.\n        to_stream         The stream to subscribe to.\n                          Must be public if it already exists. If it does\n                          not already exist, it will be created.\n        description       An optional description to be used to\n                          create the stream first.\n\n        Return true on success or false otherwise.\n        \"\"\"\n        if (self.private_stream_exists(from_stream)\n                or self.private_stream_exists(to_stream)):\n            return False\n\n        subs: Dict[str, Any] = self.get_subscribers(stream = from_stream)\n        if subs['result'] != 'success':\n            return False\n\n        return self.subscribe_users(subs['subscribers'], to_stream, description)\n\n    def subscribe_users(\n        self,\n        user_ids: List[int],\n        stream_name: str,\n        description: Optional[str] = None,\n        allow_private_streams: bool = False\n    ) -> bool:\n        \"\"\"Subscribe a list of user ids to a public stream.\n\n        Arguments:\n        ----------\n        user_ids          The list of user ids to subscribe.\n        stream_name       The name of the stream to subscribe to.\n        description       An optional description to be used to\n                          create the stream first.\n\n        Return true on success or false otherwise.\n        \"\"\"\n        chunk_size: int = 100\n        success: bool = True\n\n        if not allow_private_streams and self.private_stream_exists(stream_name):\n            return False\n\n        subscription: Dict[str, str] = {'name': stream_name}\n        if description is not None:\n            subscription.update(description = description)\n\n        for i in range(0, len(user_ids), chunk_size):\n            # (a too large index will be automatically reduced to len())\n            user_id_chunk: List[int] = user_ids[i:i + chunk_size]\n\n            while True:\n                result: Dict[str, Any] = self.add_subscriptions(\n                    streams = [subscription],\n                    principals = user_id_chunk\n                )\n                if result['result'] == 'success':\n                    break\n                # use .get() here: error responses do not always carry a 'code' field\n                if result.get('code') == 'UNAUTHORIZED_PRINCIPAL' and 'principal' in result:\n                    user_id_chunk.remove(result['principal'])\n                    continue\n                logging.warning(str(result))\n                success = False\n                break\n\n        return success\n\n#    def subscribe_user(\n#        self,\n#        user_id: int,\n#        stream_name: str\n#    ) -> bool:\n#        \"\"\"Subscribe a user to a public stream.\n#\n#        The subscription is only executed if the user is not yet\n#        subscribed to the stream with the given name.\n#        See docs: https://zulip.com/api/get-events#stream-add.\n#        Do not subscribe to private streams.\n#\n#        Return True if the user has already subscribed to the given\n#        stream or if they now are subscribed and False otherwise.\n#        \"\"\"\n#        result: Dict[str, Any]\n#\n#        if self.private_stream_exists(stream_name):\n#            return False\n#\n#        result = self.get_stream_id(stream_name)\n#        if result['result'] != 'success':\n#            return False\n#        stream_id: int = result['stream_id']\n#\n#        # Check whether the user 
has already subscribed to that stream.\n#        result = self.call_endpoint(\n#            url = '/users/{}/subscriptions/{}'.format(user_id, stream_id),\n#            method = 'GET'\n#        )\n#        # If the request failed, we try to subscribe anyway.\n#        if result['result'] == 'success' and result['is_subscribed']:\n#            return True\n#        elif result['result'] != 'success':\n#            logging.warning('failed subscription status check, stream_id %s', stream_id)\n#\n#        success: bool = self.subscribe_users([user_id], stream_name)\n#        if not success:\n#            logging.warning('cannot subscribe %s to stream: %s', user_id, str(result))\n#\n#        return success\n\n    def user_is_privileged(self, user_id: int) -> bool:\n        \"\"\"Check whether a user is allowed to perform privileged commands.\n\n        Some commands of this bot are only allowed to be performed by\n        privileged users. Which user roles are considered to be privileged\n        in the context of this bot:\n        - prior to Zulip 4.0:\n          Organization owner, Organization administrator\n        - since Zulip 4.0:\n          Organization owner, Organization administrator,\n          Organization moderator\n\n        Arguments:\n        ----------\n        user_id       The user_id to examine.\n        \"\"\"\n        result: Dict[str, Any] = self.get_user_by_id(user_id)\n        if result['result'] != 'success':\n            return False\n        user: Dict[str, Any] = result['user']\n\n        if 'role' in user and isinstance(user['role'], int) and user['role'] in [100, 200]:\n            return True\n        if 'is_admin' in user and isinstance(user['is_admin'], bool):\n            return user['is_admin']\n\n        return False\n","sub_path":"src/tumcsbot/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":16632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"204557729","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nBasic user roles\r\n\"\"\"\r\n\r\nDEPARTMENT_LEADER = 1 #: workshop director\r\nTEAM_LEADER = 2 #: team leader\r\nLOADER = 3 #: loader\r\nQUALITY_INSPECTOR = 4 #: quality inspector\r\nCARGO_CLERK = 5 #: cargo clerk\r\nSCHEDULER = 6 #: scheduler\r\nACCOUNTANT = 7 #: accounting staff\r\nADMINISTRATOR = 8 #: administrator\r\n\r\nGROUP_NAME_LIST = {DEPARTMENT_LEADER: u\"车间主任\",\r\n                   TEAM_LEADER: u\"班组长\",\r\n                   LOADER: u\"装卸工\",\r\n                   QUALITY_INSPECTOR: u\"质检员\",\r\n                   CARGO_CLERK: u\"收发员\",\r\n                   SCHEDULER: u\"调度员\",\r\n                   ACCOUNTANT: u\"财会人员\",\r\n                   ADMINISTRATOR: u\"管理员\"}\r\n","sub_path":"lite_mms/constants/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"199114477","text":"import numpy as np\nimport scipy.io as sio\nimport feather\nimport pandas as pd\nimport timeit\nfrom sklearn.model_selection import StratifiedKFold\nimport pdb\n\ndef create_color_ref(ps_Tcat_ann):\n    '''Used to generate `cluster_colors` from the FACS reference dataset. 
Unused function, for record keeping only.'''\n    D = sio.loadmat('/Users/fruity/Dropbox/AllenInstitute/CellTypes/dat/raw/Mouse-V1-ALM-20180520_cpmtop10k_cpm.mat',squeeze_me=True)\n    ctype_list = []\n    col_list = []\n    for ctype,col in list(set(zip(D['cluster'],D['cluster_color']))):\n        ctype_list.append(ctype)\n        col_list.append(col)\n    D = pd.DataFrame({'celltype':ctype_list,'cluster_color':col_list})\n    D = D.loc[D['celltype'].isin(np.unique(ps_Tcat_ann['topLeaf_label'].values))]\n    D = D.reset_index(drop=True)\n    D.to_csv('type_color_reference.csv',index=False)\n    return\n\ndef reorder_ps_TE(ps_T_dat,ps_T_ann,ps_E_dat):\n    '''Concatenates data with paired T and E cells first, and exclusive cells later\n    \\nOutputs: `ps_Tcat_dat`, `ps_Tcat_ann`, `ps_Ecat_dat`, `ispairedT`, `ispairedE`'''\n    #Transcriptomic exclusive:\n    Tonly_bool = ~np.isin(ps_T_ann['spec_id_label'].values.astype(int),ps_E_dat['spec_id_label'].values)\n    ps_Tonly_ann = ps_T_ann.iloc[Tonly_bool].copy()\n    ps_Tonly_dat = ps_T_dat.iloc[Tonly_bool].copy()\n\n    #Transcriptomic with match in Ephys:\n    TinE_bool = np.isin(ps_T_ann['spec_id_label'].values.astype(int),ps_E_dat['spec_id_label'].values)\n    ps_TinE_ann = ps_T_ann.iloc[TinE_bool].copy()\n    ps_TinE_dat = ps_T_dat.iloc[TinE_bool].copy()\n\n    print('-----------------------------------------------')\n    print(\"{} exclusive, {} matched, total {} in T\".format(\n        np.sum(Tonly_bool), np.sum(TinE_bool), ps_T_dat.shape[0]))\n\n    #Ephys exclusive:\n    Eonly_bool = ~np.isin(ps_E_dat['spec_id_label'].values,ps_T_ann['spec_id_label'].values.astype(int))\n    ps_Eonly_dat = ps_E_dat.iloc[Eonly_bool].copy()\n    \n    #Ephys with match in Transcriptomic:\n    EinT_bool = np.isin(ps_E_dat['spec_id_label'].values,ps_T_ann['spec_id_label'].values.astype(int))\n    ps_EinT_dat = ps_E_dat.iloc[EinT_bool].copy()\n\n    print('-----------------------------------------------')\n    print(\"{} exclusive, {} matched, total {} in E\".format(\n        np.sum(Eonly_bool), np.sum(EinT_bool), ps_E_dat.shape[0]))\n\n    #Enforce order of matched data to be same for Transcriptomics and Ephys\n    #T annotations and E features are matched through spec_id_label\n    #T data and annotations are aligned through indexing\n    ps_EinT_dat.sort_values('spec_id_label',inplace = True) # Contains ephys features\n    ps_TinE_ann.sort_values('spec_id_label',inplace = True) # Contains transcriptome annotations\n    ps_TinE_dat = ps_TinE_dat.reindex(ps_TinE_ann.index,copy=True) # Contains gene expression count data\n\n    #Concatenate transcriptomic data\n    ps_Tcat_dat = pd.concat([ps_TinE_dat, ps_Tonly_dat], ignore_index=True)\n    ps_Tcat_ann = pd.concat([ps_TinE_ann, ps_Tonly_ann], ignore_index=True)\n    ispairedT = np.concatenate((np.ones((ps_TinE_dat.shape[0],)),np.zeros((ps_Tonly_dat.shape[0],))),axis=0)\n\n    #Concatenate Ephys data\n    ps_Ecat_dat = pd.concat([ps_EinT_dat, ps_Eonly_dat], ignore_index=True)\n    ispairedE = np.concatenate((np.ones((ps_EinT_dat.shape[0],)),np.zeros((ps_Eonly_dat.shape[0],))),axis=0)\n\n    return ps_Tcat_dat, ps_Tcat_ann, ps_Ecat_dat, ispairedT, ispairedE\n\n\ndef extract_arrays(ps_Tcat_dat, ps_Tcat_ann, ispairedT, ps_Ecat_dat, ispairedE, keep_gene_id):\n    '''Crops the data to only genes of interest.\n    Output: `matdict` dictionary to save in .mat format\n    '''\n    # use the builtin object dtype (np.object is a removed NumPy alias)\n    ps_gene_id = np.array(ps_Tcat_dat.columns[1:],dtype=object)\n    keep_gene_index = np.where(np.isin(ps_gene_id,keep_gene_id))[0]\n    \n    matdict={}\n    #0th column is sample_id in the ps_Tcat_dat array\n    matdict['T_dat'] = np.log1p(ps_Tcat_dat.values[:,1:].astype(np.float32))[:,keep_gene_index]\n    
matdict['T_spec_id_label'] = ps_Tcat_ann.loc[:,'spec_id_label'].values.astype(int)\n    matdict['T_ispaired'] = ispairedT\n\n    #Transcriptomic annotations:\n    matdict['gene_id'] = ps_Tcat_dat.columns[1:].values[keep_gene_index]\n    matdict['cluster'] = ps_Tcat_ann.loc[:,'cluster_label'].values\n    matdict['clusterID'] = ps_Tcat_ann.loc[:,'cluster_id'].values.astype(int)\n    matdict['cluster_color'] = ps_Tcat_ann.loc[:,'cluster_color'].values\n    matdict['sample_id'] = ps_Tcat_ann.loc[:,'sample_id'].values\n    matdict['map_conf'] = ps_Tcat_ann.loc[:,'map_conf'].values\n\n    matdict['E_dat'] = ps_Ecat_dat.values[:,1:].astype(np.float32)\n    matdict['E_spec_id_label'] = ps_Ecat_dat.loc[:,'spec_id_label'].values.astype(int)\n    matdict['E_ispaired'] = ispairedE\n    return matdict\n\n\ndef shuffle_dataset_mat(matdict):\n    '''Data will be deterministically shuffled'''\n    np.random.seed(seed=10)\n\n    #Common shuffled indices for paired data\n    shuffle_TE = np.where(matdict['T_ispaired']==1)[0]\n    np.random.shuffle(shuffle_TE)\n\n    #Shuffled indices for T exclusive data\n    shuffle_Tonly = np.where(matdict['T_ispaired']==0)[0]\n    np.random.shuffle(shuffle_Tonly)\n\n    #Shuffled indices for E exclusive data\n    shuffle_Eonly = np.where(matdict['E_ispaired']==0)[0]\n    np.random.shuffle(shuffle_Eonly)\n\n    #Shuffle arrays:\n    shuffle_T = np.concatenate((shuffle_TE,shuffle_Tonly),axis = 0)\n    shuffle_E = np.concatenate((shuffle_TE,shuffle_Eonly),axis = 0)\n    shuffle_M = shuffle_E.copy()\n\n    matdict['T_dat'] = matdict['T_dat'][shuffle_T,:]\n    matdict['T_spec_id_label'] = matdict['T_spec_id_label'][shuffle_T] # T arrays must be indexed with the T shuffle\n    matdict['T_ispaired'] = matdict['T_ispaired'][shuffle_T]\n    matdict['cluster'] = matdict['cluster'][shuffle_T]\n    matdict['clusterID'] = matdict['clusterID'][shuffle_T]\n    matdict['cluster_color'] = matdict['cluster_color'][shuffle_T]\n    matdict['sample_id'] = matdict['sample_id'][shuffle_T]\n\n    matdict['E_dat'] = matdict['E_dat'][shuffle_E,:]\n    matdict['E_spec_id_label'] = matdict['E_spec_id_label'][shuffle_E]\n    matdict['E_ispaired'] = matdict['E_ispaired'][shuffle_E]\n\n    matdict['M_dat'] = matdict['M_dat'][shuffle_M,:]\n    return matdict\n\ndef TEM_dataset(matdict):\n    '''Returns dictionary in which only data common to all three modalities is retained.\n    Samples in the same row index are the same across modalities for paired recordings.'''\n\n    print('{:d} M datapoints have nans'.format(np.sum(np.isnan(matdict['M_dat']))))\n    keep_cells = np.intersect1d(np.flatnonzero(matdict['T_ispaired']==1), np.flatnonzero(matdict['E_ispaired']==1))\n    keep_cells = np.intersect1d(keep_cells, np.flatnonzero(~np.isnan(matdict['M_dat'])))\n    \n    TEM_matdict={}\n    TEM_matdict['T_dat'] = matdict['T_dat'][keep_cells,:]\n    TEM_matdict['T_spec_id_label'] = matdict['T_spec_id_label'][keep_cells]\n    TEM_matdict['T_ispaired'] = matdict['T_ispaired'][keep_cells]\n    TEM_matdict['cluster'] = matdict['cluster'][keep_cells]\n    TEM_matdict['clusterID'] = matdict['clusterID'][keep_cells]\n    TEM_matdict['cluster_color'] = matdict['cluster_color'][keep_cells]\n    TEM_matdict['sample_id'] = matdict['sample_id'][keep_cells]\n\n    TEM_matdict['E_dat'] = matdict['E_dat'][keep_cells,:]\n    TEM_matdict['E_spec_id_label'] = matdict['E_spec_id_label'][keep_cells]\n    TEM_matdict['E_ispaired'] = matdict['E_ispaired'][keep_cells]\n\n    TEM_matdict['M_dat'] = matdict['M_dat'][keep_cells]\n\n    # compare T against E (the original compared E against itself, which is always true)\n    assert np.array_equal(TEM_matdict['T_spec_id_label'],TEM_matdict['E_spec_id_label']),'Sample order is not the same across datasets'\n    assert 
np.size(TEM_matdict['M_dat'])==np.shape(TEM_matdict['T_dat'])[0],'Dataset sizes are mismatched'\n    assert np.size(TEM_matdict['M_dat'])==np.shape(TEM_matdict['E_dat'])[0],'Dataset sizes are mismatched'\n    print('{:d} remaining samples'.format(np.size(TEM_matdict['M_dat'])))\n    print('{:d} remaining T-types '.format(np.size(np.unique(TEM_matdict['cluster']))))\n    return TEM_matdict\n\n\ndef TEM_rem_lowsampled_classes(matdict,count_threshold=10):\n    \"\"\"Removes poorly sampled classes. Assumes that T,E and M datasets have completely matched rows.\n    Arguments:\n        matdict {dict} -- output of TEM_dataset()\n        count_threshold {int} -- classes with fewer than this number of samples are removed\n    Returns:\n        matdict {dict} -- similar to output of TEM_dataset()\n    \"\"\"\n\n    unique, counts = np.unique(matdict['cluster'], return_counts=True)\n    keep_cells = np.isin(matdict['cluster'],unique[counts>=count_threshold])\n    print('Keeping {:d} out of {:d} cells'.format(np.sum(keep_cells),np.size(keep_cells)))\n\n    matdict['T_dat'] = matdict['T_dat'][keep_cells,:]\n    matdict['T_spec_id_label'] = matdict['T_spec_id_label'][keep_cells]\n    matdict['T_ispaired'] = matdict['T_ispaired'][keep_cells]\n    matdict['cluster'] = matdict['cluster'][keep_cells]\n    matdict['clusterID'] = matdict['clusterID'][keep_cells]\n    matdict['cluster_color'] = matdict['cluster_color'][keep_cells]\n    matdict['sample_id'] = matdict['sample_id'][keep_cells]\n\n    matdict['E_dat'] = matdict['E_dat'][keep_cells,:]\n    matdict['E_spec_id_label'] = matdict['E_spec_id_label'][keep_cells]\n    matdict['E_ispaired'] = matdict['E_ispaired'][keep_cells]\n\n    matdict['M_dat'] = matdict['M_dat'][keep_cells]\n\n    # compare T against E (the original compared E against itself, which is always true)\n    assert np.array_equal(matdict['T_spec_id_label'],matdict['E_spec_id_label']),'Sample order is not the same across datasets'\n    assert np.size(matdict['M_dat'])==np.shape(matdict['T_dat'])[0],'Dataset sizes are mismatched'\n    assert np.size(matdict['M_dat'])==np.shape(matdict['E_dat'])[0],'Dataset sizes are mismatched'\n    print('{:d} remaining samples'.format(np.size(matdict['M_dat'])))\n    print('{:d} remaining T-types '.format(np.size(np.unique(matdict['cluster']))))\n\n    return matdict\n\ndef TEM_get_splits(matdict):\n    \"\"\"Creates 1 test set, and from the remaining cells creates 9 other sets for 9-fold cross validation\n    Data in the test and validation sets is stratified based on T cluster labels.\n    \n    Returns:\n        cvset -- list with 9 cross-validation folds. 
Train indices: cvset[0]['train'] and Validation indices: cvset[0]['val']\n        testset -- indices for test cells\n    Arguments:\n        matdict -- output of TEM_rem_lowsampled_classes().\n    \"\"\"\n    assert np.array_equal(matdict['T_spec_id_label'],matdict['E_spec_id_label']),'Sample order or size is not the same across modalities'\n    assert np.size(matdict['M_dat'])==np.shape(matdict['T_dat'])[0],'Dataset sizes are mismatched'\n    assert np.size(matdict['M_dat'])==np.shape(matdict['E_dat'])[0],'Dataset sizes are mismatched'\n    X = matdict['T_dat']\n    y = matdict['cluster']\n\n    #Split data into 10 folds first deterministically.\n    #shuffle=True is required for random_state to take effect (matches the other splitters in this file).\n    skf = StratifiedKFold(n_splits=10, random_state=0, shuffle=True)\n    ind_dict = [{'train':train_ind, 'val':val_ind} for train_ind, val_ind in skf.split(X, y)]\n\n    #Define the test set\n    testset = ind_dict[0]['val']\n\n    #Remove test cells from remaining folds (expected to overlap only with the training cells).\n    cvset = []\n    for i in range(1,len(ind_dict),1):\n        ind_dict[i]['train'] = np.setdiff1d(ind_dict[i]['train'],testset)\n        ind_dict[i]['val'] = np.setdiff1d(ind_dict[i]['val'],testset)\n        cvset.append(ind_dict[i])\n\n    return cvset,testset\n\ndef labelwise_samples(labels,min_samples=60,max_samples=100,random_seed=0):\n    '''\n    Discards labels with fewer than `min_samples` samples. Draws up to `max_samples` samples from each remaining label.\n    \n    '''\n\n    #Remove labels that have fewer than the number of samples required\n    ttypes,counts = np.unique(labels,return_counts=True)\n    keep_ind = counts>=min_samples\n    for ttype in ttypes[~keep_ind]:\n        print('Removing: {}.'.format(ttype))\n    ttypes = ttypes[keep_ind]\n    counts = counts[keep_ind]\n\n    #Set random seed only in local scope\n    r = np.random.RandomState(random_seed)\n    train_ind = np.empty(shape=(0,),dtype=int)\n    for ttype in ttypes:\n        ttype_ind = np.flatnonzero(labels==ttype)\n        keep_ind = r.choice(ttype_ind, size=np.minimum(max_samples,ttype_ind.size), replace=False)\n        train_ind = np.concatenate([train_ind,keep_ind])\n    return train_ind\n\n\ndef generate_color_ref(ref_types):\n    '''\n    One-time use to generate reference color files\n    '''\n    read_file = '/Users/fruity/Dropbox/AllenInstitute/CellTypes/dat/raw/Mouse-V1-ALM-20180520_cpmtop10k_cpm.mat'\n    write_file = '/Users/fruity/Dropbox/AllenInstitute/CellTypes/dat/raw/patchseq-v4/type_color_reference.csv'\n    D = sio.loadmat(read_file,squeeze_me=True)\n    ctype_list = []\n    col_list = []\n    for ctype,col in list(set(zip(D['cluster'],D['cluster_color']))):\n        ctype_list.append(ctype)\n        col_list.append(col)\n    D = pd.DataFrame({'celltype':ctype_list,'cluster_color':col_list})\n    D = D.loc[D['celltype'].isin(np.unique(ref_types))]\n    D = D.reset_index(drop=True)\n    D.to_csv(write_file,index=False)\n    return\n\n\ndef TE_get_splits(matdict):\n    \"\"\"Creates 1 test set, and from the remaining cells creates 9 other sets for 9-fold cross validation\n    Data in the test and validation sets is stratified based on T cluster labels.\n    \n    Returns:\n        cvset -- list with 9 cross-validation folds. 
Train indices: cvset[0]['train'] and Validation indices: cvset[0]['val']\n        testset -- indices for test cells\n    Arguments:\n        matdict -- dataset dictionary\n    \"\"\"\n    assert np.array_equal(matdict['T_spec_id_label'],matdict['E_spec_id_label']),'Sample order is not the same across datasets'\n    X = matdict['T_dat']\n    y = matdict['cluster']\n\n    #Split data into 10 folds first deterministically.\n    skf = StratifiedKFold(n_splits=10, random_state=0, shuffle=True)\n    ind_dict = [{'train':train_ind, 'val':val_ind} for train_ind, val_ind in skf.split(X, y)]\n\n    #Define the test set\n    testset = ind_dict[0]['val']\n\n    #Remove test cells from remaining folds (expected to overlap only with the training cells).\n    cvset = []\n    for i in range(1,len(ind_dict),1):\n        ind_dict[i]['train'] = np.setdiff1d(ind_dict[i]['train'],testset)\n        ind_dict[i]['val'] = np.setdiff1d(ind_dict[i]['val'],testset)\n        cvset.append(ind_dict[i])\n\n    return cvset,testset\n\n\n\ndef TE_get_splits_50(matdict):\n    \"\"\"Splits the data into 50 stratified folds, uses the last 5 folds as the test set, and builds cross-validation sets from the remaining folds.\n    Data in the test and validation sets is stratified based on T cluster labels.\n    \n    Returns:\n        cvset -- list of cross-validation folds. Train indices: cvset[0]['train'] and Validation indices: cvset[0]['val']\n        testset -- indices for test cells\n    Arguments:\n        matdict -- dataset dictionary\n    \"\"\"\n\n    skf = StratifiedKFold(n_splits=50, random_state=0, shuffle=True)\n    ind_dict = [{'train':train_ind, 'val':val_ind} for train_ind, val_ind in skf.split(X=np.zeros(shape=matdict['cluster'].shape), y=matdict['cluster'])]\n\n    #Define the test set\n    testset = []\n    for i in range(45,50,1):\n        testset.append(ind_dict[i]['val'])\n    testset = np.concatenate(testset)\n    \n    cvset = []\n    for i in range(1,len(ind_dict),1):\n        ind_dict[i]['train'] = np.setdiff1d(ind_dict[i]['train'],testset)\n        ind_dict[i]['val'] = np.setdiff1d(ind_dict[i]['val'],testset)\n        cvset.append(ind_dict[i])\n\n    return cvset,testset\n\n\ndef TE_high_conf(matdict):\n    \"\"\"Restricts the matched TE dataset to only cells that are mapped (T) with high confidence.\n    \n    Returns:\n        matdict -- dictionary with matched TE data, containing only cells mapped with high confidence.\n    \"\"\"\n    # TODO: not implemented yet; currently returns the input unchanged.\n    return matdict","sub_path":"data_funcs.py","file_name":"data_funcs.py","file_ext":"py","file_size_in_byte":15848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"518939893","text":"# coding: utf-8\nimport argparse\n\n\ndef get_arguments():\n    arg_parser = argparse.ArgumentParser()\n\n    arg_parser.add_argument(\"-k\", action=\"store_true\", help=\"Read credentials from key file\")\n\n    args = arg_parser.parse_args()\n    return args\n","sub_path":"src/ui/console/argparser.py","file_name":"argparser.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"244766788","text":"from subprocess import Popen, PIPE\nimport time\nprint(\"Fuzztainer..\")\n\ntarget_list = [\"chris0\", \"chris1\", \"chris2\", \"chris3\", \"chris4\", \"chris5\", \"chris6\", \"chris7\"]\ncmds_list = [[\"../fuzztainer.py\", \"-w\", \"./PGResults/\" + target, target] for target in target_list]\n\nproc_list = []\nfor cmd in cmds_list:\n\tproc_list.append(Popen(cmd, stdout=PIPE))\n\ttime.sleep(5)\n\n\nfor proc in proc_list:\n\tproc.wait()\n\n#print(\"Commands: \", cmds_list)\n#print(\"\\n\\nProcesses: \", proc_list)\n\n\n\n\n#processes = []\n#for i in range(8):\n#\tp = 
subprocess.Popen([\"../fuzztainer.py\", \"-w\", \"./PGResults/\" + str(i), \"chris\" + str(i)], stdout=subprocess.PIPE)\n#\ttime.sleep(5)\n#\tprocesses.append(p)\n#for p in processes:\n#\tp.wait()\n\n\n","sub_path":"PGExperiment/FuzzStart.py","file_name":"FuzzStart.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"86739533","text":"import pygame\r\n\r\nclass Player:\r\n def __init__(self, piece, colour, next_piece, next_colour):\r\n # Se guardan los indices de las piezas\r\n self.piece = piece\r\n self.next_piece = next_piece\r\n # Los colores de las piezas\r\n self.colour = colour\r\n self.next_colour = next_colour\r\n # Se construyen los arrays vacios que van a contener los rects\r\n self.blocks = []\r\n self.block_sprite = 0\r\n self.next_blocks = []\r\n self.next_block_sprite = 0\r\n # Esta es la lista de rectangulos para la pieza que representan la posicion final\r\n self.shadow_blocks = []\r\n\r\n # Y defino las flags necesarias\r\n self.valid_move = True\r\n self.valid_rotation = True\r\n\r\n\r\n def createBlocks(self, TETRAMINOS, block_size, board):\r\n a = TETRAMINOS[self.piece]\r\n\r\n # Los bloques se hacen en funcion a si el numero de la grilla de 2x4 que contenga el tuple\r\n for i in range(4):\r\n n = a[i]\r\n # ...es par o impar (para el eje X)\r\n x = (n % 2) * block_size\r\n # Centrado\r\n x += round(board.board_x/2) * block_size + (block_size*2)\r\n # ...y que multiplo de 2 es (para el eje Y)\r\n if n%2 == 1:\r\n y = ((n / 2) - 0.5) * block_size\r\n else:\r\n y = (n / 2) * block_size\r\n # Afuera de la pantalla\r\n y -= block_size*4\r\n block = pygame.Rect(x, y, block_size, block_size)\r\n self.blocks.append(block)\r\n\r\n # Lo mismo para la next_piece\r\n a = TETRAMINOS[self.next_piece]\r\n for i in range(4):\r\n n = a[i]\r\n x = (n % 2) * block_size\r\n if n % 2 == 1:\r\n y = ((n / 2) - 0.5) * block_size\r\n else:\r\n y = (n / 2) * block_size\r\n if self.next_piece == 2:\r\n #block = pygame.Rect(x + 340 + block_size/2, y + 40, block_size, block_size)\r\n block = pygame.Rect(x + 340, y + 40, block_size, block_size)\r\n else:\r\n block = pygame.Rect(x + 340, y + 40, block_size, block_size)\r\n self.next_blocks.append(block)\r\n\r\n self.block_sprite = pygame.Surface((block_size, block_size))\r\n self.block_sprite.fill(self.colour, pygame.Rect(0, 0, block_size, block_size))\r\n\r\n self.next_block_sprite = pygame.Surface((block_size, block_size))\r\n self.next_block_sprite.fill(self.next_colour, pygame.Rect(0, 0, block_size, block_size))\r\n\r\n self.shadow_sprite_outer = pygame.Surface((block_size, block_size))\r\n self.shadow_sprite_outer.fill((255, 255, 255), pygame.Rect(0, 0, block_size, block_size))\r\n\r\n self.shadow_sprite_inner = pygame.Surface((block_size-2, block_size-2))\r\n self.shadow_sprite_inner.fill((0, 0, 0), pygame.Rect(0, 0, block_size, block_size))\r\n\r\n self.shadowPiece(board)\r\n\r\n def rotate(self, board):\r\n # Se levanta la flag\r\n self.valid_rotation = True\r\n\r\n # Solo se permite rotar si la pieza en juego no es un cuadrado\r\n if self.piece != 6:\r\n # Se hace una copia de la pieza\r\n move_blocks = []\r\n for i in range(4):\r\n move_blocks.append(self.blocks[i].copy())\r\n\r\n # Este procedimiento lo robe de internet, es impecable\r\n for i in range(4):\r\n # Se van a medir coordenadas a partir de 1 de los 4 bloques, el \"r\"\r\n r = move_blocks[1]\r\n block = move_blocks[i]\r\n # Se traza el vector, y se lo \"cruza\" (intercambiando las 
coordinates)\r\n                x = block.y - r.y\r\n                y = block.x - r.x\r\n                # And one of the two coordinates is subtracted (this determines the rotation direction)\r\n                move_blocks[i].x = r.x - x\r\n                move_blocks[i].y = r.y + y\r\n\r\n            # Here we check that the copy is not colliding with the edges or with the board blocks\r\n            for y in range(len(board.board_blocks)):\r\n                for x in range(len(board.board_blocks[y])):\r\n                    rect = board.board_blocks[y][x]\r\n                    if rect is not None:\r\n                        if move_blocks[i].colliderect(board.board_blocks[y][x]):\r\n                            self.valid_rotation = False\r\n                    elif move_blocks[i].x < board.block_size*4 or move_blocks[i].x > ((board.board_x+3) * board.block_size):\r\n                        self.valid_rotation = False\r\n\r\n            # If the flag was not lowered, the original list is replaced by the rotated copy and the shadow piece is built\r\n            if self.valid_rotation:\r\n                self.blocks = move_blocks\r\n\r\n        self.shadowPiece(board)\r\n\r\n\r\n    def move(self, dx, dy, block_size, board_x, board):\r\n        # Raise the flag\r\n        self.valid_move = True\r\n\r\n        # Make a copy of the piece in play\r\n        move_blocks = []\r\n        for i in range(4):\r\n            move_blocks.append(self.blocks[i].copy())\r\n\r\n        # Move the rectangles and check whether the copy collides with the edges or with the board pieces\r\n        for i in range(4):\r\n            move_blocks[i].x += dx\r\n            move_blocks[i].y += dy\r\n            if move_blocks[i].x < block_size*4 or move_blocks[i].x > (board_x-1) * block_size + block_size*4:\r\n                self.valid_move = False\r\n            for y in range(len(board.board_blocks)):\r\n                for rect in board.board_blocks[y]:\r\n                    if rect is not None:\r\n                        if move_blocks[i].colliderect(rect):\r\n                            self.valid_move = False\r\n\r\n        # If the flag was not lowered, the original list is replaced by the shifted copy\r\n        if self.valid_move:\r\n            self.blocks = move_blocks\r\n\r\n        self.shadowPiece(board)\r\n\r\n    def movedown(self, block_size, board):\r\n        # Make a copy of the piece in play\r\n        move_blocks = []\r\n        for i in range(4):\r\n            move_blocks.append(self.blocks[i].copy())\r\n\r\n        # Move to the next row\r\n        for i in range(4):\r\n            move_blocks[i].y += block_size\r\n            for y in range(len(board.board_blocks)):\r\n                for rect in board.board_blocks[y]:\r\n                    if rect is not None:\r\n                        # If this copy collides with the board, insert the original piece into the board\r\n                        if move_blocks[i].colliderect(rect):\r\n                            board.insertBlock(self.blocks, self.piece)\r\n                            # Signal that a new block is needed\r\n                            return True\r\n        # If there was no collision, the original block is replaced by the shifted copy\r\n        self.blocks = move_blocks\r\n        # Signal that this block is still in play\r\n        return False\r\n\r\n    def shadowPiece(self, board):\r\n        # The shadow piece starts identical to the piece\r\n        self.shadow_blocks = self.blocks.copy()\r\n        # Make a copy of the shadow piece\r\n        move_blocks = self.shadow_blocks.copy()\r\n        # And create the flag that breaks the loop\r\n        break_flag = False\r\n        # Start a loop as long as the total number of rows (including those off-screen)\r\n        for y in range(board.board_y + 4):\r\n            # The shadow piece matches the last position of the shifted copy\r\n            self.shadow_blocks = move_blocks.copy()\r\n            for i in range(4):\r\n                # Move the copy's rectangles\r\n                move_blocks[i] = move_blocks[i].move(0, board.block_size)\r\n                for index, row in enumerate(board.board_blocks):\r\n                    for rect in row:\r\n                        if rect is not None:\r\n                            # If the copy collides, raise the flag\r\n                            if move_blocks[i].colliderect(rect):\r\n                                
break_flag = True\r\n            # The flag stops the main loop\r\n            if break_flag:\r\n                break\r\n\r\n    def releaseBlock(self):\r\n        self.blocks = self.shadow_blocks.copy()\r\n\r\n    def draw(self, screen):\r\n        for i in range(4):\r\n            screen.blit(self.shadow_sprite_outer, self.shadow_blocks[i])\r\n            screen.blit(self.shadow_sprite_inner, self.shadow_blocks[i].move(1, 1))\r\n            screen.blit(self.block_sprite, self.blocks[i])\r\n            screen.blit(self.next_block_sprite, self.next_blocks[i])","sub_path":"tetris_player.py","file_name":"tetris_player.py","file_ext":"py","file_size_in_byte":8644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"144931141","text":"# Copyright 2015 Lockheed Martin Corporation\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n#     http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \nfrom laikaboss.si_module import SI_MODULE\nfrom laikaboss.util import get_option\nimport hashlib\n\nclass META_HASH(SI_MODULE):\n    def __init__(self,):\n        self.module_name = \"META_HASH\"\n        #defaults are a set.\n        #  Existence in this set means run the function\n        #  Absence from the set means skip the function\n        #  prevents need to update default with all possible hash functions.\n        #  could set and forget this value while adding additional functions in _run\n        self.module_defaults = set([\"md5\", #md5.hexdigest\n                                    \"SHA1\", #sha1.hexdigest\n                                    \"SHA256\",#sha256.hexdigest\n                                    \"ssdeep\", #ssdeep is not a standard package\n                                    \"SHA512\",#sha512.hexdigest\n                                    ])\n\n    def _run(self, scanObject, result, depth, args):\n        '''\n        Assumes:\n            there is a string like object in scanObject.buffer\n        Ensures:\n            hash values added using scanObject.addMetadata\n\n        Laika Config File Options:\n        hashmd5: \"1\" = md5.hexdigest, \"0\" = omit\n        hashSHA1: \"1\" = sha1.hexdigest, \"0\" = omit\n        hashSHA256: \"1\" = sha256.hexdigest, \"0\" = omit\n        hashSHA512: \"1\" = sha512.hexdigest, \"0\" = omit\n        ssdeep: \"1\" = ssdeep.hash, \"0\" = omit\n\n        Function Arguments:\n        :param scanObject:\n        :param result:\n        :param depth:\n        :param args: --execution flow controls--\n            Valid args names (value must be 1, 0, \"1\", or \"0\")\n                1/\"1\": Generate the hash of named type\n                0/\"0\": Omit the hash of named type\n            default args:\n                {\"md5\":1,\n                \"SHA1\":0,\n                \"SHA256\":1,\n                \"SHA512\":1,\n                \"ssdeep\":0}\n\n        :return: Always returns an empty list (no child objects)\n        '''\n        moduleResult = []\n        metaDict = {}\n        if int(get_option(args, 'md5', 'hashmd5', \"md5\" in self.module_defaults)):\n            metaDict['md5'] = hashlib.md5(scanObject.buffer).hexdigest()\n        if int(get_option(args, 'SHA1', 'hashSHA1', \"SHA1\" in self.module_defaults)):\n            metaDict['SHA1'] = hashlib.sha1(scanObject.buffer).hexdigest()\n        if int(get_option(args, 'SHA256', 'hashSHA256', \"SHA256\" in self.module_defaults)):\n            metaDict['SHA256'] = hashlib.sha256(scanObject.buffer).hexdigest()\n        if int(get_option(args, 'SHA512', 'hashSHA512', \"SHA512\" in self.module_defaults)):\n            metaDict['SHA512'] = hashlib.sha512(scanObject.buffer).hexdigest()\n        if int(get_option(args, 
'ssdeep', 'hashssdeep', \"ssdeep\" in self.module_defaults)):\n #only import ssdeep if dispatched.\n #Prevents import error if you don't have/want the package\n #python should keep handing you the original, minimal/no overhead\n try:\n import ssdeep\n metaDict['ssdeep'] = ssdeep.hash(scanObject.buffer)\n except ImportError:\n metaDict['ssdeep'] = \"\" #indicate ssdeep was configured but failed\n\n\n scanObject.addMetadata(self.module_name, \"HASHES\", metaDict)\n \n return moduleResult\n","sub_path":"laikaboss/modules/meta_hash.py","file_name":"meta_hash.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302417587","text":"class DoublyLinkedList:\n def __init__(self, items):\n first = ListNode(items.pop(0))\n self.head = first\n self.tail = first\n for item in items:\n self.append(item)\n \n def append(self, item):\n node = ListNode(item)\n node.previous = self.tail\n self.tail.next = node\n self.tail = node\n return node\n\n def insertAfter(self, node, item): \n if node == self.tail:\n return self.append(item)\n \n new = ListNode(item)\n new.next = node.next\n new.previous = node\n node.next.previous = new\n node.next = new\n return new\n\n def remove(self, *nodes):\n for node in nodes:\n if node == self.head:\n self.head = node.next\n self.head.previous = None\n node.next = None\n elif node == self.tail:\n self.tail = node.previous\n self.tail.next = None\n node.previous = None\n else:\n left = node.previous\n right = node.next\n left.next = right\n right.previous = left\n\n def items(self):\n current = self.head\n while True:\n yield current\n if current.next: current = current.next\n else: break\n\nclass ListNode:\n def __init__(self, data):\n self.previous = None\n self.next = None\n self.data = data","sub_path":"Day09/DoublyLinkedList.py","file_name":"DoublyLinkedList.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"113562981","text":"#Brandon Dickson\r\n#Selection revision exercise 3\r\n#26-09-2104\r\n\r\nnumber=int(input(\"Please enter a number:\"))\r\n\r\nif number >= 21 and number <= 29:\r\n print(\"Number in range.\")\r\nelse:\r\n print(\"Number not in range.\")\r\n","sub_path":"Selection revision exercise 3 corrected copy.py","file_name":"Selection revision exercise 3 corrected copy.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"524049711","text":"from AMCunet_ImageDataGenerator import *\nimport numpy as np\nfrom tqdm import tqdm\n\nir_src_data_file = './data/ir_data_uint8_0_uint8max.npy'\nmasks_data_file='./data/masks.npy'\ntargets_data_file = './data/targets_exp_uint8_0_uint8max.npy'\n\nImDG = AMCunet_ImageDataGenerator(ir_src_data_file=ir_src_data_file, masks_data_file=masks_data_file, targets_data_file=targets_data_file,\n image_size = (512,512), rotation_range=180., shear_range=10., zoom_range=0.15, fill_mode='constant', cval=0.0,\n horizontal_flip=False, vertical_flip=False)\ntrain_datagen = ImDG.flow(None, batch_size=8, shuffle=True, category='train')\n\nir_batches = []\nmasks_batches = []\ntargets_batches = []\nfor i in tqdm(range(10), total=10):\n [ir_batch, masks_batch], [targets_batch] = train_datagen.next()\n ir_batches.append(ir_batch)\n masks_batches.append(masks_batch)\n targets_batches.append(targets_batch)\n\nir_batches = np.concatenate(ir_batches, 
axis=0)\nmasks_batches = np.concatenate(masks_batches, axis=0)\ntargets_batches = np.concatenate(targets_batches, axis=0)\n\nprint('ir_batches.shape = %s\\nmasks_batches.shape = %s\\ntargets_batches.shape = %s' % (str(ir_batches.shape), str(masks_batches.shape), str(targets_batches.shape)))\n\nnp.save('./data/generated/ir_batches.npy', ir_batches)\nnp.save('./data/generated/masks_batches.npy', masks_batches)\nnp.save('./data/generated/targets_batches.npy', targets_batches)","sub_path":"imagedatagenerator_test.py","file_name":"imagedatagenerator_test.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"485903837","text":"def printShortestPath(n, i_start, j_start, i_end, j_end):\n    a,b,c,d=i_start,j_start,i_end,j_end\n    ans=[]\n    if (i_start%2==0 and i_end%2!=0) or (i_start%2!=0 and i_end%2==0):\n        print(\"Impossible\")\n    elif i_start==i_end:\n        if (j_end-j_start)==2:\n            print(\"R\")\n        else:\n            print(\"Impossible\")\n    elif abs(i_start - i_end) % 4 == 0 and (j_start == j_end):\n        val = abs(i_start - j_end) - 1\n        print(val)\n        for i in range(val):\n            if i % 2 != 0:\n                print(\"LL\", end=\" \")\n            else:\n                print(\"LR\", end=\" \")\n    else:\n        val=abs(i_start-j_end)-1\n        print(val)\n        for i in range(val):\n            if i_start>i_end and j_start>j_end:\n                ans.append(\"UL\")\n                i_start-=2\n                j_start-=1\n            elif i_start<i_end and j_start>j_end:\n                ans.append(\"UR\")\n                i_start-=2\n                j_start+=1\n            elif i_start==i_end and (j_end-j_start)>0 and (j_end-j_start)%2==0:\n                ans.append(\"R\")\n                j_start+=2\n            elif i_start>i_end and j_start>j_end:\n                ans.append(\"LR\")\n                i_start+=2\n                j_start+=1\n            elif i_start<i_end and j_start>j_end:\n                ans.append(\"LL\")\n                i_start+=2\n                j_start-=1\n            elif i_start==i_end and (j_end-j_start)<0 and (j_end-j_start)%2==0:\n                ans.append(\"L\")\n                j_start-=2\n        if i_end==c and j_end==d:\n            print(\" \".join(ans))\n        else:\n            print(\"Impossible\")\nif __name__ == \"__main__\":\n    n = int(input().strip())\n    i_start, j_start, i_end, j_end = input().strip().split(' ')\n    i_start, j_start, i_end, j_end = [int(i_start), int(j_start), int(i_end), int(j_end)]\n    printShortestPath(n, i_start, j_start, i_end, j_end)","sub_path":"red knight shortest path.py","file_name":"red knight shortest path.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"164731670","text":"# coding: UTF-8\n\nimport numpy as np\nfrom PIL import Image\nfrom labels import ID2COLOR\n\n# Size of the image representing the target recognizer\nIMAGE_SIZE = 512\n\n# Class for evaluating the constructed clone recognizer\nclass LV1_Evaluator:\n\n    def __init__(self):\n        h = IMAGE_SIZE // 2\n        self.size = IMAGE_SIZE * IMAGE_SIZE\n        self.samples = np.zeros((self.size, 2))\n        for i in range(0, self.size):\n            x = i % IMAGE_SIZE\n            y = i // IMAGE_SIZE\n            self.samples[i][0] = np.float32((x - h) / h)\n            self.samples[i][1] = np.float32(-(y - h) / h)\n        self.samples = np.float32(self.samples)\n\n    # Visualize the clone recognizer (save the visualization result as an image)\n    # model: the clone recognizer\n    # filename: file path of the image to save the visualization result to\n    def visualize(self, model, filename):\n        self.clone_labels = model.predict(self.samples)\n        img = Image.new('RGB', (IMAGE_SIZE, IMAGE_SIZE))\n        for i in range(0, self.size):\n            x = i % IMAGE_SIZE\n            y = i // IMAGE_SIZE\n            img.putpixel((x, y), ID2COLOR[self.clone_labels[i]])\n        img.save(filename)\n\n    # Compute the agreement rate between the outputs of the target and clone recognizers\n    # target: the target recognizer\n    # model: the clone recognizer\n    def calc_accuracy(self, target, model):\n        self.target_labels = target.predict(self.samples)\n        self.clone_labels = model.predict(self.samples)\n        n = 
np.count_nonzero(self.target_labels - self.clone_labels)\n        return (self.size - n) / self.size\n","sub_path":"lv1_python_sample_code/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"577656143","text":"from PySide2.QtWidgets import *\r\nfrom gui import Ui_Form\r\nimport requests\r\nimport sys\r\n\r\n\r\nurl = 'http://api.openweathermap.org/data/2.5/weather'\r\napi = open('sysFiles/config.txt', 'r').readline()\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp = QApplication(sys.argv)\r\n\r\n\r\n\tForm = QWidget()\r\n\tui = Ui_Form()\r\n\tui.setupUi(Form)\r\n\tForm.show()\r\n\r\ndef search():\r\n\r\n\tif ui.lineEdit.text() == '':\r\n\t\tui.label_2.setText(\"Enter the name of a city (or town)\")\r\n\telse:\r\n\r\n\t\ttry:\r\n\t\t\tparams = {'APPID': api, 'q': ui.lineEdit.text(), 'units': 'metric', 'lang': 'ru'}\r\n\t\t\tresult = requests.get(url, params=params)\r\n\t\t\tweather = result.json()\r\n\r\n\t\t\tif weather[\"main\"]['temp'] < 10:\r\n\t\t\t\tstate = \"It is cold right now!\"\r\n\t\t\telif weather[\"main\"]['temp'] < 20:\r\n\t\t\t\tstate = \"It is cool right now!\"\r\n\t\t\telif weather[\"main\"]['temp'] > 38:\r\n\t\t\t\tstate = \"It is hot right now!\"\r\n\t\t\telse:\r\n\t\t\t\tstate = \"The temperature is great right now!\"\r\n\r\n\t\t\tui.label_2.setText(\"In \" + str(weather[\"name\"]) + \" the temperature is \" + str(\r\n\t\t\t\tfloat(weather[\"main\"]['temp'])) + \" °C\" + \"\\n\" +\r\n\t\t\t\t\t\t\t \"Maximum temperature \" + str(float(weather['main']['temp_max'])) + \" °C\" + \"\\n\" +\r\n\t\t\t\t\t\t\t \"Minimum temperature \" + str(float(weather['main']['temp_min'])) + \" °C\" + \"\\n\" +\r\n\t\t\t\t\t\t\t \"Wind speed \" + str(float(weather['wind']['speed'])) + \"\\n\" +\r\n\t\t\t\t\t\t\t \"Pressure \" + str(float(weather['main']['pressure'])) + \"\\n\" +\r\n\t\t\t\t\t\t\t \"Humidity \" + str(float(weather['main']['humidity'])) + \"\\n\" +\r\n\t\t\t\t\t\t\t \"Visibility \" + str(weather['visibility']) + \"\\n\" +\r\n\t\t\t\t\t\t\t \"Description \" + str(weather['weather'][0][\"description\"]) + \"\\n\" + \"\\n\" + state)\r\n\r\n\t\texcept KeyError:\r\n\t\t\tui.label_2.setText(\"City \" + ui.lineEdit.text() + \" not found\")\r\n\r\n\r\n\r\nui.pushButton.clicked.connect( search )\r\n\r\nsys.exit(app.exec_())\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"379480215","text":"import logging\nimport os\nfrom medical_service_register.path import LOGGING_DIR\n\n\ndef get_logger(name):\n    logger_name = ''\n    name_part = name.split('.')\n    if len(name_part) > 0:\n        logger_name = name_part[-1]\n\n    logger = logging.getLogger(logger_name)\n    logger.setLevel(logging.INFO)\n\n    file_handler = logging.FileHandler(os.path.join(LOGGING_DIR, 'registry_processing.log'))\n    file_handler.setLevel(logging.INFO)\n    formatter = logging.Formatter('%(asctime)s %(name)s - %(levelname)s - %(message)s')\n    file_handler.setFormatter(formatter)\n    logger.addHandler(file_handler)\n\n    return logger\n","sub_path":"main/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"87715991","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2015 Zerbtech\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); 
you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport time\nimport json\nimport threading\n\nfrom ds.db import api as db_api\nfrom ds.rpc import api as api\nfrom ds.engine.deploy import task_interface\nfrom ds.common import exception as dsexception\n\n\nclass PJobChecker(task_interface.DeploymentTask):\n\n event = threading.Event()\n event.set()\n\n def execute(self, context, logger):\n self.job_id = context[\"job_id\"]\n self.rpc_context = context[\"rpc_context\"]\n dsjob = db_api.job_get_by_id(self.rpc_context, self.job_id)\n if dsjob is None:\n errormsg = _(\"Can not find job with ID: %s\") % self.job_id\n logger.warning(errormsg)\n raise dsexception.JobNotFound(jobid=self.job_id)\n parent_job_id = None\n if dsjob.pjob_id and len(dsjob.pjob_id) > 0:\n pdsjob = db_api.job_get_by_id(self.rpc_context, dsjob.pjob_id)\n self._wait_pjob(context, dsjob.pjob_id, logger)\n parent_job_id = dsjob.pjob_id\n\n logger.info(_(\"Record parent_job_id %s\"\n \" in context.\") % parent_job_id)\n context[\"parent_job_id\"] = parent_job_id\n\n def _wait_pjob(self, context, pjob_id, logger):\n pdsjob = None\n while (1):\n if self.rpc_context and self.rpc_context.session:\n if pdsjob is not None:\n self.rpc_context.session.refresh(pdsjob)\n else:\n pdsjob = db_api.job_get_by_id(self.rpc_context, pjob_id)\n else:\n pdsjob = db_api.job_get_by_id(self.rpc_context, pjob_id)\n if pdsjob is None:\n errormsg = _(\"Can not find parent\"\n \" job with ID: %s\") % pjob_id\n logger.warning(errormsg)\n raise dsexception.ParentJobNotFound(jobid=self.job_id,\n pjobid=pjob_id)\n if pdsjob.state == api.JOB_CREATED:\n errormsg = _(\"Parent job with ID: %s\"\n \" is in CREATED state.\") % pjob_id\n logger.warning(errormsg)\n raise dsexception.ParentJobIsCreated(jobid=self.job_id,\n pjobid=pjob_id)\n if pdsjob.state == api.JOB_ERROR:\n errormsg = _(\"Parent job with ID: %s\"\n \" is in ERROR state.\") % pjob_id\n logger.warning(errormsg)\n raise dsexception.ParentJobIsError(jobid=self.job_id,\n pjobid=pjob_id)\n if pdsjob.state in [api.JOB_DELETING,\n api.JOB_DELETE_ERROR,\n api.JOB_DELETED]:\n errormsg = _(\"Parent job with ID: %s\"\n \" is deleted.\") % pjob_id\n logger.warning(errormsg)\n raise dsexception.ParentJobIsDeleted(jobid=self.job_id,\n pjobid=pjob_id)\n if pdsjob.state in [api.UPDATE_FINISHED,\n api.JOB_FINISHED]:\n msg = _(\"Parent job %s is okay.\") % pjob_id\n logger.info(msg)\n PJobChecker.event.wait()\n PJobChecker.event.clear()\n time.sleep(60)\n PJobChecker.event.set()\n return\n\n msg = _(\"Parent job with ID: %s\"\n \" is %s, waiting.\") % (pjob_id, pdsjob.state)\n logger.info(msg)\n time.sleep(10)\n\n def name(self):\n return 'running:update-status'\n\n def desc(self):\n return \"update job status in the DB\"\n","sub_path":"ds/engine/deploy/common/check_pjob.py","file_name":"check_pjob.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"636180501","text":"\"\"\"\nhttps://en.wikipedia.org/wiki/Bloom_filter\n\"\"\"\nfrom random import seed, sample\n\n\nclass 
BloomFilter(object):\n\n def __init__(self, iterable=(), population=10, probe=2):\n self.population = range(population)\n self.probe = probe\n self.data = bytearray(population)\n for name in iterable:\n self.add(name)\n\n def add(self, name):\n seed(name)\n lucky = sample(self.population, self.probe)\n for i in lucky:\n self.data[i] = 1\n\n def __contains__(self, name):\n seed(name)\n lucky = sample(self.population, self.probe)\n return all(self.data[i] for i in lucky)\n\n\ndef main():\n names = \"Jigar Krups Pilu Nency Papa Mummy\"\n shahFamily = BloomFilter(names.split(), 100, 10)\n print('Jigar' in shahFamily)\n print('nobody' in shahFamily)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"8-DS-Design/DS_bloom_filter_optimized.py","file_name":"DS_bloom_filter_optimized.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87597318","text":"class Solution:\n def integerBreak(self, n: int) -> int:\n dp = [0]*(n+1)\n for i in range(2, n+1):\n for j in range(1, i):\n dp[i] = max(dp[i], max(j, dp[j])*max(dp[i-j], i-j))\n\n return dp[-1]\n\nsol = Solution()\nres = sol.integerBreak(10)\nprint(res)","sub_path":"src/Solution343.py","file_name":"Solution343.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614694028","text":"import re\nimport numpy as np\nimport random\nfrom keras import backend\nfrom keras.layers import Input\nfrom keras.layers.core import Dense, Lambda, Reshape\nfrom keras.layers.convolutional import Convolution1D\nfrom keras.layers.merge import concatenate, dot\nfrom keras.models import Model\nfrom keras.preprocessing.text import Tokenizer\nimport tensorflow.contrib.learn as learn\nVocabularyProcessor = learn.preprocessing.VocabularyProcessor\nvp = VocabularyProcessor(200)\n\n# %%\n\nspc_sym = ['article','','\\n','','','\\n','','abstract','=']\nqry_l = []\npos_doc_l = []\nJ = 4\nre_splt = re.compile(' |=|-') # keras' tokenizer treats forms like abc-def as a single word, so split on '-' here as well to keep things simple\nwith open('text_data','r') as f:\n all_l = f.readlines()\n for i,line in enumerate(all_l):\n if len(line) > 10:\n one_l = line.split('\\t')\n qry_l.append(' '.join(filter(lambda x: x.lower() not in spc_sym , re_splt.split(one_l[1]) )))\n pos_doc_l.append(' '.join(filter(lambda x: x.lower() not in spc_sym , re_splt.split(one_l[0]) )))\n\nprint(len(qry_l), qry_l)\n\ni = 20\nN_qry = len(qry_l)\nrnd_rng = list(range(0,i))+list(range(i+1,N_qry))\nrnd_rng\nrandom.choice(rnd_rng)\nneg_docs_l = [[pos_doc_l[j] for j in random.sample(list(range(0,i))+list(range(i+1,N_qry)), J) ] for i in range(len(qry_l)) ]\n\ntokenizer = Tokenizer(num_words=None, lower=True, split=' ')\n\ntokenizer.fit_on_texts(qry_l+pos_doc_l)\n\n# when fitting, a whole sentence can be passed in at once, but at computation time the words all have to be split apart\nfor i in range(N_qry):\n asd1 = tokenizer.texts_to_sequences(qry_l[i].strip('.| ').split())\n print(len(qry_l[i].strip('.| ').split()), len(asd1),asd1)\n print(len(qry_l[i].strip('.| ').split()), len(asd1),qry_l[i].strip('.| ').split())\n\nnp.random.choice([1,2,3,4,5],2)\nrandom.sample([1,2,3,4,5],2)\n\nnp.random.randint(1,10)\nnp.random.rand(3,5)\nnp.random.rand(1,3,5)\n# %%\nqry_l\nhaha = ['and ukraine would-be determined by their respective constitutions']\nvp.fit(qry_l+pos_doc_l)\nlist(vp.fit_transform(haha))\n","sub_path":"clsm_keras_v2.py","file_name":"clsm_keras_v2.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229491452","text":"import pandas as pd\nimport glob\n\nfrom matplotlib import pyplot as plt\n\nfrom HourlyWeatherData import HourlyWeatherData as hwd\n\n\nclass HourlyWeatherDataParser:\n weatherData = []\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def __init__(self):\n pass\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def plotMonth(self, inMonthsDays, inWeatherData):\n '''\n\n :param inMonthsDays: [[MONTH, DAY]...[MONTH, DAY]] pairs\n :param inWeatherData: column of HourlyWeatherData to plot\n :return:\n '''\n\n # each month/day pair gets its own figure\n figures = []\n\n # iterate through desired months\n for monthDayPair in inMonthsDays:\n\n # add current day's figure\n figures.append(plt.figure())\n\n month = monthDayPair[0]\n # list of years available for a given month\n yearsAvailable = []\n\n # iterate through available raw data\n for dataFrame in self.weatherData:\n # get the month of the current data\n dataMonth = dataFrame.iloc[:, hwd.MONTH.value].values[0]\n\n # if the current data is for the current desired month, process\n if dataMonth == month:\n dayList = dataFrame.iloc[:, hwd.DAY.value].values\n day = monthDayPair[1]\n dayStartIdx = findDayStartEnd(day, dayList)\n dayEndIdx = dayStartIdx + 24\n\n if dayStartIdx != -1:\n # append current data year to list\n yearsAvailable.append(dataFrame.iloc[:, hwd.YEAR.value].values[0])\n\n # get hour data\n x = range(24)\n\n # get desired data for given day\n y = dataFrame.iloc[dayStartIdx:dayEndIdx, inWeatherData.value].values\n\n # setup graphs\n figure = figures[-1]\n axis = figure.add_subplot(111)\n axis.plot(x, y, 'o--', label=repr(yearsAvailable[-1]))\n plt.title(inWeatherData.name + ' for ' + repr(month) + '/' + repr(day))\n plt.legend()\n plt.xticks(range(24))\n plt.xlabel('Hour of Day')\n plt.ylabel('Temperature [degC]')\n\n print('Years 
available for month = ', month)\n print(yearsAvailable)\n print()\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def importCSVFiles(self):\n files = glob.glob(hwd.RES_FOLDER.value + \"*.csv\")\n for file in files:\n data = pd.read_csv(file, skiprows=hwd.DATA_START_ROW.value)\n self.weatherData.append(data)\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def showPlots(self):\n plt.show()\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef hourStr2Int(inStrList):\n outIntList = []\n for str in inStrList:\n (h, m) = str.split(':')\n outIntList.append(int(h))\n\n return outIntList\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef findDayStartEnd(inDay, inDayList):\n '''\n\n :param inDay:\n :param inDayList:\n :return: Returns start idx for given day, -1 if not found\n '''\n\n outStartIdx = -1\n\n for i, day in enumerate(inDayList):\n if day == inDay:\n outStartIdx = i\n break\n\n return outStartIdx\n","sub_path":"HourlyWeatherDataParser.py","file_name":"HourlyWeatherDataParser.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400561847","text":"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\n\nf1 = h5py.File(\"snapshot_001.hdf5\",\"r\")\nf2 = h5py.File(\"snapshot_003.hdf5\",\"r\")\nf3 = h5py.File(\"snapshot_005.hdf5\",\"r\")\n\nfig = plt.figure()\nplt.subplots_adjust(hspace=0.06)\nplt.subplots_adjust(wspace=0.06)\n\ndata11 = np.array(f1[u'PartType1'][u'Coordinates'])\n\ndata21 = np.array(f2[u'PartType1'][u'Coordinates'])\n\ndata31 = np.array(f3[u'PartType1'][u'Coordinates'])\n\npos11x = data11[:,0]\npos11y = data11[:,1]\npos11z = data11[:,2]\n\nxc11 = np.sum(pos11x)/len(pos11x)\nyc11 = np.sum(pos11y)/len(pos11y)\nzc11 = np.sum(pos11z)/len(pos11z)\n\nix11 = np.abs(pos11z-zc11)<500\nax11 = fig.add_subplot(331)\nax11.scatter(pos11x[ix11], pos11y[ix11], s=0.01, color=\"r\")\nplt.setp(plt.gca(),xticklabels=[])\nplt.ticklabel_format(style='sci',axis='y',scilimits=(0,0))\n\n\nix12 = np.abs(pos11x-xc11)<500\nax12 = fig.add_subplot(332)\nax12.scatter(pos11y[ix12], pos11z[ix12], s=0.01, color=\"r\")\nplt.setp(plt.gca(),yticklabels=[])\nplt.setp(plt.gca(),xticklabels=[])\n\n\nix13 = np.abs(pos11y-yc11)<500\nax13 = fig.add_subplot(333)\nax13.scatter(pos11x[ix13], pos11z[ix13], s=0.01, color=\"r\")\nplt.setp(plt.gca(),yticklabels=[])\nplt.setp(plt.gca(),xticklabels=[])\n\n\npos21x = data21[:,0]\npos21y = data21[:,1]\npos21z = data21[:,2]\n\nxc21 = np.sum(pos21x)/len(pos21x)\nyc21 = np.sum(pos21y)/len(pos21y)\nzc21 = np.sum(pos21z)/len(pos21z)\n\nix21 = np.abs(pos21z-zc21)<500\nax21 = fig.add_subplot(334)\nax21.scatter(pos21x[ix21], pos21y[ix21], s=0.01, color=\"r\")\nplt.setp(plt.gca(),xticklabels=[])\nplt.ticklabel_format(style='sci',axis='y',scilimits=(0,0))\n\n\nix22 = np.abs(pos21x-xc21)<500\nax22 = fig.add_subplot(335)\nax22.scatter(pos21y[ix22], pos21z[ix22], s=0.01, color=\"r\")\nplt.setp(plt.gca(),yticklabels=[])\nplt.setp(plt.gca(),xticklabels=[])\n\n\nix23 = np.abs(pos21y-yc21)<500\nax23 = fig.add_subplot(336)\nax23.scatter(pos21x[ix23], pos21z[ix23], s=0.01, color=\"r\")\nplt.setp(plt.gca(),yticklabels=[])\nplt.setp(plt.gca(),xticklabels=[])\n\n\npos31x = data31[:,0]\npos31y = data31[:,1]\npos31z = data31[:,2]\n\nxc31 = 
np.sum(pos31x)/len(pos31x)\nyc31 = np.sum(pos31y)/len(pos31y)\nzc31 = np.sum(pos31z)/len(pos31z)\n\nix31 = np.abs(pos31z-zc31)<500\nax31 = fig.add_subplot(337)\nax31.scatter(pos31x[ix31], pos31y[ix31], s=0.01, color=\"r\")\nplt.ticklabel_format(style='sci',axis='x',scilimits=(0,0))\nplt.ticklabel_format(style='sci',axis='y',scilimits=(0,0))\n\n\nix32 = np.abs(pos31x-xc31)<500\nax32 = fig.add_subplot(338)\nax32.scatter(pos31y[ix32], pos31z[ix32], s=0.01, color=\"r\")\nplt.setp(plt.gca(),yticklabels=[])\nplt.ticklabel_format(style='sci',axis='x',scilimits=(0,0))\n\n\nix33 = np.abs(pos31y-yc31)<500\nax33 = fig.add_subplot(339)\nax33.scatter(pos31x[ix33], pos31z[ix33], s=0.01, color=\"r\")\nplt.setp(plt.gca(),yticklabels=[])\n\nplt.ticklabel_format(style='sci',axis='x',scilimits=(0,0))\n\n\n\n\nplt.savefig(\"2-D cross sections 3 Particle type-2.jpeg\", format=\"jpeg\")\nplt.show()\n\n\n","sub_path":"FinalProject/2D cross section for 3 snapshot.py","file_name":"2D cross section for 3 snapshot.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"439202069","text":"def _get_absolute_path(self, path):\n original_path = path\n if path.startswith('rsync://'):\n return path\n if (self._task._role is not None):\n path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)\n else:\n path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path)\n if (original_path and (original_path[(- 1)] == '/') and (path[(- 1)] != '/')):\n path += '/'\n return path","sub_path":"Data Set/bug-fixing-5/0d94d39689a5b8e0dbba3cdaa77399d05d77c443-<_get_absolute_path>-fix.py","file_name":"0d94d39689a5b8e0dbba3cdaa77399d05d77c443-<_get_absolute_path>-fix.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"195634741","text":"from bottle import request, response, redirect\nfrom bottle import post, route, get\nfrom config.urls import _SKIUTC_SERVICE\n\nfrom connexion.connexion import authenticate\nfrom paiement.paiement import Paiement\n\nimport json\n\nfrom user.user import User\nfrom webapis.meta import get_meta\n\n@post('/paiement')\n@authenticate\ndef paiement(user=None):\n \"\"\"\n route to pay the pack\n \"\"\"\n try:\n try:\n data = json.loads(request.body.read())\n service = data.get('service', None)\n\n except:\n raise ValueError\n\n api_response = Paiement().pay_pack(user, service)\n\n if api_response == -1:\n response.status = 200\n return json.dumps({ \"message\": \"User has already paid.\"})\n\n return api_response\n\n except ValueError:\n response.status = 400\n return json.dumps({ \"error\" : \"Invalid data.\"})\n\n except Exception as e:\n response.status = 500\n return json.dumps({ \"error\": str(e) })\n\n@get('/validatePaiement')\ndef validate_paiement():\n '''route to validate the transaction'''\n try:\n query_string = request.query.decode()\n\n login = query_string.get('login', None)\n service = query_string.get('service', _SKIUTC_SERVICE)\n\n if login is None:\n raise ValueError\n\n try:\n Paiement().update_transaction_status(login)\n except Exception as e:\n raise ValueError\n\n redirect(service)\n \n except Exception as e:\n raise e\n","sub_path":"python/lib/webapis/paiement.py","file_name":"paiement.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178569264","text":"from 
django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nfrom django.test import TestCase\nfrom django.utils.text import slugify\n\nfrom model_clone import create_copy_of_instance\nfrom sample.models import Library, Book\n\nUser = get_user_model()\n\n\nclass CreateCopyOfInstanceTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.user1 = User.objects.create(username=\"user 1\")\n cls.user2 = User.objects.create(username=\"user 2\")\n\n def test_cloning_model_with_custom_id(self):\n instance = Library.objects.create(name=\"First library\", user=self.user1)\n clone = create_copy_of_instance(instance, attrs={\"user\": self.user2})\n\n self.assertNotEqual(instance.pk, clone.pk)\n self.assertEqual(clone.user, self.user2)\n\n def test_cloning_unique_fk_field_without_a_fallback_value_is_invalid(self):\n name = \"New Library\"\n instance = Library.objects.create(name=name, user=self.user1)\n\n with self.assertRaises(ValidationError):\n create_copy_of_instance(instance)\n\n def test_cloning_excluded_field_without_a_fallback_value_is_invalid(self):\n name = \"New Library\"\n instance = Book.objects.create(\n name=name, created_by=self.user1, slug=slugify(name)\n )\n\n with self.assertRaises(IntegrityError):\n create_copy_of_instance(\n instance, exclude={\"slug\"}, attrs={\"created_by\": self.user2}\n )\n\n def test_raises_error_when_create_copy_of_instance_uses_an_invalid_attrs_value(\n self,\n ):\n instance = Library.objects.create(name=\"First library\", user=self.user1)\n\n with self.assertRaises(ValueError):\n create_copy_of_instance(instance, attrs=\"user\")\n\n def test_cloning_an_invalid_object_is_invalid(self):\n class InvalidObj:\n def __init__(self):\n pass\n\n instance = InvalidObj()\n\n with self.assertRaises(ValueError):\n create_copy_of_instance(instance, attrs={\"created_by\": self.user2})\n","sub_path":"model_clone/tests/test_create_copy_of_instance.py","file_name":"test_create_copy_of_instance.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382982492","text":"#!/usr/bin/python\n\n\nfrom __future__ import division\nimport itertools\nimport math\nimport nltk.corpus\nimport nltk.stem\nimport string\nimport warnings\n\nfrom irsystem import OptionParser, SearchEngine, Tokenizer, Vector\nfrom tfidf import TfIdf\n\n\nwarnings.simplefilter('ignore', DeprecationWarning) # for old version of NLTK\n\n\nclass Stopwords(SearchEngine):\n __excludes__ = set(nltk.corpus.stopwords.words('english'))\n __length_threshold__ = 2\n\n @staticmethod\n def exclude(word, no_small=False, no_numbers=False, no_digits=False):\n if word in Stopwords.__excludes__:\n return True\n\n if no_small and len(word) <= Stopwords.__length_threshold__:\n return True\n\n if no_numbers:\n try:\n float(word)\n return True\n except ValueError:\n pass\n\n if no_digits and any(c.isdigit() for c in word):\n return True\n\n return False\n\n def process(self, words):\n return [w for w in words if not Stopwords.exclude(w)]\n\n\nclass Ngrams(SearchEngine):\n __ngram_len__ = 4\n\n @staticmethod\n def ngrams(word):\n if len(word) <= Ngrams.__ngram_len__:\n yield word\n else:\n for n in xrange(len(word) - Ngrams.__ngram_len__ + 1):\n yield word[n:n + Ngrams.__ngram_len__]\n\n def process(self, words):\n return [ngram for w in words for ngram in Ngrams.ngrams(w)]\n\n\nclass Stemmer(SearchEngine):\n __stemmer__ = nltk.stem.porter.PorterStemmer()\n\n 
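# NOTE: the PorterStemmer above is created once as a class attribute, so every\n # call to Stemmer.stem()/process() reuses the same instance instead of building\n # a new stemmer per call; Lemmatizer and Soundex cache their helpers the same way.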
@staticmethod\n def stem(word):\n return Stemmer.__stemmer__.stem(word)\n\n def process(self, words):\n return [Stemmer.stem(w) for w in words]\n\n\nclass Lemmatizer(SearchEngine):\n __lemmatizer__ = nltk.stem.wordnet.WordNetLemmatizer()\n\n @staticmethod\n def lemma(word):\n return Lemmatizer.__lemmatizer__.lemmatize(word)\n\n def process(self, words):\n return [Lemmatizer.lemma(w) for w in words]\n\n\nclass Wordnet(SearchEngine):\n __separator__ = '_'\n\n @staticmethod\n def disambiguate(term, context, wider_context=None):\n senses = nltk.corpus.wordnet.synsets(term)\n if not senses:\n return None\n\n context = Vector(Tokenizer.simple_split(context))\n best_sense, best_score = None, 0\n for sense in senses:\n definition = Vector(Tokenizer.simple_split(sense.definition))\n score = context.dot_product(definition)\n if score > best_score:\n best_sense, best_score = sense, score\n\n if best_sense is None:\n if wider_context is None:\n best_sense = senses[0]\n else:\n best_sense = Wordnet.disambiguate(term, wider_context)\n\n return best_sense\n\n @staticmethod\n def synonyms(term, context, wider_context=None):\n word_sense = Wordnet.disambiguate(term, context, wider_context)\n if word_sense is None:\n return []\n\n synonyms = []\n for synonym in word_sense.lemma_names:\n if synonym.find(Wordnet.__separator__) > -1:\n for word_compound in synonym.split(Wordnet.__separator__):\n synonyms.append(word_compound)\n synonyms.append(synonym.translate(None, Wordnet.__separator__))\n else:\n synonyms.append(synonym)\n return [w.lower() for w in synonyms]\n\n def preprocess_query(self, words):\n synonyms = [syn for term in words\n for syn in Wordnet.synonyms(term, ' '.join(words),\n self.documents.rawtext)]\n\n return words + synonyms\n\n\nclass Soundex(SearchEngine):\n __transtable__ = string.maketrans(\n 'bfpv' + 'cgjkqsxz' + 'dt' + 'l' + 'mn' + 'r' + 'aeiouyhw',\n '1111' + '22222222' + '33' + '4' + '55' + '6' + '--------')\n\n @staticmethod\n def fingerprint(word):\n raw_soundex = word[0] + word[1:].translate(Soundex.__transtable__)\n soundex = ''.join(c for c, _ in itertools.groupby(raw_soundex)\n if not c == '-')\n return (soundex + '000')[:4]\n\n def _get_document_words(self):\n if not hasattr(self, '_document_words'):\n self._document_words = Vector(w for _, d in self.documents\n for w in d.domain())\n return self._document_words\n\n def _get_lookup_dict(self):\n if not hasattr(self, '_lookup_dict'):\n soundex = [Soundex.fingerprint(w) for w in self._document_words]\n\n d = dict.fromkeys(soundex)\n for w, s in itertools.izip(self._document_words, soundex):\n if not d.get(s):\n d[s] = [w]\n else:\n d[s].append(w)\n for k, v in d.iteritems():\n v.sort(\n key=lambda w: self._document_words[w],\n reverse=True)\n d[k] = v[0] if v else None\n\n self._lookup_dict = d\n return self._lookup_dict\n\n def correct(self, word):\n if word in self._get_document_words():\n return word\n return self._get_lookup_dict().get(Soundex.fingerprint(word)) or word\n\n def postprocess_query(self, query):\n return query.transform(self.correct)\n\n\nclass Cosine(SearchEngine):\n def similarity(self, query, document):\n dot_pr = 0\n qws_sq = 0\n dws_sq = 0\n\n # resolve references now to avoid doing it in every loop iteration\n tf = TfIdf.term_frequency\n idf = TfIdf.inverse_document_frequency\n queries = self.queries\n documents = self.documents\n\n for w in query:\n tf_wq = tf(w, query, queries)\n tf_wd = tf(w, document, documents)\n idf_w = idf(w, documents)\n\n qw = tf_wq * idf_w\n dw = tf_wd * idf_w\n\n dot_pr += qw * 
dw\n qws_sq += qw ** 2\n dws_sq += dw ** 2\n\n # cosine similarity normalises by the product of the two vector norms,\n # not their sum\n return dot_pr / (math.sqrt(qws_sq) * math.sqrt(dws_sq))\n\n\nclass PseudoFeedback(TfIdf, SearchEngine):\n __num_expansions__ = 10\n\n def postprocess_query(self, query):\n scores = sorted([(TfIdf.similarity(self, query, document), did)\n for did, document in self.documents], reverse=True)\n\n for _, did in scores[:PseudoFeedback.__num_expansions__]:\n query.union(self.documents[did])\n return query\n\n\nif __name__ == '__main__':\n class BestSearchEngine(PseudoFeedback, Stemmer, Stopwords, Cosine):\n def similarity(self, query, document):\n return Cosine.similarity(self, query, document)\n\n def postprocess_query(self, query):\n return PseudoFeedback.postprocess_query(self, query)\n\n def process(self, words):\n words = Stopwords.process(self, words)\n words = words + Stemmer.process(self, words)\n return words\n\n parser = OptionParser(calling_module=__file__)\n query_db, document_db, outpath = parser.parse_args()\n search_engine = BestSearchEngine(query_db, document_db)\n search_engine.write_scores(outpath)\n","sub_path":"src/best.py","file_name":"best.py","file_ext":"py","file_size_in_byte":7120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254616890","text":"# -*- encoding: utf-8 -*-\n#\n# Copyright © 2018 Mehdi Abaakouk \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
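An illustrative aside on the Cosine/TfIdf record above (not part of any of the surrounding files): Cosine.similarity accumulates tf-idf weights term by term and then normalises the dot product by the product of the two vector norms. A minimal, self-contained sketch of the same computation on plain term-weight dicts; the weights below are made-up values rather than output of the record's TfIdf class:

import math

def cosine_similarity(weights_a, weights_b):
    # dot product over the terms the two sparse vectors share
    dot_pr = sum(w * weights_b[t] for t, w in weights_a.items() if t in weights_b)
    norm_a = math.sqrt(sum(w ** 2 for w in weights_a.values()))
    norm_b = math.sqrt(sum(w ** 2 for w in weights_b.values()))
    if norm_a == 0 or norm_b == 0:
        # an empty vector has no direction; define its similarity as 0
        return 0.0
    return dot_pr / (norm_a * norm_b)

# two hypothetical tf-idf weight vectors sharing only the term 'cat'
print(cosine_similarity({'cat': 1.2, 'dog': 0.4}, {'cat': 0.9, 'fish': 2.0}))  # ~0.39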
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport logging\n\nimport pytest\nimport yaml\n\nfrom mergify_engine import config\nfrom mergify_engine import constants\nfrom mergify_engine import context\nfrom mergify_engine import utils\nfrom mergify_engine.tests.functional import base\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass TestMergeAction(base.FunctionalTestBase):\n SUBSCRIPTION_ACTIVE = True\n\n async def test_merge_draft(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"Merge\",\n \"conditions\": [\n f\"base={self.main_branch_name}\",\n \"label=automerge\",\n ],\n \"actions\": {\"merge\": {}},\n },\n ]\n }\n\n await self.setup_repo(yaml.dump(rules))\n\n p, _ = await self.create_pr(draft=True)\n await self.add_label(p[\"number\"], \"automerge\")\n await self.run_engine()\n\n ctxt = await context.Context.create(self.repository_ctxt, p, [])\n checks = await ctxt.pull_engine_check_runs\n assert len(checks) == 2\n check = checks[1]\n assert check[\"conclusion\"] is None\n assert check[\"output\"][\"title\"] == \"Draft flag needs to be removed\"\n assert check[\"output\"][\"summary\"] == \"\"\n\n await self.remove_label(p[\"number\"], \"automerge\")\n await self.run_engine()\n ctxt = await context.Context.create(self.repository_ctxt, p, [])\n checks = await ctxt.pull_engine_check_runs\n assert len(checks) == 2\n check = checks[1]\n assert check[\"conclusion\"] == \"cancelled\"\n assert check[\"output\"][\"title\"] == \"The rule doesn't match anymore\"\n\n async def test_merge_with_installation_token(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"merge on main\",\n \"conditions\": [f\"base={self.main_branch_name}\"],\n \"actions\": {\"merge\": {}},\n },\n ]\n }\n\n await self.setup_repo(yaml.dump(rules))\n\n p, _ = await self.create_pr()\n await self.run_engine()\n await self.wait_for(\"pull_request\", {\"action\": \"closed\"})\n\n p = await self.get_pull(p[\"number\"])\n self.assertEqual(True, p[\"merged\"])\n self.assertEqual(config.BOT_USER_LOGIN, p[\"merged_by\"][\"login\"])\n\n async def test_merge_with_oauth_token(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"merge on main\",\n \"conditions\": [f\"base={self.main_branch_name}\"],\n \"actions\": {\"merge\": {\"merge_bot_account\": \"{{ body }}\"}},\n },\n ]\n }\n\n await self.setup_repo(yaml.dump(rules))\n\n p, _ = await self.create_pr(message=\"mergify-test4\")\n await self.run_engine()\n await self.wait_for(\"pull_request\", {\"action\": \"closed\"})\n\n p = await self.get_pull(p[\"number\"])\n self.assertEqual(True, p[\"merged\"])\n self.assertEqual(\"mergify-test4\", p[\"merged_by\"][\"login\"])\n\n @pytest.mark.skipif(\n not config.GITHUB_URL.startswith(\"https://github.com\"),\n reason=\"required_conversation_resolution requires GHES 3.2\",\n )\n async def test_merge_branch_protection_conversation_resolution(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"merge\",\n \"conditions\": [f\"base={self.main_branch_name}\"],\n \"actions\": {\"merge\": {}},\n }\n ]\n }\n\n await self.setup_repo(yaml.dump(rules))\n\n protection = {\n \"required_status_checks\": None,\n \"required_linear_history\": False,\n \"required_pull_request_reviews\": None,\n \"required_conversation_resolution\": True,\n \"restrictions\": None,\n \"enforce_admins\": False,\n }\n\n await self.branch_protection_protect(self.main_branch_name, protection)\n\n p1, _ = await self.create_pr(\n files={\"my_testing_file\": \"foo\", 
\"super_original_testfile\": \"42\\ntest\\n\"}\n )\n\n await self.create_review_thread(\n p1[\"number\"],\n \"Don't like this line too much either\",\n path=\"super_original_testfile\",\n line=2,\n )\n\n thread = (await self.get_review_threads(p1[\"number\"]))[\"repository\"][\n \"pullRequest\"\n ][\"reviewThreads\"][\"edges\"][0][\"node\"]\n\n await self.run_engine()\n\n ctxt = await context.Context.create(self.repository_ctxt, p1, [])\n summary = await ctxt.get_engine_check_run(constants.SUMMARY_NAME)\n assert summary is not None\n\n assert (\n \"- [ ] `#review-threads-unresolved=0` [🛡 GitHub branch protection]\"\n in summary[\"output\"][\"summary\"]\n )\n\n is_resolved = await self.resolve_review_thread(thread_id=thread[\"id\"])\n assert is_resolved\n\n thread = (await self.get_review_threads(p1[\"number\"]))[\"repository\"][\n \"pullRequest\"\n ][\"reviewThreads\"][\"edges\"][0][\"node\"]\n assert thread[\"isResolved\"]\n\n # NOTE(Syfe): We need to generate an event with send_pull_refresh() in order\n # to trigger the summary check update after resolve_review_thread() since GitHub doesn't\n # generate one after resolving a conversation (issue related MRGFY-907)\n with utils.yaaredis_for_stream() as redis_stream:\n await utils.send_pull_refresh(\n ctxt.redis,\n redis_stream,\n ctxt.pull[\"base\"][\"repo\"],\n pull_request_number=p1[\"number\"],\n action=\"internal\",\n source=\"test\",\n )\n\n await self.run_engine()\n\n ctxt._caches.pull_check_runs.delete()\n summary = await ctxt.get_engine_check_run(constants.SUMMARY_NAME)\n assert summary is not None\n\n assert (\n \"- [X] `#review-threads-unresolved=0` [🛡 GitHub branch protection]\"\n in summary[\"output\"][\"summary\"]\n )\n\n async def test_merge_branch_protection_linear_history(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"merge\",\n \"conditions\": [f\"base={self.main_branch_name}\"],\n \"actions\": {\"merge\": {}},\n }\n ]\n }\n\n await self.setup_repo(yaml.dump(rules))\n\n protection = {\n \"required_status_checks\": None,\n \"required_linear_history\": True,\n \"required_pull_request_reviews\": None,\n \"restrictions\": None,\n \"enforce_admins\": False,\n }\n\n await self.branch_protection_protect(self.main_branch_name, protection)\n\n p1, _ = await self.create_pr()\n await self.run_engine()\n await self.wait_for(\"check_run\", {\"check_run\": {\"conclusion\": \"failure\"}})\n\n ctxt = await context.Context.create(self.repository_ctxt, p1, [])\n checks = [\n c\n for c in await ctxt.pull_engine_check_runs\n if c[\"name\"] == \"Rule: merge (merge)\"\n ]\n assert \"failure\" == checks[0][\"conclusion\"]\n assert (\n \"Branch protection setting 'linear history' conflicts with Mergify configuration\"\n == checks[0][\"output\"][\"title\"]\n )\n\n async def test_merge_template_with_empty_body(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"merge on main\",\n \"conditions\": [f\"base={self.main_branch_name}\"],\n \"actions\": {\n \"merge\": {\n \"commit_message_template\": \"\"\"{{ title }} (#{{ number }})\n\n{{body}}\n\"\"\",\n }\n },\n },\n ]\n }\n await self.setup_repo(yaml.dump(rules))\n\n p, _ = await self.create_pr(message=\"\")\n await self.run_engine()\n await self.wait_for(\"pull_request\", {\"action\": \"closed\"})\n\n p = await self.get_pull(p[\"number\"])\n self.assertEqual(True, p[\"merged\"])\n c = await self.get_commit(p[\"merge_commit_sha\"])\n assert (\n f\"\"\"test_merge_template_with_empty_body: pull request n1 from fork (#{p['number']})\"\"\"\n == c[\"commit\"][\"message\"]\n 
)\n\n async def test_merge_template(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"merge on main\",\n \"conditions\": [f\"base={self.main_branch_name}\"],\n \"actions\": {\n \"merge\": {\n \"commit_message_template\": \"\"\"{{ title }} (#{{ number }})\n{{body}}\nsuperRP!\n\"\"\",\n }\n },\n },\n ]\n }\n await self.setup_repo(yaml.dump(rules))\n\n p, _ = await self.create_pr(message=\"mergify-test4\")\n await self.run_engine()\n await self.wait_for(\"pull_request\", {\"action\": \"closed\"})\n\n p2 = await self.get_pull(p[\"number\"])\n self.assertEqual(True, p2[\"merged\"])\n p3 = await self.get_commit(p2[\"merge_commit_sha\"])\n assert (\n f\"\"\"test_merge_template: pull request n1 from fork (#{p2['number']})\n\nmergify-test4\nsuperRP!\"\"\"\n == p3[\"commit\"][\"message\"]\n )\n ctxt = await context.Context.create(self.repository_ctxt, p, [])\n summary = await ctxt.get_engine_check_run(constants.SUMMARY_NAME)\n assert (\n \"\"\"\n:bangbang: **Action Required** :bangbang:\n\n> **The configuration uses the deprecated `commit_message` mode of the merge action.**\n> A brownout is planned for the whole March 21th, 2022 day.\n> This option will be removed on April 25th, 2022.\n> For more information: https://docs.mergify.com/actions/merge/\n\n\"\"\"\n not in summary[\"output\"][\"summary\"]\n )\n\n async def test_merge_branch_protection_strict(self):\n rules = {\n \"pull_request_rules\": [\n {\n \"name\": \"merge\",\n \"conditions\": [f\"base={self.main_branch_name}\"],\n \"actions\": {\"merge\": {}},\n }\n ]\n }\n\n await self.setup_repo(yaml.dump(rules))\n\n # Check policy of that branch is the expected one\n protection = {\n \"required_status_checks\": {\n \"strict\": True,\n \"contexts\": [\"continuous-integration/fake-ci\"],\n },\n \"required_pull_request_reviews\": None,\n \"restrictions\": None,\n \"enforce_admins\": False,\n }\n\n p1, _ = await self.create_pr()\n p2, _ = await self.create_pr()\n\n await self.merge_pull(p1[\"number\"])\n\n await self.branch_protection_protect(self.main_branch_name, protection)\n\n await self.run_engine()\n await self.wait_for(\"pull_request\", {\"action\": \"closed\"})\n\n await self.create_status(p2)\n await self.run_engine()\n\n ctxt = await context.Context.create(self.repository_ctxt, p2, [])\n summary = await ctxt.get_engine_check_run(constants.SUMMARY_NAME)\n assert summary is not None\n assert \"[ ] `#commits-behind=0`\" in summary[\"output\"][\"summary\"]\n\n await self.create_comment(p2[\"number\"], \"@mergifyio update\")\n await self.run_engine()\n await self.wait_for(\"issue_comment\", {\"action\": \"created\"})\n await self.wait_for(\"pull_request\", {\"action\": \"synchronize\"})\n await self.run_engine()\n\n p2 = await self.get_pull(p2[\"number\"])\n await self.create_status(p2)\n await self.run_engine()\n ctxt = await context.Context.create(self.repository_ctxt, p2, [])\n summary = await ctxt.get_engine_check_run(constants.SUMMARY_NAME)\n assert summary is not None\n assert \"[X] `#commits-behind=0`\" in summary[\"output\"][\"summary\"]\n\n p2 = await self.get_pull(p2[\"number\"])\n assert p2[\"merged\"]\n","sub_path":"mergify_engine/tests/functional/actions/test_merge.py","file_name":"test_merge.py","file_ext":"py","file_size_in_byte":12717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"366874324","text":"'''\n73. Set Matrix Zeroes\n\nGiven a m x n matrix, if an element is 0, set its entire row and column to 0. 
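(One constant-space idea, used by the commented-out 'Best solution' further below: reuse the first row and the first column of the matrix itself as the zero markers, and keep two extra booleans recording whether the first row and the first column must themselves be cleared.)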
Do it in-place.\n\nExample 1:\n\nInput: \n[\n [1,1,1],\n [1,0,1],\n [1,1,1]\n]\nOutput: \n[\n [1,0,1],\n [0,0,0],\n [1,0,1]\n]\nExample 2:\n\nInput: \n[\n [0,1,2,0],\n [3,4,5,2],\n [1,3,1,5]\n]\nOutput: \n[\n [0,0,0,0],\n [0,4,5,0],\n [0,3,1,0]\n]\nFollow up:\n\nA straight forward solution using O(mn) space is probably a bad idea.\nA simple improvement uses O(m + n) space, but still not the best solution.\nCould you devise a constant space solution?\n'''\n\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: void Do not return anything, modify matrix in-place instead.\n \"\"\"\n# Best solution:\n# if matrix == []:\n# return\n# column = False\n# row = False\n# m = len(matrix)\n# n = len(matrix[0])\n# for i in range(m):\n# if matrix[i][0] == 0:\n# row = True\n# break\n# for j in range(n):\n# if matrix[0][j] == 0:\n# column = True\n# break\n# for i in range(1,m):\n# for j in range(1,n):\n# if matrix[i][j] == 0:\n# matrix[i][0] = 0\n# matrix[0][j] = 0 \n# for i in range(1,m):\n# if matrix[i][0] == 0:\n# for j in range(1,n):\n# matrix[i][j] = 0\n# for j in range(1,n):\n# if matrix[0][j] == 0:\n# for i in range(1,m):\n# matrix[i][j] = 0\n# if column:\n# for j in range(n):\n# matrix[0][j] = 0\n# if row:\n# for i in range(m):\n# matrix[i][0] = 0\n \n \n# better solution:\n# if not matrix:return\n \n# rows=len(matrix)\n# cols=len(matrix[0])\n \n# rowset=set()\n# collset=set()\n \n# \"\"\"\n# m+n space appraoch\n# \"\"\"\n# for i in range(rows):\n# for j in range(cols):\n# if matrix[i][j]==0:\n# rowset.add(i)\n# collset.add(j)\n \n# for i in rowset:\n# for j in range(cols):\n# matrix[i][j]=0\n \n# for i in collset:\n# for j in range(rows):\n# matrix[j][i]=0\n \n \n m = len(matrix)\n n = len(matrix[0])\n row = [1] * m\n col = [1] * n\n \n for i in range(m):\n for j in range(n):\n if col[j] or row[i]:\n if matrix[i][j] == 0:\n row[i] = 0\n col[j] = 0\n \n # for i in range(m):\n # if row[i] == 0:\n # matrix[i] = [0] * n\n # for j in range(n):\n # if col[j] == 0:\n # for k in range(m):\n # matrix[k][j] = 0\n for i in range(m):\n if row[i] == 0:\n matrix[i] = [0]*n\n else:\n for j in range(n):\n if col[j] == 0:\n matrix[i][j] = 0\n \n","sub_path":"73_SetMatrixZeroes.py","file_name":"73_SetMatrixZeroes.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379570966","text":"import pycom\nimport time\npycom.heartbeat(False)\n\ncolor = 0x7f0000\nprint(\"Color rojo: \",color)\n\ncolor = 0x7f7f00\nprint(\"Color amarillo: \",color)\n\nwhile (True):\n pycom.rgbled(color) # green\n time.sleep(0.5)\n #print(\"Color es: \",color)\n color = color - 64\n if (int(color) <= 8323072 ):\n color = 0x7f0000\n break\n \npycom.rgbled(color) # red\n","sub_path":"Code/LED/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254616890","text":"import sys\nimport os\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom cv2 import *\nimport threading\nimport time\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtWidgets import QWidget, QApplication, QGroupBox, QPushButton, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QFormLayout, QLineEdit, QTextEdit\nfrom MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet\nimport imutils\nfrom rectangleDrawThread import rectangleThread\n# from PIL import Image\n# from pypinyin 
import pinyin, lazy_pinyin\n# import pypinyin\nimport cv2\n\nclass VideoBox(QWidget):\n VIDEO_TYPE_OFFLINE = 0\n VIDEO_TYPE_REAL_TIME = 1\n\n STATUS_INIT = 0\n STATUS_PLAYING = 1\n STATUS_PAUSE = 2\n\n video_url = \"\"\n progress = 0\n\n def __init__(self, video_url=\"\", video_type=VIDEO_TYPE_OFFLINE, auto_play=False):\n super(VideoBox, self).__init__()\n self.createGridGroupBox()\n self.creatVboxGroupBox()\n self.preTime = 0\n mainLayout = QVBoxLayout()\n hboxLayout = QHBoxLayout()\n hboxLayout.addStretch()\n hboxLayout.addWidget(self.gridGroupBox)\n hboxLayout.addWidget(self.vboxGroupBox)\n mainLayout.addLayout(hboxLayout)\n self.setLayout(mainLayout)\n self.threshold = [0.6, 0.6, 0.7]\n\n self.video_url = video_url\n self.video_type = video_type # 0: offline 1: realTime\n self.auto_play = auto_play\n self.status = self.STATUS_INIT # 0: init 1:playing 2: pause\n self.timer = VideoTimer()\n self.timer.timeSignal.signal[str].connect(self.show_video_images)\n # video 初始设置\n self.playCapture = VideoCapture()\n if self.video_url != \"\":\n self.set_timer_fps()\n if self.auto_play:\n self.switch_video()\n self.thread2 = threading.Thread(target=self.update_timer)\n self.thread2.setDaemon(True)\n self.thread2.start()\n\n def initNet(self,Pnet,Rnet,Onet,lock):\n self.Pnet=Pnet\n self.Rnet = Rnet\n self.Onet = Onet\n self.lock = lock\n\n def createGridGroupBox(self):\n self.gridGroupBox = QGroupBox(\"Grid layout\")\n layout = QGridLayout()\n self.pictureLabel = QLabel()\n init_image = QPixmap(\"1.png\").scaled(1000, 700)\n self.pictureLabel.setPixmap(init_image)\n self.threadId = 0\n self.playButton = QPushButton()\n self.playButton.setEnabled(True)\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.playButton.clicked.connect(self.switch_video)\n\n control_box = QHBoxLayout()\n control_box.setContentsMargins(0, 0, 0, 0)\n control_box.addWidget(self.playButton)\n llayout = QVBoxLayout()\n llayout.addWidget(self.pictureLabel)\n llayout.addLayout(control_box)\n layout = QHBoxLayout()\n layout.addLayout(llayout)\n self.gridGroupBox.setLayout(layout)\n self.setWindowTitle('Basic Layout')\n\n def creatVboxGroupBox(self):\n self.vboxGroupBox = QGroupBox(\"Vbox layout\")\n layout = QVBoxLayout()\n init_image = QPixmap(\"1.png\").scaled(200, 200)\n self.imgeLabel_0 = QLabel()\n self.imgeLabel_0.setPixmap(init_image)\n self.textbox = QLineEdit(self)\n self.textbox.setText('Name')\n self.imgeLabel_1 = QLabel()\n self.imgeLabel_1.setPixmap(init_image)\n self.imgeLabel_2 = QLabel()\n self.imgeLabel_2.setPixmap(init_image)\n\n layout.addWidget(self.imgeLabel_0)\n layout.addWidget(self.textbox)\n # layout.addWidget(self.imgeLabel_1)\n # layout.addWidget(self.imgeLabel_2)\n self.vboxGroupBox.setLayout(layout)\n\n def reset(self):\n self.timer.stop()\n self.playCapture.release()\n self.status = VideoBox.STATUS_INIT\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n\n def set_timer_fps(self):\n self.playCapture.open(self.video_url)\n fps = self.playCapture.get(CAP_PROP_FPS)\n self.timer.set_fps(fps)\n self.playCapture.release()\n\n def set_video(self, url, video_type=VIDEO_TYPE_OFFLINE, auto_play=False):\n self.reset()\n self.video_url = url\n self.video_type = video_type\n self.auto_play = auto_play\n self.set_timer_fps()\n if self.auto_play:\n self.switch_video()\n\n def play(self):\n if self.video_url == \"\" or self.video_url is None:\n return\n if not self.playCapture.isOpened():\n self.playCapture.open(self.video_url)\n self.timer.start()\n 
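# at this point the VideoTimer thread emits timeSignal at the FPS read from the\n # capture (see set_timer_fps), and each tick drives show_video_images() through\n # the connection made in __init__; the button icon below flips to reflect the\n # PLAYING state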
self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))\n self.status = VideoBox.STATUS_PLAYING\n\n def stop(self):\n if self.video_url == \"\" or self.video_url is None:\n return\n if self.playCapture.isOpened():\n self.timer.stop()\n if self.video_type is VideoBox.VIDEO_TYPE_REAL_TIME:\n self.playCapture.release()\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.status = VideoBox.STATUS_PAUSE\n\n def re_play(self):\n if self.video_url == \"\" or self.video_url is None:\n return\n self.playCapture.release()\n self.playCapture.open(self.video_url)\n self.timer.start()\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))\n self.status = VideoBox.STATUS_PLAYING\n\n def show_video_images(self):\n\n if self.playCapture.isOpened():\n success, frame = self.playCapture.read()\n if success:\n start = time.time()\n frame = imutils.resize(frame, width=1000)\n thread1 =rectangleThread(self.threadId, frame, self.Pnet, self.Rnet, self.Onet, lock, self.imgeLabel_0,\\\n self.textbox, self.imgeLabel_1, self.imgeLabel_2)\n self.threadId = self.threadId + 1\n thread1.start()\n end = time.time()\n height, width = frame.shape[:2]\n if frame.ndim == 3:\n rgb = cvtColor(frame, COLOR_BGR2RGB)\n elif frame.ndim == 2:\n rgb = cvtColor(frame, COLOR_GRAY2BGR)\n\n temp_image = QImage(rgb.flatten(), width, height, QImage.Format_RGB888)\n temp_pixmap = QPixmap.fromImage(temp_image)\n self.pictureLabel.setPixmap(temp_pixmap)\n\n else:\n print(\"read failed, no frame data\")\n success, frame = self.playCapture.read()\n if not success and self.video_type is VideoBox.VIDEO_TYPE_OFFLINE:\n print(\"play finished\") # 判断本地文件播放完毕\n self.reset()\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))\n return\n else:\n print(\"open file or capturing device error, init again\")\n self.reset()\n\n def update_timer(self):\n while (True):\n if self.status is VideoBox.STATUS_PLAYING:\n self.progress = self.progress + 1\n time.sleep(0.4)\n if (self.progress == 15):\n self.progress = 0\n\n def switch_video(self):\n if self.video_url == \"\" or self.video_url is None:\n return\n if self.status is VideoBox.STATUS_INIT:\n self.playCapture.open(self.video_url)\n self.timer.start()\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))\n elif self.status is VideoBox.STATUS_PLAYING:\n self.timer.stop()\n if self.video_type is VideoBox.VIDEO_TYPE_REAL_TIME:\n self.playCapture.release()\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n elif self.status is VideoBox.STATUS_PAUSE:\n if self.video_type is VideoBox.VIDEO_TYPE_REAL_TIME:\n self.playCapture.open(self.video_url)\n self.timer.start()\n self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPause))\n\n self.status = (VideoBox.STATUS_PLAYING,\n VideoBox.STATUS_PAUSE,\n VideoBox.STATUS_PLAYING)[self.status]\n\nclass Communicate(QObject):\n signal = pyqtSignal(str)\n\nclass VideoTimer(QThread):\n\n def __init__(self, frequent=20):\n QThread.__init__(self)\n self.stopped = False\n self.frequent = frequent\n self.timeSignal = Communicate()\n self.mutex = QMutex()\n\n def run(self):\n with QMutexLocker(self.mutex):\n self.stopped = False\n while True:\n if self.stopped:\n return\n self.timeSignal.signal.emit(\"1\")\n time.sleep(1 / self.frequent)\n\n def stop(self):\n with QMutexLocker(self.mutex):\n self.stopped = True\n\n def is_stopped(self):\n with QMutexLocker(self.mutex):\n return self.stopped\n\n def set_fps(self, fps):\n self.frequent = 
fps\n\ndef loadNet():\n global Pnet, Rnet, Onet\n Pnet = create_Kao_Pnet(r'12net.h5')\n Rnet = create_Kao_Rnet(r'24net.h5')\n Onet = create_Kao_Onet(r'48net.h5') # will not work. caffe and TF incompatible\n img = cv2.imread('1.png')\n scale_img = cv2.resize(img, (100, 100))\n input = scale_img.reshape(1, *scale_img.shape)\n Pnet.predict(input)\n img = cv2.imread('1.png')\n scale_img = cv2.resize(img, (24, 24))\n input = scale_img.reshape(1, *scale_img.shape)\n Rnet.predict(input)\n img = cv2.imread('1.png')\n scale_img = cv2.resize(img, (48, 48))\n input = scale_img.reshape(1, *scale_img.shape)\n Onet.predict(input)\n return Pnet,Rnet,Onet\n\nif __name__ == \"__main__\":\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n mapp = QApplication(sys.argv)\n Pnet, Rnet, Onet = loadNet()\n\n mw = VideoBox()\n lock = threading.Lock()\n mw.initNet(Pnet,Rnet,Onet,lock)\n mw.set_video(\"east.mp4\", VideoBox.VIDEO_TYPE_OFFLINE, False)\n mw.show()\n\n sys.exit(mapp.exec_())","sub_path":"src/predict_GUI_v1.py","file_name":"predict_GUI_v1.py","file_ext":"py","file_size_in_byte":10138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67846820","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pytest\nfrom scipy.optimize import basinhopping, minimize\n\nfrom python_inferno.ba_model import gen_to_optimise\nfrom python_inferno.basinhopping import BoundedSteps\nfrom python_inferno.hyperopt import HyperoptSpace, get_space_template\nfrom python_inferno.iter_opt import (\n ALWAYS_OPTIMISED,\n IGNORED,\n configuration_to_hyperopt_space_spec,\n get_next_x0,\n next_configurations_iter,\n)\nfrom python_inferno.space import generate_space_spec\nfrom python_inferno.space_opt import fail_func, success_func\n\n# NOTE Modified `space_opt` function.\n\n\ndef mod_space_opt(\n *,\n space,\n dryness_method,\n fuel_build_up_method,\n include_temperature,\n discrete_params,\n defaults=None,\n basinhopping_options=None,\n minimizer_options=None,\n mode=\"basinhopping\",\n x0=None,\n):\n \"\"\"Optimisation of the continuous (float) part of a given `space`.\"\"\"\n to_optimise = gen_to_optimise(\n fail_func=fail_func,\n success_func=success_func,\n # Init (data) params.\n dryness_method=dryness_method,\n fuel_build_up_method=fuel_build_up_method,\n include_temperature=include_temperature,\n _uncached_data=False,\n **discrete_params,\n )\n\n defaults_dict = defaults if defaults is not None else {}\n\n def to_optimise_with_discrete(x):\n return to_optimise(\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **defaults_dict,\n )\n\n def basinhopping_callback(x, f, accept):\n # NOTE: Parameters recorded here are authoritative, since hyperopt will not\n # properly report values modified as in e.g. 
`mod_quniform`.\n values = {\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **discrete_params,\n **defaults_dict,\n }\n values[\"dryness_method\"] = dryness_method\n values[\"fuel_build_up_method\"] = fuel_build_up_method\n values[\"include_temperature\"] = include_temperature\n\n minimizer_options_dict = minimizer_options if minimizer_options is not None else {}\n basinhopping_options_dict = (\n basinhopping_options if basinhopping_options is not None else {}\n )\n\n if x0 is None:\n x0 = space.continuous_x0_mid\n\n if mode == \"basinhopping\":\n res = basinhopping(\n to_optimise_with_discrete,\n x0=x0,\n seed=0,\n callback=basinhopping_callback,\n take_step=BoundedSteps(\n stepsize=0.3, rng=np.random.default_rng(0), verbose=True\n ),\n **{\n \"disp\": True,\n \"minimizer_kwargs\": dict(\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n ),\n \"T\": 0.05,\n \"niter\": 100,\n \"niter_success\": 15,\n **basinhopping_options_dict,\n },\n )\n elif mode == \"minimize\":\n res = minimize(\n to_optimise_with_discrete,\n x0=x0,\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n )\n else:\n raise ValueError\n\n return res\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize(\"seed\", range(5))\n@pytest.mark.parametrize(\"opt_mode\", [\"minimize\", \"basinhopping\"])\ndef test_perf(model_params, opt_mode, seed):\n params = next(iter(model_params.values()))\n\n dryness_method = int(params[\"dryness_method\"])\n fuel_build_up_method = int(params[\"fuel_build_up_method\"])\n include_temperature = int(params[\"include_temperature\"])\n\n # NOTE Full space template.\n space_template = get_space_template(\n dryness_method=dryness_method,\n fuel_build_up_method=fuel_build_up_method,\n include_temperature=include_temperature,\n )\n\n discrete_param_names = HyperoptSpace(\n generate_space_spec(space_template)\n ).discrete_param_names\n\n # NOTE Constant.\n defaults = dict(\n dryness_method=dryness_method,\n fuel_build_up_method=fuel_build_up_method,\n include_temperature=include_temperature,\n )\n discrete_params = {}\n\n # Most basic config possible.\n # Keys specify which parameters are potentially subject to optimisation. 
All other\n # keys will be taken from the optimal configuration as set out in `params`.\n start_config = defaults.copy()\n\n base_spec = {}\n\n for key in space_template:\n if key in ALWAYS_OPTIMISED:\n base_spec.update(generate_space_spec({key: space_template[key]}))\n elif key in IGNORED:\n if key in discrete_param_names:\n # NOTE Also constant.\n for pft_key in (f\"{key}{suffix}\" for suffix in (\"\", \"2\", \"3\")):\n if pft_key in params:\n discrete_params[pft_key] = params[pft_key]\n\n assert key in discrete_params, \"At least 1 key should be present\"\n else:\n raise ValueError(key)\n else:\n start_config[key] = 0\n\n rng = np.random.default_rng(seed)\n\n # Generate hypothetical chain of configurations randomly.\n configurations = []\n\n next_config = start_config.copy()\n next_configurations = True\n\n while True:\n next_configurations = list(\n next_configurations_iter({**start_config, **next_config})\n )\n if not next_configurations:\n break\n next_config = rng.choice([config for config, _ in next_configurations])\n configurations.append(next_config)\n\n # Investigate some of the later configurations.\n losses = []\n x0_dict = None\n prev_spec = None\n prev_constants = None\n\n for configuration in (configurations[i] for i in range(-9, 0, 1)):\n space_spec, constants = configuration_to_hyperopt_space_spec(configuration)\n space = HyperoptSpace({**base_spec, **space_spec})\n\n if x0_dict is not None:\n x0 = get_next_x0(\n new_space=space,\n x0_dict=x0_dict,\n prev_spec=prev_spec,\n prev_constants=prev_constants,\n )\n else:\n x0 = None\n\n res = mod_space_opt(\n space=space,\n dryness_method=dryness_method,\n fuel_build_up_method=fuel_build_up_method,\n include_temperature=include_temperature,\n discrete_params=discrete_params,\n defaults={**defaults, **constants},\n minimizer_options=dict(maxiter=500),\n basinhopping_options=dict(niter_success=10),\n mode=opt_mode,\n x0=x0,\n )\n x0_dict = {key: val for key, val in zip(space.continuous_param_names, res.x)}\n\n prev_spec = space_spec\n prev_constants = constants\n losses.append(res.fun)\n\n assert np.all(np.diff(losses) < 1e-6)\n","sub_path":"tests/test_iter_opt_perf.py","file_name":"test_iter_opt_perf.py","file_ext":"py","file_size_in_byte":7351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169664381","text":"def abrr_number(number, decimals=0):\r\n\r\n scales = {\r\n pow(10, 24): \"Y\", # Septillón / Cuatrillón\r\n pow(10, 21): \"Z\", # Se10tillón / Mil trillones\r\n pow(10, 18): \"E\", # Quintillón / Trillón\r\n pow(10, 15): \"P\", # Cuatrillón / Mil billones\r\n pow(10, 12): \"T\", # Trillón / Billón\r\n pow(10, 9): \"G\", # Billón / Millardo\r\n pow(10, 6): \"M\", # Millón\r\n pow(10, 3): \"k\", # Mil / Millar\r\n pow(10, 2): \"h\", # Cien / Centena\r\n pow(10, 1): \"da\", # Diez / Decena\r\n }\r\n\r\n if not number in range(min(scales), max(scales)):\r\n return f\"{number}\"\r\n\r\n for digit, symbol in scales.items():\r\n minimum = int(\"9\" * str(digit).count(\"0\"))\r\n maximum = int(str(minimum) + \"999\")\r\n\r\n if number in range(minimum, maximum):\r\n return f\"{number / digit:.{decimals}f} {symbol}\"\r\nabrr_number(1000000)\r\nabrr_number(10000)\r\n\r\n","sub_path":"Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28070954","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom 
sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom phe import paillier\nfrom tqdm import tqdm\nimport pickle\n\nprint(\"Generating Keypair\")\npublic_key, private_key = paillier.generate_paillier_keypair()\n\nprint(\"Loading model\")\nwith open(\"../inputs/model_all.pickle\", \"rb\") as f:\n\tclf = pickle.load(f)\n\nprint(\"Loading Testing Data\")\nwith open(\"../inputs/X_test_all.pickle\", \"rb\") as f:\n\tX_test = pickle.load(f)\nwith open(\"../inputs/y_test_all.pickle\", \"rb\") as f:\n\ty_test = pickle.load(f)\n\nprint(\"Encrypting Test Data\")\nenc_X_test = [[public_key.encrypt(j) for j in i] for i in tqdm(X_test)]\nenc_X_test = np.array(enc_X_test)\n\nprint(\"Mapping encrypted testing data\")\nenc_mapping = list()\nfor i in tqdm(enc_X_test):\n tot = 0\n for j in range(len(i)):\n tot += i[j] * clf.coef_[0,j]\n enc_mapping.append(tot)\n\nprint(\"Decrypt mapping value\")\ndec_mapping = [private_key.decrypt(i) for i in tqdm(enc_mapping)]\n\nprint(\"Get prediction\")\ny_pred = [0 if i < -clf.intercept_[0] else 1 for i in dec_mapping]\ny_pred = np.array(y_pred)\n\nprint(\"Evaluate Prediction\")\nprint(\"Accuracy Score: \", accuracy_score(y_test,y_pred))\nprint(classification_report(y_test,y_pred))\nprint(\"Done!\")","sub_path":"program/test_encrypted_all.py","file_name":"test_encrypted_all.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"113263483","text":"import logging\n\nlog_path = '/usr/src/app/log/main_log.log'\nlog = logging.getLogger(__name__)\n\nclass LogManager(object):\n\n def __init__(self):\n global log\n global log_path\n\n log = logging.getLogger(__name__)\n log.setLevel(logging.INFO)\n\n # create a file handler\n handler = logging.FileHandler(log_path)\n handler.setLevel(logging.INFO)\n\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n # add the handlers to the log\n log.addHandler(handler)\n pass\n \n # Aggiunge alla lista una misura dati i valori\n def info(self, message):\n global log\n log.info(message)\n\n # Aggiunge alla lista una misura dati i valori\n def fatal(self, message):\n global log\n log.fatal(message)\n\n # Aggiunge alla lista una misura dati i valori\n def warning(self, message):\n global log\n log.warning(message)","sub_path":"Sensor_Hat/src/mod_log.py","file_name":"mod_log.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415719067","text":"# -*- Python -*-\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Jiao Lin\n# California Institute of Technology\n# (C) 2006-2009 All Rights Reserved\n#\n# {LicenseText}\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\ndef start(task, debug=False):\n from vnf.utils import launch_detached, bindir\n import os\n cmd = os.path.join(bindir, task_runner)\n\n options = {\n 'iworker': task.worker,\n 'id': task.id,\n }\n\n extra_options = task.options\n for i in range(len(extra_options)/2):\n k = 'iworker.%s' % extra_options[2*i]\n v = extra_options[2*i+1]\n options[k] = v\n continue\n\n optstr = ' '.join(\n [ '--%s=\"%s\"' % (k,v) for k,v in options.iteritems() ])\n cmd += ' ' + optstr\n\n launch_detached(cmd, debug=debug)\n return\n\n\ndef progressbarID(task):\n return 
'itask-%s-pbar' % task.id\n\n\ntask_runner = 'itaskapp.py'\n\n\n# version\n__id__ = \"$Id$\"\n\n# End of file \n","sub_path":"espresso/jobmanager/jobmanager/temp/components/itask_utils.py","file_name":"itask_utils.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612796834","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split, cross_val_predict\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nsns.set(font_scale=1)\nsns.set_style(\"whitegrid\")\n# sns.set_context(\"poster\")\nsns.set_style(rc={\n 'axes.edgecolor': 'black',\n 'axes.labelcolor': 'black',\n 'xtick.bottom': True,\n 'xtick.color': 'black',\n 'ytick.left': True,\n 'ytick.right': True,\n 'ytick.color': 'black',\n 'text.color': 'black',\n 'text.size': '12',\n 'font.sans-serif': ['DejaVu Sans',\n 'Liberation Sans',\n 'Bitstream Vera Sans',\n 'sans-serif'],\n })\n\nclass Lr:\n def __init__(self, x, y, size=0.2, random_state=42):\n self.xtr, self.xte, self.ytr, self.yte = train_test_split(x, y, test_size=size, random_state=random_state)\n self.model = None\n\n def fit(self):\n model = LogisticRegression()\n model.fit(self.xtr, self.ytr)\n self.model = model\n print('Train set accuracy:')\n print(model.score(self.xtr, self.ytr))\n print('Test set accuracy:')\n print(self.model.score(self.xte, self.yte))\n\n\nclass rf:\n def __init__(self, x, y, size=0.2, random_state=42):\n self.xtr, self.xte, self.ytr, self.yte = train_test_split(x, y, test_size=size, random_state=random_state)\n self.model = None\n\n def fit(self):\n model = RandomForestClassifier()\n model.fit(self.xtr, self.ytr)\n self.model = model\n print('Train set accuracy:')\n print(model.score(self.xtr, self.ytr))\n print('Test set accuracy:')\n print(self.model.score(self.xte, self.yte))\n\n\nclass Roc:\n def __init__(self, model, method='decision_function'):\n self.model = model\n self.method = method\n\n def plot(self, x, y, cv=5, fontsize=16):\n if self.method == 'decision_function':\n ys = cross_val_predict(self.model, x, y, cv=cv, method=self.method)\n elif self.method == 'predict_proba':\n ys = cross_val_predict(self.model, x, y, cv=cv, method=self.method)[:, 1]\n\n # ROC\n fpr, tpr, thr = roc_curve(y, ys)\n plt.plot(fpr, tpr)\n plt.plot([0, 1], [0, 1], '--')\n plt.axis([0, 1, 0, 1])\n plt.xlabel('1 - Specificity', fontsize=fontsize)\n plt.ylabel('Sensitivity', fontsize=fontsize)\n plt.show()\n # AUC\n print(roc_auc_score(y, ys))\n\n\n\n\nif __name__ == \"__main__\":\n df = pd.read_pickle('df.pkl')\n lr = Lr(df.iloc[:, :16].values, df.iloc[:, 16:17].values.ravel())\n lr.fit()\n Roc(lr.model).plot(lr.xtr, lr.ytr)\n","sub_path":"LoanRisk/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"461303607","text":"class Graph(object):\n \"\"\"Graph with vertice/edge data stored in an adjacency matrix.\"\"\"\n def __init__(self, num_vertices):\n self._graph_data = [[-1 for i in range(num_vertices)] for j in range(num_vertices)]\n\n def get_num_vertices(self):\n return len(self._graph_data)\n\n def add_edge(self, nodeId1, nodeId2, weight):\n \"\"\"Node id's map to indices in the adjaceny matrix of graph data.\"\"\"\n # undirected graph so we 
add two entries for each edge\n        self._graph_data[nodeId1][nodeId2] = weight\n        self._graph_data[nodeId2][nodeId1] = weight\n\n    def find_neighbors(self, node_id):\n        result = []\n\n        edge_data = self._graph_data[node_id]\n\n        # the row is indexed by neighbor id; an entry > 0 means an edge exists\n        for neighbor_id, edge_weight in enumerate(edge_data):\n            if (edge_weight > 0):\n                result.append(neighbor_id)\n\n        return result\n\n    def get_weight(self, node_id1, node_id2):\n        return self._graph_data[node_id1][node_id2]\n\n\nclass Dijkstra(object):\n    \"\"\"Solve shortest path problem using Dijkstra's algorithm.\"\"\"\n    def __init__(self, graph):\n        self._graph = graph\n\n        # assign initial distances to all nodes\n        self._dist_dict = dict()\n\n        for i in range(self._graph.get_num_vertices()):\n            # node id's map to indices\n            # using None to represent the infinity value\n            # traditionally used in the algorithm\n            self._dist_dict[i] = None\n\n        # predecessor of each node on its best known path, for path reconstruction\n        self._prev_dict = dict()\n\n        # mark all nodes unvisited\n        self._unvisited_nodes = set()\n\n        for i in range(self._graph.get_num_vertices()):\n            # node id's map to indices\n            self._unvisited_nodes.add(i)\n\n        # create set of visited nodes\n        self._visited_nodes = set()\n\n    def _eval_neighbors(self, node_id):\n        # calculate distances and update them if less\n        # than current distances to the neighbor\n        neighbor_ids = self._graph.find_neighbors(node_id)\n\n        for neighbor_id in neighbor_ids:\n            tentative_distance = self._dist_dict[node_id] + self._graph.get_weight(node_id, neighbor_id)\n            current_distance = self._dist_dict[neighbor_id]\n\n            # None stands in for infinity, so any finite distance beats it\n            if (current_distance is None or tentative_distance < current_distance):\n                self._dist_dict[neighbor_id] = tentative_distance\n                self._prev_dict[neighbor_id] = node_id\n\n    def _get_smallest_unvisited_node(self):\n        smallest_node_id = None\n\n        for node_id in self._unvisited_nodes:\n            if (self._dist_dict[node_id] is None):\n                continue\n\n            if (smallest_node_id is None):\n                # setting our base value for comparison\n                smallest_node_id = node_id\n            else:\n                if (self._dist_dict[node_id] < self._dist_dict[smallest_node_id]):\n                    smallest_node_id = node_id\n\n        return smallest_node_id\n\n    def find_shortest_path(self, start_node_id, end_node_id):\n        \"\"\"Finds shortest path b/w two nodes. 
Assumes path exists.\n        Returns a tuple with the path and cost.\"\"\"\n        result = (None, None)\n\n        # set initial distance to zero for our start node\n        self._dist_dict[start_node_id] = 0\n        \n        # set initial node as \"current\"\n        current_node_id = start_node_id\n\n        while (len(self._unvisited_nodes) > 0):\n            # for the current node, evaluate all neighbors\n            self._eval_neighbors(current_node_id)\n\n            # mark the current node as visited and remove it from the\n            # unvisited set so the loop can make progress\n            self._visited_nodes.add(current_node_id)\n            self._unvisited_nodes.discard(current_node_id)\n\n            # if destination node marked visited, done\n            if (current_node_id == end_node_id):\n                # walk the predecessor chain back from the end node\n                path = [end_node_id]\n                while (path[-1] != start_node_id):\n                    path.append(self._prev_dict[path[-1]])\n                path.reverse()\n                shortest_path_value = self._dist_dict[end_node_id]\n                result = (path, shortest_path_value)\n                break\n\n            # else, select the unvisited node w/ the smallest\n            # tentative distance and make it current node; repeat\n            smallest_unvisited_node_id = self._get_smallest_unvisited_node()\n            if (smallest_unvisited_node_id is None):\n                # no reachable unvisited nodes remain\n                break\n\n            current_node_id = smallest_unvisited_node_id\n\n        return result\n","sub_path":"ShortestPath/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":3963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"504345940","text":"class Solution:\n    def getMoneyAmount(self, n: int) -> int:\n        dp = [[0]*(n+1) for _ in range(n+1)]\n        \n        for j in range(2,n+1):\n            for i in range(j-1,0,-1):\n                global_min = float('inf')\n                for k in range(i+1,j):\n                    local_max = k + max(dp[i][k-1], dp[k+1][j]) \n                    #[1,2,3,4] k = 2, dp[1][1] = 0 dp[3][4] = 3, so local_max = 2 + max(0,3) = 5\n                    #[1,2,3,4] k = 3, dp[1][2] = 1 dp[4][4] = 0, so local_max = 3 + max(0,1) = 4\n                    global_min = min(global_min, local_max)\n                    #global_min = min(4,5)\n                dp[i][j] = i if i+1 ==j else global_min #[1,2]\n        return dp[1][n]","sub_path":"2020_04_12/Lewislou_375.py","file_name":"Lewislou_375.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"316874648","text":"import numpy as np\nfrom pathlib import Path\nfrom scipy.optimize import fmin_l_bfgs_b\nimport itertools\nfrom conlleval_ import evaluate\nfrom scipy.special import logsumexp\nimport time\nfrom helper import HiddenPrints, time_elapsed\n\n\ndef labelled(path):\n    with open(path) as f: \n        X, Y, x, y = list(), list(), list(), list()\n        for line in f:\n            if line == '\\n':\n                X.append(x)\n                Y.append(y)\n                x, y = list(), list()\n            else:\n                word, tag = line.strip().split()\n                x.append(word)\n                y.append(tag)\n    return X, Y\n\ndef unlabelled(path):\n    with open(path) as f: \n        X, x = list(), list()\n        for line in f:\n            if line == '\\n':\n                X.append(x)\n                x = list()\n            else:\n                word = line.strip()\n                x.append(word)\n    return X\n\ndef read_data(root):\n    train, devin, devout = root/'train', root/'dev.in', root/'dev.out' \n    return labelled(train), unlabelled(devin), labelled(devout)\n\ndef tokenize(sentence, word2index):\n    return [word2index[word] if word in word2index else -1 for word in sentence]\ndef tag2idx(tags, tag2index):\n    return [tag2index[tag] for tag in tags]\n\ndef idx_xy(X, Y, word2index=None, tag2index=None):\n    if not word2index:\n        vocabulary = list(set([word for sentence in X for word in sentence]))\n        word2index = {word: i for i, word in enumerate(vocabulary)}\n    if not tag2index:\n        tags = list(set([tag for tags in Y for tag in tags]))\n        tag2index = {tag: i for i, tag in enumerate(tags)}\n    \n    index2tag = {v:k for (k, v) in tag2index.items()}\n    \n    X_idx = [tokenize(sentence, word2index) for sentence in X]\n    Y_idx = [tag2idx(tags, tag2index) for tags in Y]\n    \n    return X_idx, X, Y_idx, Y, 
word2index, tag2index, index2tag\n\ndef get_xy(path):\n train_ds, devin_ds, devout_ds = read_data(path)\n train_X, train_X_str, train_Y, train_Y_str, word2index, tag2index, index2tag = idx_xy(train_ds[0], train_ds[1])\n test_X, test_X_str, test_Y, test_Y_str, _, _, _ = idx_xy(devout_ds[0], devout_ds[1], word2index, tag2index)\n return train_X, train_X_str, train_Y, train_Y_str, test_X, test_X_str, test_Y, test_Y_str, word2index, tag2index, index2tag\n\ndef link_weight_sum(x, transition_weight, emission_weight):\n\n T = transition_weight.shape[0] - 1 \n emission = np.zeros((1, T))\n\n if x != -1:\n emission = np.expand_dims(emission_weight[:, x], axis=0)\n transition = transition_weight[:-1, :-1]\n return transition + emission\n\n\ndef viterbi(X, tag2index, emission_weight, transition_weight, link_weight_sum):\n \n Y = list()\n index2tag = {value: key for key, value in tag2index.items()}\n\n for x in X:\n score_matrix = np.zeros((len(tag2index), len(x)))\n path_matrix = np.zeros((len(tag2index), len(x)), dtype='int')\n \n score_matrix[:, 0] = transition_weight[-1, :-1] + emission_weight[:, x[0]] if x[0] != -1 else transition_weight[-1, :-1]\n for i in range(1, len(x)):\n competitors = score_matrix[:, i-1][:, None] + link_weight_sum(x[i], transition_weight, emission_weight)\n score_matrix[:, i] = np.max(competitors, axis=0)\n path_matrix[:, i] = np.argmax(competitors, axis=0)\n \n competitors = transition_weight[:-1, -1] + score_matrix[:, -1]\n last_idx = np.argmax(competitors)\n path = [last_idx]\n for m in range(len(x)-1, 0, -1):\n path.insert(0, path_matrix[path[0], m])\n \n Y.append([index2tag[idx] for idx in path])\n \n return Y\n\ndef viterbi_output(dev_out_path, X_raw, X, tag2index, emission_weight, transition_weight, link_weight_sum):\n \n tags = viterbi(X, tag2index, emission_weight, transition_weight, link_weight_sum)\n \n output_string = ''\n for i in range(len(X)):\n for j in range(len(X[i])):\n output_string += X_raw[i][j] + ' ' + tags[i][j] + '\\n'\n output_string += '\\n'\n \n with open(dev_out_path, 'w') as f:\n f.write(output_string)\n \n print('Done with writing predictions')\n return None\n\ndef eval(X, Y_raw, tag2index, emission_weight, transition_weight, link_weight_sum):\n index2tag = {value: key for key, value in tag2index.items()}\n def flatten(L):\n return [e for l in L for e in l]\n Y_pred = flatten(viterbi(X, tag2index, emission_weight, transition_weight, link_weight_sum))\n Y_raw = flatten(Y_raw)\n assert len(Y_raw) == len(Y_pred)\n return evaluate(Y_raw, Y_pred)\n\ndef forward(x, tag2index, emission_weight, transition_weight):\n N, T = len(x), len(tag2index)\n forward_matrix = np.zeros((T, N), dtype=np.double)\n\n forward_matrix[:, 0] = transition_weight[-1, :-1]+emission_weight[:, x[0]] if x[0] != -1 else transition_weight[-1, :-1]\n for i in range(1, N): \n # forward_matrix[:, i] = np.log(np.sum(np.exp(forward_matrix[:, i-1][:, None] + link_weight_sum(x[i], transition_weight, emission_weight)), axis=0))\n forward_matrix[:, i] = logsumexp(forward_matrix[:, i-1][:, None] + link_weight_sum(x[i], transition_weight, emission_weight), axis=0)\n # log_Z = np.log(np.sum(np.exp(transition_weight[:-1, -1] + forward_matrix[:, -1])))\n log_Z = logsumexp(transition_weight[:-1, -1] + forward_matrix[:, -1])\n \n return forward_matrix, log_Z\n\ndef backward(x, tag2index, emission_weight, transition_weight):\n N, T = len(x), len(tag2index)\n backward_matrix = np.zeros((T, N), dtype=np.double)\n \n backward_matrix[:, -1] = transition_weight[:-1, -1]\n for i in range(N-2, -1, 
-1):\n # backward_matrix[:, i] = np.log(np.sum(np.exp(np.expand_dims(backward_matrix[:, i+1], axis=0) + link_weight_sum(x[i+1], transition_weight, emission_weight)), axis=1))\n backward_matrix[:, i] = logsumexp(np.expand_dims(backward_matrix[:, i+1], axis=0) + link_weight_sum(x[i+1], transition_weight, emission_weight), axis=1)\n # log_Z = np.log(np.sum(np.exp(transition_weight[-1, :-1] + emission_weight[:, x[0]] + backward_matrix[:, 0])))\n log_Z = logsumexp(transition_weight[-1, :-1] + emission_weight[:, x[0]] + backward_matrix[:, 0])\n return backward_matrix, log_Z\n\ndef Loss(X, Y, tag2index, emission_weight, transition_weight, param):\n \n loss = 0\n \n for x, y in zip(X, Y):\n pair_score = 0\n emission_score = emission_weight[y[0], x[0]]\n transition_score = transition_weight[-1, y[0]]\n pair_score += (transition_score + emission_score)\n for i in range(1, len(x)):\n emission_score = emission_weight[y[i], x[i]]\n transition_score = transition_weight[y[i-1], y[i]]\n pair_score += (transition_score + emission_score)\n \n transition_score = transition_weight[y[-1], -1]\n pair_score += transition_score\n \n _, log_Z = forward(x, tag2index, emission_weight, transition_weight)\n\n loss += -(pair_score - log_Z)\n \n loss += LossRegularization(emission_weight, transition_weight, param)\n \n return loss\n\ndef LossRegularization(emission_weight, transition_weight, param):\n return param*(np.sum(emission_weight[emission_weight != -np.inf]**2) +\\\n np.sum(transition_weight[transition_weight != -np.inf]**2))\n\ndef GradientTransition(X, Y, tag2index, emission_weight, transition_weight, param):\n \n T = len(tag2index)\n counter = 1\n Expected_count, Empirical_count = np.zeros((T+1, T+1), dtype=np.double), np.zeros((T+1, T+1))\n \n for x, y in zip(X, Y):\n N = len(x)\n forward_matrix, log_Z = forward(x, tag2index, emission_weight, transition_weight)\n backward_matrix, _ = backward(x, tag2index, emission_weight, transition_weight)\n\n expected_count, empirical_count = np.zeros((T+1, T+1), dtype=np.double), np.zeros((T+1, T+1))\n\n for tag1, tag2 in itertools.product(range(-1, T), range(-1, T)):\n log_SumPotential = 0\n\n transition_score = transition_weight[tag1, tag2]\n if tag1 == -1 and tag2 == -1:\n continue\n # both empirical and expected count set to 0, forcing the parameter to go to 0 becasue of L2 regularization, which doesn't matter at all\n elif tag1 == -1:\n emission_score = emission_weight[tag2, x[0]]\n log_SumPotential += transition_score + emission_score + backward_matrix[tag2, 0]\n elif tag2 == -1:\n log_SumPotential += forward_matrix[tag1, -1] + transition_score\n else:\n # SumPotential = 0.0\n # for i in range(N-1):\n # emission_score = emission_weight[tag2, x[i+1]]\n # SumPotential += np.exp(forward_matrix[tag1, i] + transition_score + emission_score + backward_matrix[tag2, i+1])\n # log_SumPotential = np.log(SumPotential)\n log_SumPotential += logsumexp(forward_matrix[tag1, :N-1] + transition_score + emission_weight[tag2, x[1:N]] + backward_matrix[tag2, 1:N])\n \n expected_count[tag1, tag2] = np.exp(log_SumPotential - log_Z)\n \n Expected_count += expected_count\n\n empirical_count[-1, y[0]] += 1\n for i in range(N-1):\n empirical_count[y[i], y[i+1]] += 1\n empirical_count[y[-1], -1] += 1\n\n Empirical_count += empirical_count\n\n if counter % 100 == 0:\n print('Transition: done with the {}th instances'.format(counter))\n counter += 1\n\n L2_gradient = 2*param*transition_weight\n L2_gradient[L2_gradient == -np.inf] = 0\n \n return Expected_count - Empirical_count + 
L2_gradient\n\ndef GradientEmission(X, Y, tag2index, word2index, emission_weight, transition_weight, param):\n\n T = len(tag2index)\n V = len(word2index)\n counter = 1\n Expected_count, Empirical_count = np.zeros((T, V), dtype=np.double), np.zeros((T, V))\n \n for x, y in zip(X, Y):\n N = len(x)\n forward_matrix, log_Z = forward(x, tag2index, emission_weight, transition_weight)\n backward_matrix, _ = backward(x, tag2index, emission_weight, transition_weight)\n \n expected_count = np.zeros((T, V), dtype=np.double)\n\n\n emission_score = emission_weight[:, x[0]] # only apply to training set\n transition_score = transition_weight[-1, :-1]\n expected_count[:, x[0]] += np.exp(backward_matrix[:, 0] + emission_score + transition_score - log_Z)\n \n for i in range(1, N):\n emission_score = emission_weight[:, x[i]]\n transition_scores = transition_weight[:-1, :-1]\n expected_count[:, x[i]] += np.sum(np.exp(forward_matrix[:, i-1][:, None] + np.expand_dims(backward_matrix[:, i] + emission_score, axis=0) + transition_scores - log_Z), axis=0)\n \n Expected_count += expected_count\n\n for word, tag in zip(x, y):\n Empirical_count[tag, word] += 1\n\n if counter % 100 == 0:\n print('Emission: done with the {}th instances'.format(counter))\n counter += 1\n\n L2_gradient = 2*param*emission_weight\n L2_gradient[L2_gradient == -np.inf] = 0\n\n return Expected_count - Empirical_count + L2_gradient \n\ndef main():\n path = Path('../data/partial')\n train_X, train_X_str, train_Y, train_Y_str, test_X, test_X_str, test_Y, test_Y_str, word2index, tag2index, index2tag = get_xy(path)\n\n print('************Training Set Summary*************')\n T, V = len(tag2index), len(word2index)\n print('Number of tags: {}, Number of words: {}'.format(T, V))\n \n Lambda = 0.1\n\n def callbackF(w):\n loss = get_loss_grad(w)[0]\n transition_weight = w[:(T+1)*(T+1)].reshape((T+1, T+1))\n emission_weight = w[(T+1)*(T+1):].reshape((T, V))\n loss_l2 = LossRegularization(emission_weight, transition_weight, param=Lambda)\n print('Loss:{:.4f} L2 Loss:{:.4f}'.format(loss, loss_l2))\n\n def get_loss_grad(w):\n with HiddenPrints():\n transition_weight = w[:(T+1)*(T+1)].reshape((T+1, T+1))\n emission_weight = w[(T+1)*(T+1):].reshape((T, V))\n loss = Loss(train_X, train_Y, tag2index, \n emission_weight, transition_weight, param=Lambda)\n grads_transition = GradientTransition(train_X, train_Y, tag2index,\n emission_weight, transition_weight, param=Lambda)\n grads_emission = GradientEmission(train_X, train_Y, tag2index, word2index, \n emission_weight, transition_weight, param=Lambda)\n grads = np.concatenate((grads_transition.reshape(-1), grads_emission.reshape(-1)))\n return loss, grads\n\n print('************Train*************')\n start = time.time()\n init_w = np.zeros(((T+1)*(T+1)+T*V,))\n optimal_weight, final_loss, result_dict = fmin_l_bfgs_b(get_loss_grad, init_w, pgtol=0.01, callback=callbackF)\n end = time.time()\n time_elapsed(start, end)\n \n print('************Saving Model Parameters*************')\n optimal_transition_weight = optimal_weight[:(T+1)*(T+1)].reshape((T+1, T+1))\n optimal_emission_weight = optimal_weight[(T+1)*(T+1):].reshape((T, V))\n path_transition = path/'best_weight_features1_transition.npy'\n path_emission = path/'best_weight_features1_emission.npy'\n np.save(path_transition, optimal_transition_weight)\n np.save(path_emission, optimal_emission_weight)\n\n print('************Saving Model Outputs*************')\n path_output = path/'dev.p4.out'\n viterbi_output(path_output, test_X_str, test_X, tag2index, 
optimal_emission_weight, optimal_transition_weight, link_weight_sum)\n \n print('************Evaluation*************')\n prec, rec, f1 = eval(train_X, train_Y_str, tag2index, optimal_emission_weight, optimal_transition_weight, link_weight_sum)\n print('precision, recall, f1 on training set: {0} {1} {2}'.format(prec, rec, f1))\n prec, rec, f1 = eval(test_X, test_Y_str, tag2index, optimal_emission_weight, optimal_transition_weight, link_weight_sum)\n print('precision, recall, f1 on test set: {0} {1} {2}'.format(prec, rec, f1))\n\n\nif __name__=='__main__':\n main()","sub_path":"NER/numpy/Features1.py","file_name":"Features1.py","file_ext":"py","file_size_in_byte":14173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619481256","text":"\n# coding: utf-8\n\nimport numpy as np\n\n\nclass Grid:\n def __init__(self, width, height, start):\n self.width = width\n self.height = height\n self.i = start[0]\n self.j = start[1]\n\n # Class properties\n possible_actions = ('U', 'D', 'L', 'R')\n\n def set(self, rewards, actions):\n # rewards is a dictionary: (i, j): r (row, col): rewards\n # actions is a dictionary: (i, j): A (row, col): list of possible actions\n\n self.rewards = rewards\n self.actions = actions\n\n def set_state(self, s):\n self.i = s[0]\n self.j = s[1]\n\n def get_actions(self):\n return self.actions[(self.i, self.j)]\n\n def current_state(self):\n return (self.i, self.j)\n\n def is_terminal(self, s):\n return s not in self.actions\n\n def move(self, action):\n # Check if the move is legal valid\n if action in self.actions[(self.i, self.j)]:\n if action == 'U':\n self.i -= 1\n elif action == 'D':\n self.i += 1\n elif action == 'R':\n self.j += 1\n elif action == 'L':\n self.j -= 1\n\n # Returns the reward\n return self.rewards.get((self.i, self.j), 0)\n\n def undo_move(self, action):\n # Undo the movement\n if action == 'U':\n self.i += 1\n elif action == 'D':\n self.i -= 1\n elif action == 'R':\n self.j -= 1\n elif action == 'L':\n self.j += 1\n\n # Throw an exception in the cell in invalid.\n assert(self.current_state() in self.all_states())\n\n def game_over(self):\n # Returns true when the game is over and false otherwise.\n return (self.i, self.j) not in self.actions\n\n def all_states(self):\n # Returns all states\n return set(self.actions.keys()) | set(self.rewards.keys())\n\n\ndef create_grid(step_cost=0):\n # Create a new board with the reward and possible actions in the cells.\n # Default each movement does not have a cost.\n #\n # The board is as follows\n # S initial point\n # X not allowed cell\n # . allowed cell\n #\n # Number indicate the rewards of states\n #\n #\n # . . . 1\n # . x . -1\n # s . . 
.\n\n grid = Grid(3, 4, (2, 0))\n\n rewards = {\n (0, 0): step_cost,\n (0, 1): step_cost,\n (0, 2): step_cost,\n (0, 3): 1,\n (1, 0): step_cost,\n (1, 2): step_cost,\n (1, 3): -1,\n (2, 0): step_cost,\n (2, 1): step_cost,\n (2, 2): step_cost,\n (2, 3): step_cost}\n\n actions = {\n (0, 0): ('D', 'R'),\n (0, 1): ('L', 'R'),\n (0, 2): ('L', 'D', 'R'),\n (1, 0): ('U', 'D'),\n (1, 2): ('U', 'D', 'R'),\n (2, 0): ('U', 'R'),\n (2, 1): ('L', 'R'),\n (2, 2): ('L', 'R', 'U'),\n (2, 3): ('L', 'U')\n }\n\n grid.set(rewards, actions)\n\n return grid\n\n\ndef print_values(value, grid):\n for i in range(grid.width):\n print(\"---------------------------\")\n for j in range(grid.height):\n v = value.get((i, j), 0)\n if v >= 0:\n print(\" %.2f|\" % v, end=\"\")\n else:\n print(\"%.2f|\" % v, end=\"\")\n print(\"\")\n\n\ndef print_policy(policy, grid):\n for i in range(grid.width):\n print(\"---------------------------\")\n for j in range(grid.height):\n action = policy.get((i, j), ' ')\n\n if action == 'U':\n print(\" ↑ |\", end=\"\")\n elif action == 'D':\n print(\" ↓ |\", end=\"\")\n elif action == 'R':\n print(\" → |\", end=\"\")\n elif action == 'L':\n print(\" ← |\", end=\"\")\n else:\n print(\" |\", end=\"\")\n print(\"\")\n\n\ndef print_value_policy(V, policy, grid):\n print(\"Value function\")\n print_values(V, grid)\n print()\n print(\"Policy\")\n print_policy(policy, grid)\n\n\ndef init_states(grid, value=None):\n states = grid.all_states()\n V = {}\n\n for s in states:\n if s in grid.actions:\n if value is None:\n V[s] = np.random.random()\n else:\n V[s] = value\n else:\n V[s] = 0\n\n return V, states\n\n\ndef policy_evaluation(grid, policy, V, states, gamma=1, max_iter=100, threshold=1e-3):\n num_iter = 0\n\n while num_iter < max_iter:\n num_iter += 1\n biggest_change = 0\n for s in states:\n old_v = V[s]\n\n if s in policy:\n action = policy[s]\n grid.set_state(s)\n r = grid.move(action)\n V[s] = r + gamma * V[grid.current_state()]\n biggest_change = max(biggest_change, np.abs(old_v - V[s]))\n\n if biggest_change < threshold:\n break\n\n if num_iter >= max_iter:\n print(\"The maximum number of iterations has been reached\")\n\n\ndef optimal_policy(grid, policy, gamma=1, max_iter=100, threshold=1e-3):\n\n # Initialize the funciton value and states\n V, states = init_states(grid)\n\n # Iterate over policies until convergence or maximum iterations\n num_iter = 0\n\n while num_iter < max_iter:\n num_iter += 1\n\n # Policy evaluation\n policy_evaluation(grid, policy, V, states, gamma, max_iter, threshold)\n\n # Improvement the policy\n is_policy_converged = True\n for s in states:\n if s in policy:\n old_a = policy[s]\n new_a = None\n best_value = float('-inf')\n\n # Iterate over actions until the best is obtained\n for a in Grid.possible_actions:\n grid.set_state(s)\n r = grid.move(a)\n v = r + gamma * V[grid.current_state()]\n if v > best_value:\n best_value = v\n new_a = a\n policy[s] = new_a\n\n if new_a != old_a:\n is_policy_converged = False\n\n if is_policy_converged:\n break\n\n if num_iter >= max_iter:\n print(\"The maximum number of iterations has been reached\")\n\n return V\n\n\ndef random_policy(grid):\n policy = {}\n\n for s in grid.actions.keys():\n policy[s] = np.random.choice(grid.actions[s])\n\n return policy\n\n\ndef policy_evaluation_windy(grid, policy, V, states, windy=0.5, gamma=1, max_iter=100, threshold=1e-3):\n num_iter = 0\n\n while num_iter < max_iter:\n num_iter += 1\n biggest_change = 0\n for s in states:\n old_v = V[s]\n new_v = 0\n\n if s in policy:\n for action in 
grid.actions[s]:\n if action == policy[s]:\n p = windy\n else:\n p = (1-windy)/(len(grid.actions[s])-1)\n grid.set_state(s)\n r = grid.move(action)\n new_v += p * (r + gamma * V[grid.current_state()])\n\n V[s] = new_v\n biggest_change = max(biggest_change, np.abs(old_v - V[s]))\n\n if biggest_change < threshold:\n break\n\n if num_iter >= max_iter:\n print(\"The maximum number of iterations has been reached\")\n\n\ndef optimal_policy_windy(grid, policy, windy=0.5, gamma=1, max_iter=100, threshold=1e-3):\n\n # Initialize the funciton value and states\n V, states = init_states(grid)\n\n # Iterate over policies until convergence or maximum iterations\n num_iter = 0\n\n while num_iter < max_iter:\n num_iter += 1\n\n # Policy evaluation\n policy_evaluation_windy(grid, policy, V, states,\n windy, gamma, max_iter, threshold)\n\n # Improvement the policy\n is_policy_converged = True\n for s in states:\n if s in policy:\n old_a = policy[s]\n new_a = None\n best_value = float('-inf')\n\n # Iterate over actions until the best is obtained\n for action in Grid.possible_actions:\n v = 0\n for w_action in grid.actions[s]:\n if action == w_action:\n p = windy\n else:\n p = (1-windy)/(len(grid.actions[s])-1)\n grid.set_state(s)\n r = grid.move(action)\n v += p * (r + gamma * V[grid.current_state()])\n if v > best_value:\n best_value = v\n new_a = action\n policy[s] = new_a\n\n if new_a != old_a:\n is_policy_converged = False\n\n if is_policy_converged:\n break\n\n if num_iter >= max_iter:\n print(\"The maximum number of iterations has been reached\")\n\n return V\n\n\ndef optimal_value(grid, policy, gamma=0.9, max_iter=100, threshold=1e-3):\n V, states = init_states(grid)\n\n num_iter = 0\n while num_iter < max_iter:\n num_iter += 1\n biggest_change = 0\n\n for s in states:\n old_v = V[s]\n\n # La función de valor solo tiene valor si no es final\n if s in policy:\n new_v = float('-inf')\n for a in grid.actions[s]:\n grid.set_state(s)\n r = grid.move(a)\n v = r + gamma * V[grid.current_state()]\n if v > new_v:\n new_v = v\n V[s] = new_v\n biggest_change = max(biggest_change, np.abs(old_v - V[s]))\n\n if biggest_change < threshold:\n break\n\n if num_iter >= max_iter:\n print(\"The maximum number of iterations has been reached\")\n\n # Obtener la politica para la función de valor\n for s in policy.keys():\n best_a = None\n best_value = float('-inf')\n # Itera sobre todas las posibles acciones\n for a in Grid.possible_actions:\n grid.set_state(s)\n r = grid.move(a)\n v = r + gamma * V[grid.current_state()]\n if v > best_value:\n best_value = v\n best_a = a\n\n policy[s] = best_a\n\n return V\n","sub_path":"temp/UAH007_02_01_Dynamic_programming.py","file_name":"UAH007_02_01_Dynamic_programming.py","file_ext":"py","file_size_in_byte":10318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379807318","text":"import curses\nimport textwrap\nimport time\nimport battery\nimport ADC\nimport Lights\nimport CAN\nimport SPI\nimport Strobelight\nimport WDTimer\nimport PLL\n\nimport config\nimport Timer\n\nimport os\nimport sys\nimport traceback\n\n# Global configurations\nstate = '' # Charging/Discharging\nmode = '' # Low, Normal, High\nstdscr = None # Output screen\nCANbox = None #box that displays the CAN messages\nlights_names = ['EXTRA', 'CAN', 'WDOG', 'UVOLT', 'OVOLT', 'OTEMP', 'OCURR', 'RUN', 'FAULT']\nfrequency = None\n\ndef generate(battery=None):\n global state, mode\n # Update battery's state\n if battery is not None:\n battery.update()\n # Generate ADC 
values\n ADC.generate(state, mode, battery)\n # Generate SPI values\n SPI.generate(state, mode, battery)\n #Pet Watchdog\n WDTimer.Check_State()\n #Initialize Watchdog Timer\n WDTimer.WD_Enable()\n #Initialize Timer\n Timer.Enable()\n\n\ndef display(battery=None): #print watchdog countdown \n global stdscr\n # Read ADC values\n adc_values = ADC.read()\n global frequency \n frequency = PLL.Get_Frequency()\n\n stdscr.addstr(0, 10, \"Battery\")\n stdscr.addstr(1, 0, \"==============================\")\n stdscr.addstr(2, 0, f\"ADC:\")\n stdscr.addstr(2, 10, f\"Low Precision: {adc_values[0][0]}\")\n stdscr.addstr(3, 10, f\"High Precision: {adc_values[0][1]}\")\n # Read Current values\n stdscr.addstr(4, 0, f\"Current:\")\n stdscr.addstr(4, 10, f\"{adc_values[1]} A \")\n #Read CAN data\n CANdata = CAN.Get_CAN_Info()\n text = ' '.join(CANdata) #put elements of the list of CAN data bytes into a string \n CANbox.erase() #clear previous data in the box\n CANbox.addstr(4, 0, textwrap.fill(text, 40))\n CANbox.addstr(3, 2, \"CAN ID and Message:\")\n # Display Watchdog ticks\n ticks = WDTimer.Tick()\n stdscr.addstr(10, 0, f\" \") #clear previous tick\n stdscr.addstr(10, 0, f\"WDTimer Countdown: {ticks}\")\n #Display current frequency\n stdscr.addstr(6, 0, f\" \") \n stdscr.addstr(6, 0, f\"Clock Frequency: {frequency} Hz\")\n # Read Module values\n stdscr.addstr(0, 54, \"Modules\")\n stdscr.addstr(1, 40, \"====================================\")\n module_values = SPI.read()\n for i, module in enumerate(module_values):\n stdscr.addstr(i+2, 37, f\"{i+1}\")\n stdscr.addstr(i+2, 40, f\"| {'X' if module[0] else ' '} | {module[1]/10000:.4f}V | {module[2]/1000:.3f}°C | {module[3]/1000:.3f}°C |\")\n # Read LED values\n stdscr.addstr(0, 90, \"LEDs\")\n stdscr.addstr(1, 80, \"=======================\")\n lights = Lights.read()\n curses.init_pair(1, curses.COLOR_RED, curses.COLOR_RED)\n curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_GREEN)\n curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_BLACK)\n for i in range(9):\n stdscr.addstr(i+2, 80, lights_names[i])\n if lights & (0x1<>\", end=\"\")\n state = input()\n while state != 'charging' and state != 'discharging':\n print(\"That is not a valid option. Please enter 'charging' or 'discharging'\")\n state = input()\n print(\"Would you like to simulate 'low', 'normal', or 'high' values?\")\n print(\">>\", end=\"\")\n mode = input()\n while mode != 'low' and mode != 'normal' and mode != 'high':\n print(\"That is not a valid option. Please enter 'low', 'normal', or 'high': \")\n mode = input()\n\n\ndef change_wires(battery):\n done = False\n while not done:\n print(\"Which module? (0 to exit)\")\n print(\">>\", end=\"\")\n module = int(input())\n if module:\n battery.modules[module-1].connected = not battery.modules[module-1].connected\n else:\n done = True\n\ndef launch_bevolt():\n # Suppress stdout and stderr\n null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]\n # save the current stdout and stderr\n save = os.dup(1), os.dup(2)\n # put /dev/null fds on 1 and 2\n os.dup2(null_fds[0], 1)\n os.dup2(null_fds[1], 2)\n\n # Launch the BPS code for BeVolt\n isParent = os.fork()\n if isParent == 0: # we only want to do this in the child process\n os.execl(\"simulate\", \"simulate\", \"bevolt\")\n \n os.dup2(save[0], 1)\n os.dup2(save[1], 2)\n \n os.close(null_fds[0])\n os.close(null_fds[1])\n os.close(save[0])\n os.close(save[1])\n\ndef main():\n print(\"Welcome to the BPS Simulator\")\n print(\"Type 'start' to start BeVolt. 
Otherwise, you can specify the types of data to simulate.\")\n print(\">>\", end=\"\")\n if input() == 'start':\n # Initial capacity is (2500*14)/(2950*14)=0.847 i.e. 84.7% charged\n init_capacity_mah = 2500 * config.num_batt_cells_parallel_per_module\n\n # Amperes current draw of the electrical system\n ampere_draw = 30\n\n # Create state of the battery\n BeVolt = battery.Battery(ampere_draw, config.total_batt_pack_capacity_mah, init_capacity_mah)\n PLL.PLL_Init()\n else:\n BeVolt = None\n configure()\n \n try:\n launch_bevolt()\n except Exception as e:\n print(repr(e))\n \n global stdscr\n global CANbox\n stdscr = curses.initscr()\n curses.start_color()\n curses.noecho()\n curses.cbreak()\n #box is for CAN messages\n CANbox = curses.newwin(7, 21, 12, 78)\n CANbox.immedok(True)\n CANbox.box()\n CANbox.refresh()\n #Start background thread for timer \n timerThread = Timer.timer_Thread\n timerThread.start()\n while True:\n try:\n # Generate all values\n generate(BeVolt)\n # Display all values\n display(BeVolt)\n time.sleep(1) # one second delay\n except KeyboardInterrupt:\n curses.endwin()\n if BeVolt is not None:\n print(\"\\n\\rWould you like to change 'wires', 'quit', or 'PLL'?\")\n print(\">>\", end=\"\")\n choice = input()\n if choice == 'wires':\n change_wires(BeVolt)\n stdscr = curses.initscr()\n curses.start_color()\n elif choice == 'quit':\n break\n elif choice == 'PLL':\n print(\"Enter the frequency you would like to change the clock to in Hz.\")\n frequency = int(input())\n PLL.Change_Frequency(frequency)\n else:\n print(\"That is not a valid option. Continuing simulation...\")\n stdscr = curses.initscr()\n curses.start_color()\n else:\n print(\"\\n\\rWould you like to change 'config', 'quit', or send a CAN message ('CAN')?\")\n choice = input()\n if choice == 'config':\n configure()\n stdscr = curses.initscr()\n elif choice == 'quit':\n break\n elif choice == 'CAN':\n print(\"Enter the CAN ID for the system you wish to simulate. Leave out '0x'.\")\n id = input()\n while(CAN.Invalid_CAN_ID(id) == True):\n print(\"Invalid CAN ID. Try again.\")\n id = input()\n print(\"Enter up to 8 bytes of the CAN message that you would like to send, and separate each byte by a ','. Leave out '0x'.\")\n message = input().split(',')\n CAN.Send_Message(id, message, len(message))\n else:\n print(\"That is not a valid option. Continuing simulation...\")\n stdscr = curses.initscr()\n curses.start_color()\n except Exception as e:\n curses.echo()\n curses.nocbreak()\n curses.endwin()\n print(\"ERROR:\", end=\" \")\n print(repr(e), end=\"\\r\\n\")\n print(\"If addwstr() returned ERR, make your terminal window bigger.\")\n print(\"\\n\\rContinue? 
(Y/n): \", end=\"\")\n cont = input()\n if(cont.lower() == \"n\" or cont.lower() == \"no\"):\n break\n print(\"Continuing...\")\n main()\n curses.echo()\n curses.nocbreak()\n curses.endwin()\n Timer.terminate(True)\n\n\n\nif __name__ == '__main__':\n try:\n main()\n except:\n traceback.print_exc()\n while True:\n pass\n","sub_path":"BSP/Simulator/DataGeneration/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":9024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"473540470","text":"#Send transformed data to event hub\nimport asyncio\nfrom azure.eventhub.aio import EventHubProducerClient\nfrom azure.eventhub import EventData\n\nasync def send_event():\n # Create a producer client to send messages to the event hub.\n # Specify a connection string to your event hubs namespace and\n \t # the event hub name.\n producer = EventHubProducerClient.from_connection_string(conn_str=\"Endpoint=sb://projecteventhub.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=2ZXNW5EOzbelKigs8RqZDcwmT3qEBwSlMcLRwjLIZXc=\", eventhub_name=\"cloudprojecteventhub\")\n async with producer:\n # Create a batch.\n event_data_batch = await producer.create_batch()\n\n # Add events to the batch.\n event_data_batch.add(EventData('First event '))\n event_data_batch.add(EventData('Second event'))\n event_data_batch.add(EventData('Third event'))\n\n # Send the batch of events to the event hub.\n await producer.send_batch(event_data_batch)\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(send_event())","sub_path":"Serverless function/send_event.py","file_name":"send_event.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"365150167","text":"# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport six\n\nfrom tempest.api.network import base_security_groups as base\nfrom tempest.common.utils import data_utils\nfrom tempest import test\n\n\nclass SecGroupTest(base.BaseSecGroupTest):\n _interface = 'json'\n\n @classmethod\n def setUpClass(cls):\n super(SecGroupTest, cls).setUpClass()\n if not test.is_extension_enabled('security-group', 'network'):\n msg = \"security-group extension not enabled.\"\n raise cls.skipException(msg)\n\n @test.attr(type='smoke')\n def test_list_security_groups(self):\n # Verify the that security group belonging to tenant exist in list\n resp, body = self.client.list_security_groups()\n self.assertEqual('200', resp['status'])\n security_groups = body['security_groups']\n found = None\n for n in security_groups:\n if (n['name'] == 'default'):\n found = n['id']\n msg = \"Security-group list doesn't contain default security-group\"\n self.assertIsNotNone(found, msg)\n\n @test.attr(type='smoke')\n def test_create_list_update_show_delete_security_group(self):\n group_create_body, name = self._create_security_group()\n\n # List security groups and verify if created group is there in response\n resp, list_body = self.client.list_security_groups()\n self.assertEqual('200', resp['status'])\n secgroup_list = list()\n for secgroup in list_body['security_groups']:\n secgroup_list.append(secgroup['id'])\n self.assertIn(group_create_body['security_group']['id'], secgroup_list)\n # Update the security group\n new_name = data_utils.rand_name('security-')\n new_description = data_utils.rand_name('security-description')\n resp, update_body = self.client.update_security_group(\n group_create_body['security_group']['id'],\n name=new_name,\n description=new_description)\n # Verify if security group is updated\n self.assertEqual('200', resp['status'])\n self.assertEqual(update_body['security_group']['name'], new_name)\n self.assertEqual(update_body['security_group']['description'],\n new_description)\n # Show details of the updated security group\n resp, show_body = self.client.show_security_group(\n group_create_body['security_group']['id'])\n self.assertEqual(show_body['security_group']['name'], new_name)\n self.assertEqual(show_body['security_group']['description'],\n new_description)\n\n @test.attr(type='smoke')\n def test_create_show_delete_security_group_rule(self):\n group_create_body, _ = self._create_security_group()\n\n # Create rules for each protocol\n protocols = ['tcp', 'udp', 'icmp']\n for protocol in protocols:\n resp, rule_create_body = self.client.create_security_group_rule(\n security_group_id=group_create_body['security_group']['id'],\n protocol=protocol,\n direction='ingress'\n )\n self.assertEqual('201', resp['status'])\n\n # Show details of the created security rule\n resp, show_rule_body = self.client.show_security_group_rule(\n rule_create_body['security_group_rule']['id']\n )\n self.assertEqual('200', resp['status'])\n create_dict = rule_create_body['security_group_rule']\n for key, value in six.iteritems(create_dict):\n self.assertEqual(value,\n show_rule_body['security_group_rule'][key],\n \"%s does not match.\" % key)\n\n # List rules and verify created rule is in response\n resp, rule_list_body = self.client.list_security_group_rules()\n self.assertEqual('200', resp['status'])\n rule_list = [rule['id']\n for rule in rule_list_body['security_group_rules']]\n self.assertIn(rule_create_body['security_group_rule']['id'],\n rule_list)\n\n 
@test.attr(type='smoke')\n def test_create_security_group_rule_with_additional_args(self):\n # Verify creating security group rule with the following\n # arguments works: \"protocol\": \"tcp\", \"port_range_max\": 77,\n # \"port_range_min\": 77, \"direction\":\"ingress\".\n group_create_body, _ = self._create_security_group()\n\n direction = 'ingress'\n protocol = 'tcp'\n port_range_min = 77\n port_range_max = 77\n resp, rule_create_body = self.client.create_security_group_rule(\n security_group_id=group_create_body['security_group']['id'],\n direction=direction,\n protocol=protocol,\n port_range_min=port_range_min,\n port_range_max=port_range_max\n )\n\n self.assertEqual('201', resp['status'])\n sec_group_rule = rule_create_body['security_group_rule']\n\n self.assertEqual(sec_group_rule['direction'], direction)\n self.assertEqual(sec_group_rule['protocol'], protocol)\n self.assertEqual(int(sec_group_rule['port_range_min']), port_range_min)\n self.assertEqual(int(sec_group_rule['port_range_max']), port_range_max)\n\n\nclass SecGroupTestXML(SecGroupTest):\n _interface = 'xml'\n","sub_path":"tempest/api/network/test_security_groups.py","file_name":"test_security_groups.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"550080592","text":"import requests\r\nimport re\r\nimport time\r\nimport smtplib\r\nimport io\r\nfrom email.mime.text import MIMEText\r\nfrom email.header import Header\r\n\r\ndef search(keyword):\r\n url = 'https://www.costco.com/CatalogSearch?dept=All&keyword=' + keyword\r\n headers = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36\", \"Cache-Control\":\"no-store, max-age=0\"}\r\n page = requests.get(url, headers=headers)\r\n content = page.text\r\n \r\n # #Write to file\r\n # with io.open(\"debug.txt\", \"w\", encoding=\"utf-8\") as f:\r\n # f.write(content)\r\n\r\n print(page.status_code)\r\n\r\n # Find item\r\n items_text = re.findall('

          '\r\n else:\r\n email_body += '

          ' + item + '

          '\r\n\r\n return email_body\r\n\r\n\r\ngmail_pwd = input('Input gmail password:\\n')\r\nwhile 1 == 1:\r\n print('****** start round ******')\r\n\r\n # REPLACE HERE FOR SEARCH BOX IN COSTCO PAGE\r\n search_str = 'ysl+niki'\r\n\r\n # REPLACE HERE FOR TARGET ITEM FOUND\r\n item_match_str = 'niki'\r\n\r\n items = search(search_str)\r\n print(items)\r\n\r\n if len(items) > 0: \r\n if any(item_match_str.lower() in s.lower() for s in items):\r\n sendEmail(constructEmailBody(items, item_match_str), gmail_pwd)\r\n print('Found' + item_match_str)\r\n else:\r\n print('Found items but no ' + item_match_str + ' found in search results')\r\n else:\r\n print('No items found')\r\n \r\n t = time.localtime()\r\n current_time = time.strftime(\"%H:%M:%S\", t)\r\n print('****** end round at ' + current_time + '******\\n\\n' )\r\n time.sleep(60)\r\n ","sub_path":"costco.py","file_name":"costco.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230583022","text":"import requests, re\n\nheader = {'User-Agent': 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}\ncontent = requests.get('https://book.douban.com/', headers=header).text\n\npattern = re.compile('class=\"info\".*?(.*?)',re.S)\nresults = re.findall(pattern, content)\n\nfor i in results:\n print(i.strip())","sub_path":"python/requests_example.py","file_name":"requests_example.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572162248","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n'''\nwanderer bootstrap\n==================\n\nBootstrap a new wanderer project::\n\n wanderer bootstrap\n'''\n\nimport os\nfrom ..packages import click\nfrom .. import Wanderer, FilePath\n\n\n@click.group(invoke_without_command=True)\n@click.option('--path',\n prompt=(\"Insert the full path to your new project:\\n\\n\"\n \" C:\\\\path\\\\to\\\\MyProject\\n\\n\"),\n help='Path to new project')\n@click.option('--config',\n help='Path to a wanderer configuration',\n required=False)\ndef cli(path, config):\n '''Bootstrap a new project from the specific configuration.'''\n\n if not config:\n config = click.prompt(\n 'Path to the config you would like to use. 
All '\n 'environment variables or user variables will be expanded.:\\n\\n'\n ' C:\\\\path\\\\to\\\\.wanderer\\n\\n',\n default='default')\n\n config = config if config != 'default' else None\n\n Wanderer.bootstrap(path, config)\n","sub_path":"wanderer/commands/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419032752","text":"#\n# Copyright 2022 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\nimport os\n\nimport pytest\nfrom brownie.network import accounts\nfrom web3.main import Web3\n\nfrom tests.resources.helper_functions import generate_wallet\n\n\n@pytest.mark.unit\ndef test_generating_wallets(publisher_ocean_instance):\n generated_wallet = generate_wallet()\n assert generated_wallet.address, \"Wallet has not an address.\"\n assert accounts.at(generated_wallet.address).balance() == Web3.toWei(3, \"ether\")\n\n OCEAN_token = publisher_ocean_instance.OCEAN_token\n assert OCEAN_token.balanceOf(generated_wallet.address) == Web3.toWei(50, \"ether\")\n\n env_key_labels = [\n \"TEST_PRIVATE_KEY1\",\n \"TEST_PRIVATE_KEY2\",\n \"TEST_PRIVATE_KEY3\",\n \"TEST_PRIVATE_KEY4\",\n \"TEST_PRIVATE_KEY5\",\n \"TEST_PRIVATE_KEY6\",\n \"TEST_PRIVATE_KEY7\",\n \"TEST_PRIVATE_KEY8\",\n \"FACTORY_DEPLOYER_PRIVATE_KEY\",\n \"PROVIDER_PRIVATE_KEY\",\n ]\n env_private_keys = []\n for key_label in env_key_labels:\n key = os.environ.get(key_label)\n env_private_keys.append(key)\n assert generated_wallet.private_key not in env_private_keys\n","sub_path":"ocean_lib/web3_internal/test/test_wallet.py","file_name":"test_wallet.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336645813","text":"from lark import Lark\n\n# de volgorde maakt uit in deze grammatica!\n# dat kunnen we exploiten voor deze regels,\n# eerst print, dan ask, dan text\n\n# wel weer grappig dat hij bij pr felienne 123 pr felienne als text ziet\n# maak ok (kunnen we zelf wel hakken tot de eerste spatie)\n\n# mooi trouwens ook, op zo'n simpele taal kunnen we\n# veel makkelijker program repair doen OMG!\n\nl = Lark('''start: \"print \" text -> print\n | \"ask \" text -> ask\n | text \" \" text -> invalid\n\n text: (LETTER | DIGIT | WS_INLINE)+\n\n %import common.LETTER // imports from terminal library\n %import common.DIGIT // imports from terminal library\n %import common.WS_INLINE // imports from terminal library\n\n \n ''')\n\ndef flatten_test(tree):\n if tree.data == 'text':\n return ''.join([str(c) for c in tree.children])\n else:\n raise Exception('Attemping to print or ask non-text element')\n\n\ndef transpile(input_string):\n tree = l.parse(input_string)\n if tree.data == 'print':\n command = 'print'\n elif tree.data == 'ask':\n command = 'input'\n else:\n raise Exception('First word is not a command')\n\n parameter = flatten_test(tree.children[0])\n return command + \"('\" + parameter + \"')\"\n\ndef execute(input_string):\n python = transpile(input_string)\n exec(python)\n\n\n# f = open('output.py', 'w+')\n# f.write(python)\n# f.close()\n\n\n\n\n\n","sub_path":"hedy.py","file_name":"hedy.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441019379","text":"import logging\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom bot_list import sqlite_3_select, Rates\nfrom exchange 
import Exchange\nfrom keyboard import *\nfrom get_diagram import Diagram\n\nlogging.basicConfig(level=logging.INFO)\nbot = Bot(token='1617603896:AAHUmL3ufG8yvRG2RNJe1944zovFxKux4y8')\ndp = Dispatcher(bot)\n\n\n@dp.message_handler(commands=[\"start\"])\nasync def start(message):\n await message.answer(\"Hi, I'm a bot, I have the following features available:\\n/list\\n/exchange\"\n \"\\n/getdiagram\")\n\n\n@dp.message_handler(commands=['list'])\nasync def lst(message: types.Message):\n await message.reply('To view the exchange rate, select it from the panel', reply_markup=keyboard,\n reply=False)\n\n\n@dp.callback_query_handler(text_contains='1')\nasync def get_rates(call: types.callback_query):\n sqlite_3_select(call.data.replace('1', ''))\n answer_message = Rates.get_rates()\n await bot.send_message(call.message.chat.id,'\\n'.join(map(str, answer_message)))\n\n\n@dp.message_handler(commands=['exchange'])\nasync def exchange(message: types.Message):\n await message.reply('Enter the amount of currency to exchange', reply=False)\n\n\n@dp.message_handler(commands=['getdiagram'])\nasync def get_diagram(message: types.Message):\n await message.reply('To get diagram select the base currency', reply=False, reply_markup=keyboard4)\n\n\n@dp.message_handler()\nasync def exchange_amount(message: types.Message):\n\n try:\n float(message.text)\n except:\n await message.reply('The entered value must be numeric, please try again', reply=False)\n else:\n Exchange.put_numeric(message.text)\n await message.reply('To exchange select the base currency', reply=False, reply_markup=keyboard2)\n\n\n@dp.callback_query_handler(text_contains='2')\nasync def base_currently(call: types.callback_query):\n Exchange.request(call.data.replace('2', ''))\n await bot.send_message(call.message.chat.id,'Select the currency to which the transfer will be made',\n reply_markup=keyboard3)\n\n\n@dp.callback_query_handler(text_contains='3')\nasync def transfer_base(call: types.callback_query):\n Exchange.put_transfer_base(call.data.replace('3',''))\n answer_message = Exchange.get_exchange_data()\n await bot.send_message(call.message.chat.id, '\\n'.join(map(str, answer_message)))\n\n\n@dp.callback_query_handler(text_contains='4')\nasync def diagram_base(call: types.callback_query):\n Diagram.put_base(call.data.replace('4', ''))\n await bot.send_message(call.message.chat.id, 'To get diagram select the second currency',\n reply_markup=keyboard5)\n\n\n@dp.callback_query_handler(text_contains='5')\nasync def diagram_base(call: types.callback_query):\n Diagram.put_transfer(call.data.replace('5', ''))\n Diagram.request()\n await bot.send_photo(call.message.chat.id, photo=open('diagrams/{}.png'.format(call.data.replace('5', '')), 'rb'))\n Diagram.dell_diagram()\n\nif __name__ == \"__main__\":\n executor.start_polling(dp, skip_updates=True)\n","sub_path":"bot_main.py","file_name":"bot_main.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417737046","text":"import datetime\n\ndef calcDate(dateString):\n try:\n calculatedDate = datetime.datetime.strptime(dateString, '%d %b %Y')\n return calculatedDate\n except:\n print(\"Could not parse date, returning now\")\n # We get no credit for detecting syntactic errors, but may want to know when \n return datetime.datetime.now()\n\ndef calculateAge(individual, inputDateObj):\n birthDate = individual[\"BIRT\"][0]\n try:\n birthDateObj = calcDate(birthDate)\n except:\n print(\"Could not parse individual's 
birthdate\")\n # We get no credit for detecting syntactic errors, but may want to know when \n return -1\n else:\n ageAtInputDate = inputDateObj.year - birthDateObj.year - (1 if (inputDateObj.month< birthDateObj.month) else (1 if ((inputDateObj.month == birthDateObj.month) and (inputDateObj.day < birthDateObj.day)) else 0))\n return ageAtInputDate\n\n#US02\n\ndef birthBeforeMarriage(familyDict, individualDict):\n retList = []\n currDate = datetime.datetime.now()\n for key in familyDict.keys():\n currFam = familyDict[key]\n husb = individualDict[currFam['HUSB'][0]]\n wife = individualDict[currFam['WIFE'][0]]\n husbBirt = calcDate(husb['BIRT'][0])\n wifeBirt = calcDate(wife['BIRT'][0])\n marrDate = calcDate(currFam['MARR'][0])\n if marrDate < husbBirt:\n retList += [(\"Error US02: \" + husb['NAME'][0] + \"'s birth date is after their marriage date.\")]\n if marrDate < wifeBirt:\n retList += [(\"Error US02: \" + wife['NAME'][0] + \"'s birth date is after their marriage date.\")]\n return retList\n ","sub_path":"gedComProj/src/us02Story.py","file_name":"us02Story.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"161627907","text":"import numpy as np\nimport pandas as pd\nimport csv as csv\nfrom sklearn.tree import DecisionTreeClassifier\n\n\ndef preprocess(filename):\n data_df = pd.read_csv(filename, header=0)\n data_df['Gender'] = data_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)\n # All missing Embarked -> just make them embark from most common place\n if len(data_df.Embarked[ data_df.Embarked.isnull() ]) > 0:\n data_df.Embarked[ data_df.Embarked.isnull() ] = data_df.Embarked.dropna().mode().values\n\n Ports = list(enumerate(np.unique(data_df['Embarked']))) # determine all values of Embarked,\n Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index\n data_df.Embarked = data_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int\n dummy_embarked = pd.get_dummies(data_df['Embarked'],prefix='Embarked',drop_first=False)\n data_df = pd.concat([data_df,dummy_embarked], axis = 1)\n data_df = data_df.drop(['Embarked'],axis=1)\n # All the ages with no data -> make the median of all Ages\n median_age = data_df['Age'].dropna().median()\n if len(data_df.Age[ data_df.Age.isnull() ]) > 0:\n data_df.loc[ (data_df.Age.isnull()), 'Age'] = median_age\n data_df['Age'] *= 0.1\n\n Dmax = data_df['Fare'].max()\n Dmin = data_df['Fare'].min()\n Dmean = data_df['Fare'].mean()\n data_df['Fare'] = (data_df['Fare']-Dmean)/(Dmax-Dmin)\n\n # Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)\n data_df = data_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId','Fare','Parch','SibSp'], axis=1)\n return data_df\n\n# transform data to np.array\ntrain_df = preprocess('train.csv')\ntest_df = preprocess('test.csv')\ntrain_X = np.array(train_df.drop(['Survived'],axis=1),dtype=np.float32)\ntrain_Y = np.matrix(train_df['Survived']).T\n\ntest_X = np.array(test_df,dtype=np.float32)\ntest_X[np.isnan(test_X)] = np.mean(test_X[~np.isnan(test_X)])\n#print('是否存在nan?',np.isnan(test_X).any())\nprint('train_X shape:',train_X.shape)\nprint('train_y shape:',train_Y.shape)\nprint('text_X shape:',test_X.shape)\n\nclf = DecisionTreeClassifier(min_samples_split=20)\nprint('fitting model...')\nclf.fit(train_X,train_Y)\n\ntest_predict = clf.predict(test_X)\nprint('finish!')\nprint('score in training 
set:',clf.score(train_X,train_Y))\n# output the result\nwith open('Tree_predict_result.csv','w') as predict:\n writer = csv.writer(predict)\n writer.writerow(['PassengerId','Survived'])\n for i in range(test_X.shape[0]):\n writer.writerow([i+892,int(test_predict[i])])\n","sub_path":"Titanic_predict_tree.py","file_name":"Titanic_predict_tree.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609240738","text":"from __future__ import absolute_import\n\nimport os.path\nimport shutil\nimport sys\nimport tempfile\nimport textwrap\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\n # FIXME: this looks quite fishy. On 2.6, with unittest2, the assertRaises\n # context manager does not contain the actual exception object ?\n def exception_code(ctx):\n return ctx.exception\nelse:\n import unittest\n def exception_code(ctx):\n return ctx.exception.code\n\nimport mock\n\nfrom enstaller.main import main\nfrom enstaller.tests.common import authenticated_config, mock_print\n\nfrom .common import (fake_empty_resolve, fake_configuration_and_auth,\n enstaller_version, raw_input_always_yes,\n remote_enstaller_available)\n\nclass TestEnstallerMainActions(unittest.TestCase):\n def setUp(self):\n self.d = tempfile.mkdtemp()\n self.config = os.path.join(self.d, \".enstaller4rc\")\n\n def tearDown(self):\n shutil.rmtree(self.d)\n\n @authenticated_config\n @raw_input_always_yes\n @enstaller_version(\"4.6.1\")\n @remote_enstaller_available([\"4.6.2\"])\n def test_automatic_update(self):\n r_output = textwrap.dedent(\"\"\"\\\n Enstaller has been updated.\n Please re-run your previous command.\n \"\"\")\n\n with mock_print() as m:\n with mock.patch(\"enstaller.main.update_enstaller\"):\n with mock.patch(\"enstaller.main.install_req\"):\n main([\"\"])\n self.assertMultiLineEqual(m.value, r_output)\n\n @authenticated_config\n @raw_input_always_yes\n @enstaller_version(\"4.6.1\")\n @remote_enstaller_available([\"4.6.2\"])\n def test_enstaller_in_req(self):\n r_output = textwrap.dedent(\"\"\"\\\n Enstaller has been updated.\n Please re-run your previous command.\n \"\"\")\n\n with mock_print() as m:\n with mock.patch(\"enstaller.main.inplace_update\"):\n main([\"enstaller\"])\n self.assertMultiLineEqual(m.value, r_output)\n\n @authenticated_config\n @raw_input_always_yes\n @enstaller_version(\"4.6.3\")\n @remote_enstaller_available([\"4.6.2\"])\n @mock.patch(\"enstaller.main.logger\")\n def test_updated_enstaller(self, logger):\n with mock.patch(\"enstaller.main.install_req\"):\n main([\"\"])\n logger.info.assert_called_with('prefix: %r',\n os.path.normpath(sys.prefix))\n\n @authenticated_config\n @raw_input_always_yes\n @enstaller_version(\"4.6.3\")\n @remote_enstaller_available([\"4.6.2\"])\n def test_updated_enstaller_in_req(self):\n with mock_print() as m:\n with mock.patch(\"enstaller.main.install_req\"):\n main([\"enstaller\"])\n self.assertMultiLineEqual(m.value, \"\")\n\n\nclass TestEnstallerInstallActions(unittest.TestCase):\n @fake_configuration_and_auth\n @fake_empty_resolve\n def test_install_numpy(self):\n main([\"numpy\"])\n\n @fake_configuration_and_auth\n @fake_empty_resolve\n def test_install_epd(self):\n with mock.patch(\"enstaller.main.epd_install_confirm\") as m:\n main([\"epd\"])\n self.assertTrue(m.called)\n\n @fake_configuration_and_auth\n def test_remove_epd_fails(self):\n with mock.patch(\"enstaller.main.epd_install_confirm\"):\n with mock.patch(\"enstaller.main.install_req\"):\n with 
self.assertRaises(SystemExit) as e:\n                    main([\"--remove\", \"epd\"])\n                self.assertNotEqual(exception_code(e), 0)\n\n    @fake_configuration_and_auth\n    def test_install_epd_and_other(self):\n        with mock.patch(\"enstaller.main.epd_install_confirm\"):\n            with mock.patch(\"enstaller.main.install_req\"):\n                with self.assertRaises(SystemExit) as e:\n                    main([\"epd\", \"numpy\"])\n                self.assertNotEqual(exception_code(e), 0)\n\n    @fake_configuration_and_auth\n    def test_remove(self):\n        with mock.patch(\"enstaller.main.Enpkg.execute\"):\n            main([\"--remove\", \"numpy\"])\n","sub_path":"tests/functional/test_install.py","file_name":"test_install.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518366150","text":"from tr_option.base import KWTR\nfrom copy import deepcopy\n\n# [ opt10066 : 장중투자자별매매차트요청 (intraday investor-by-type trading chart request) ]\nclass Opt10066(KWTR):\n\n    def __init__(self, core):\n        super().__init__(core)\n\n        self.rq_name = self.tr_code = 'opt10066'\n\n        self.record_name_multiple = '장중투자자별매매차트'\n        self.header_multiple = [\n            '종목코드', '종목명', '현재가', '대비기호', '전일대비', '등락률', '거래량',\n            '개인투자자', '외국인투자자', '기관계', '금융투자', '보험', '투신', '기타금융', '은행', '연기금등', '사모펀드', '국가', '기타법인',\n        ]\n\n\n    def tr_opt(self, market_type, input1, input2, code, prev_next, screen_no):\n        # 시장구분 (market type) = 000: all, 001: KOSPI, 101: KOSDAQ\n        # 금액수량구분 (amount/quantity type) = 1: amount, 2: quantity\n        # 매매구분 (trade type) = 0: net buy, 1: buy, 2: sell\n        # 종목코드 (stock code) = the stock code to query\n\n        self.core.set_input_value('시장구분', market_type)\n        self.core.set_input_value('금액수량구분', input1)\n        self.core.set_input_value('매매구분', input2)\n        self.core.set_input_value('종목코드', code)\n        self.core.comm_rq_data(self.rq_name, self.tr_code, prev_next, screen_no)\n\n        self.tr_data = deepcopy(self.core.receive_tr_data_handler[self.tr_code][screen_no])\n\n        return self.tr_data\n","sub_path":"tr_option/opt10066.py","file_name":"opt10066.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"45517598","text":"import math\n\nclass Solution:\n    # Given L = [232, 124, 456] and k = 7, return 114:\n    # given a set of logs, cut them any number of times so that at least k pieces of equal length remain; find the longest such piece length\n\n    def woodCut(self, nums, k):\n        n = len(nums)\n        if (nums is None or n == 0):\n            return -1\n\n        # fix the start and end of the search range\n        start = 1\n        end = max(nums)\n\n        while(start + 1 < end):\n            # stop once start and end are adjacent\n            mid = start + (end - start) // 2\n            if self.check_nums(nums, mid) == k:\n                # check_nums counts how many pieces the logs yield when each piece has length mid\n                # the larger mid is, the smaller that count, so keep the largest mid whose count is k\n                start = mid\n            elif self.check_nums(nums, mid) > k:\n                start = mid\n            else:\n                end = mid\n\n        if self.check_nums(nums, start) == k:\n            return start\n        if self.check_nums(nums, end) == k:\n            return end\n\n        return 0\n\n    def check_nums(self, nums, mid):\n        count = 0\n        for num in nums:\n            count += num // mid\n        return count\n\nif __name__ == '__main__':\n    print(Solution().woodCut([232, 124, 456], 7))","sub_path":"专题学习/二分法/woodCut.py","file_name":"woodCut.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"94836747","text":"#!/usr/bin/python -i\n\n# Copyright (c)2010-2013 the Boeing Company.\n# See the LICENSE file included in this distribution.\n\n# A distributed example where CORE API messaging is used to create a session\n# distributed across the local server and one slave server.
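The woodCut record above is an instance of binary search over the answer space: check_nums(nums, mid) is monotonically non-increasing in mid, so the largest feasible piece length can be bisected directly. A minimal self-contained sketch of the same pattern, using the classic ">= k" formulation of the problem rather than the record's exact-equality check (names here are illustrative, not taken from the record):

def max_piece_length(logs, k):
    # pieces(length) is monotonically non-increasing in length
    def pieces(length):
        return sum(log // length for log in logs)

    lo, hi = 1, max(logs)
    best = 0
    while lo <= hi:
        mid = (lo + hi) // 2
        if pieces(mid) >= k:
            best = mid        # feasible: try a longer piece
            lo = mid + 1
        else:
            hi = mid - 1      # infeasible: shorten the piece
    return best

assert max_piece_length([232, 124, 456], 7) == 114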
The slave server\n# must be specified using the '-s ' parameter, and needs to be\n# running the daemon with listenaddr=0.0.0.0 in the core.conf file.\n#\n\nimport datetime\nimport optparse\nimport sys\nfrom builtins import range\n\nimport core.nodes.base\nimport core.nodes.network\nfrom core import constants\nfrom core.api.tlv import coreapi, dataconversion\nfrom core.emulator.enumerations import CORE_API_PORT, EventTypes, EventTlvs, LinkTlvs, LinkTypes, MessageFlags\nfrom core.emulator.session import Session\nfrom core.nodes import ipaddress\n\n# node list (count from 1)\nn = [None]\n\n\ndef main():\n    usagestr = \"usage: %prog [-h] [options] [args]\"\n    parser = optparse.OptionParser(usage=usagestr)\n    parser.set_defaults(numnodes=5, slave=None)\n\n    parser.add_option(\"-n\", \"--numnodes\", dest=\"numnodes\", type=int,\n                      help=\"number of nodes\")\n    parser.add_option(\"-s\", \"--slave-server\", dest=\"slave\", type=str,\n                      help=\"slave server IP address\")\n\n    def usage(msg=None, err=0):\n        sys.stdout.write(\"\\n\")\n        if msg:\n            sys.stdout.write(msg + \"\\n\\n\")\n        parser.print_help()\n        sys.exit(err)\n\n    # parse command line options\n    (options, args) = parser.parse_args()\n\n    if options.numnodes < 1:\n        usage(\"invalid number of nodes: %s\" % options.numnodes)\n    if not options.slave:\n        usage(\"slave server IP address (-s) is a required argument\")\n\n    for a in args:\n        sys.stderr.write(\"ignoring command line argument: '%s'\\n\" % a)\n\n    start = datetime.datetime.now()\n\n    prefix = ipaddress.Ipv4Prefix(\"10.83.0.0/16\")\n    session = Session(1)\n    if 'server' in globals():\n        server.addsession(session)\n\n    # distributed setup - connect to slave server\n    slaveport = options.slave.split(':')\n    slave = slaveport[0]\n    if len(slaveport) > 1:\n        port = int(slaveport[1])\n    else:\n        port = CORE_API_PORT\n    print(\"connecting to slave at %s:%d\" % (slave, port))\n    session.broker.addserver(slave, slave, port)\n    session.broker.setupserver(slave)\n    session.set_state(EventTypes.CONFIGURATION_STATE)\n    tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value)\n    session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata))\n\n    switch = session.create_node(cls=core.nodes.network.SwitchNode, name=\"switch\")\n    switch.setposition(x=80, y=50)\n    num_local = options.numnodes // 2\n    num_remote = options.numnodes // 2 + options.numnodes % 2\n    print(\"creating %d (%d local / %d remote) nodes with addresses from %s\" % \\\n        (options.numnodes, num_local, num_remote, prefix))\n    for i in range(1, num_local + 1):\n        node = session.create_node(cls=core.nodes.base.CoreNode, name=\"n%d\" % i, _id=i)\n        node.newnetif(switch, [\"%s/%s\" % (prefix.addr(i), prefix.prefixlen)])\n        node.cmd([constants.SYSCTL_BIN, \"net.ipv4.icmp_echo_ignore_broadcasts=0\"])\n        node.setposition(x=150 * i, y=150)\n        n.append(node)\n\n    flags = MessageFlags.ADD.value\n    session.broker.handlerawmsg(switch.tonodemsg(flags=flags))\n\n    # create remote nodes via API\n    for i in range(num_local + 1, options.numnodes + 1):\n        node = core.nodes.base.CoreNode(session=session, _id=i, name=\"n%d\" % i, start=False)\n        node.setposition(x=150 * i, y=150)\n        node.server = slave\n        n.append(node)\n        node_data = node.data(flags)\n        node_message = dataconversion.convert_node(node_data)\n        session.broker.handlerawmsg(node_message)\n\n    # create remote links via API\n    for i in range(num_local + 1, options.numnodes + 1):\n        tlvdata = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.id)\n        tlvdata += 
coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, i)\n tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value)\n tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0)\n tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, prefix.addr(i))\n tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen)\n msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)\n session.broker.handlerawmsg(msg)\n\n session.instantiate()\n tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value)\n msg = coreapi.CoreEventMessage.pack(0, tlvdata)\n session.broker.handlerawmsg(msg)\n\n # start a shell on node 1\n n[1].client.term(\"bash\")\n\n print(\"elapsed time: %s\" % (datetime.datetime.now() - start))\n print(\"To stop this session, use the 'core-cleanup' script on this server\")\n print(\"and on the remote slave server.\")\n\n\nif __name__ == \"__main__\" or __name__ == \"__builtin__\":\n main()\n","sub_path":"daemon/examples/netns/distributed.py","file_name":"distributed.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248783580","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='homeProfessor'),\n path('schedule', views.agenda, name='schedule'),\n path('tarefa', views.TaskCreate.as_view(), name='tarefa'),\n path('nota//', views.nota, name='nota'),\n path('turmas', views.turmas, name='turmas'),\n path('alunos//', views.alunos, name='alunos'),\n path('tarefas//', views.tarefas, name='tarefas'),\n path('tarefasAluno//', views.tarefas_aluno, name='tarefasAluno'),\n path('notaTarefa//', views.nota_tarefa, name='notaTarefa'),\n path('alunosTarefa//', views.alunos_tarefa, name='alunosTarefa'), \n]","sub_path":"studyPlanner/professors/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149126557","text":"from django.test import TestCase\nfrom django.core.management import call_command\n\nfrom core.utils import get_gene_codes\nfrom core.utils import get_voucher_codes\nfrom public_interface.models import TaxonSets\nfrom public_interface.models import GeneSets\nfrom public_interface.models import Genes\nfrom genbank_fasta import utils\n\n\nclass TestGenBankFastaUtils(TestCase):\n def setUp(self):\n args = []\n opts = {'dumpfile': 'test_db_dump.xml', 'verbosity': 0}\n cmd = 'migrate_db'\n call_command(cmd, *args, **opts)\n\n gs = GeneSets.objects.get(geneset_name='2genes')\n g = Genes.objects.get(gene_code='COI')\n g2 = Genes.objects.get(gene_code='16S')\n ts = TaxonSets.objects.get(taxonset_name='Erebia')\n self.cleaned_data = {\n 'gene_codes': [g, g2],\n 'taxonset': ts,\n 'voucher_codes': 'CP200-10\\r\\nCP100-11',\n 'geneset': gs,\n }\n\n def test_get_gene_codes(self):\n expected = 3\n result = get_gene_codes(self.cleaned_data)\n self.assertEqual(expected, len(result))\n\n def test_dataset_reading_frame_2(self):\n res = utils.Results(['CP100-10', 'CP100-11'], ['COI'])\n res.get_datasets()\n self.assertEqual('WAGMIGTSLSLIIRTELGNP', res.protein.splitlines()[1][0:20])\n\n def test_get_voucher_codes(self):\n expected = 3\n result = get_voucher_codes(self.cleaned_data)\n self.assertEqual(expected, len(result))\n\n def test_get_voucher_codes_dropped(self):\n self.cleaned_data['voucher_codes'] = 
'CP100-10\\r\\n--CP100-11\\r\\nCP100-12'\n expected = 2\n result = get_voucher_codes(self.cleaned_data)\n self.assertEqual(expected, len(result))\n","sub_path":"voseq/genbank_fasta/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85763244","text":"# Sofia Juarez Rodriguez\r\n# 10689184\r\n# Homework 3 dataprocessing\r\n\r\nimport csv\r\nimport json\r\n\r\n# Define in- and output files\r\ninfile = input(\"Origin file (between quotation marks):\")\r\noutfile = input(\"Exit file (between quotation marks):\")\r\n\r\ndef fromcsvtojson(infile, outfile):\r\n\t\r\n\t#Opens and reads input file and writes data in output file\r\n\tcsvfile = open(infile, 'r')\r\n\tjsonfile = open(outfile, 'w')\r\n\t\r\n\t#Define json fields\r\n\tfieldnames = (\"location\",\"indicator\",\"subject\",\"measure\",\"frequency\",\"time\",\"value\",\"flag_codes\")\r\n\treader = csv.DictReader(csvfile, fieldnames)\r\n\t\r\n\t#Write for every csv row an array in the output json file\r\n\tjsonfile.write(\"[\")\r\n\tfor row in reader:\r\n\t\tjson.dump(row, jsonfile)\r\n\t\tjsonfile.write(', \\n')\r\n\tjsonfile.write(\"]\")\r\n\r\n#Execute function\t\t\r\nfromcsvtojson(infile, outfile)","sub_path":"Homework/week_5/hw3convertCSV2JSON.py","file_name":"hw3convertCSV2JSON.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90805573","text":"import os\nfrom subprocess import call\n\nimport time\nfrom boto.s3.key import Key\nfrom flask import request, abort\nfrom flask_restful import Resource, reqparse\nfrom api import app, bucket, api, elasticsearch_service, mongo_client\nfrom api.tasks import transcribe\n\n\ndef make_key_name(filename):\n return 'recording/{}'.format(filename)\n\n\ndef make_s3_url(filename):\n return 'https://s3.amazonaws.com/minutes-api/recording/{}'.format(filename)\n\n\ndef upload_to_s3(bucket, filename, path):\n k = Key(bucket)\n k.key = make_key_name(filename)\n k.set_contents_from_filename(path)\n\n\ndef resample(file_path, resampled_file_path):\n call(['sox', file_path, '-b', '16', '-r', '16k', '-c', '1', resampled_file_path])\n\n\nclass Transcripts(Resource):\n\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('query')\n args = parser.parse_args()\n\n if args['query']:\n return elasticsearch_service.search(args['query'])\n else:\n return elasticsearch_service.get_most_recent()\n\n\nclass Recordings(Resource):\n\n def post(self):\n file = request.files['file']\n if file:\n timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n filename = '{}.wav'.format(timestamp)\n resampled_filename = '{}-16k.wav'.format(timestamp)\n filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n resampled_filepath = os.path.join(app.config['UPLOAD_FOLDER'], resampled_filename)\n file.save(filepath)\n resample(filepath, resampled_filepath)\n\n s3_url = make_s3_url(resampled_filename)\n\n recording = {\n 'user_id': 1,\n 's3_url': s3_url\n }\n recording_id = str(mongo_client.minutes.transcripts.insert_one(recording).inserted_id)\n\n upload_to_s3(bucket, resampled_filename, resampled_filepath)\n transcribe.delay(recording_id, s3_url)\n\n return {'id': recording_id}, 201\n abort(400, message='A file is required!')\n\napi.add_resource(Recordings, '/recordings')\napi.add_resource(Transcripts, 
'/transcripts')\n","sub_path":"api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"585390390","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n\n\nclass Solution(object):\n def canTrans(self, word1, word2):\n l = len(word1)\n count = 0\n for i in range(l):\n if (word1[i] != word2[i]):\n count += 1\n if (count != 1):\n return False\n return True\n\n def findLadders(self, beginWord, endWord, wordList):\n end = -1\n for index, value in enumerate(wordList):\n if (value == endWord):\n end = index\n break\n if (end == -1):\n return []\n\n wordList.append(beginWord)\n l = len(wordList)\n wordMap = [[999999 for i in range(l)] for j in range(l)]\n print(wordMap)\n for i in range(l):\n for j in range(i, l):\n if (wordList[i] != wordList[j] and self.canTrans(wordList[i], wordList[j])):\n wordMap[i][j] = wordMap[j][i] = 1\n print(wordMap)\n\ns = Solution()\nprint(s.findLadders('hit', 'cog', [\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"]))\n","sub_path":"126.py","file_name":"126.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650389275","text":"# emoji-flag to ISO 639-1 language code.\n# see also:\n# - https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes\n# - https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2\n# - https://emojipedia.org/flags/\n# - https://wikitravel.org/en/Main_Page\n# - https://cloud.google.com/translate/docs/languages?hl=en\n# - https://tech.yandex.com/translate/doc/dg/concepts/api-overview-docpage/\n# - https://msdn.microsoft.com/en-us/library/hh456380.aspx\nimport os\nimport json\n\n\nclass Lang:\n def __init__(self):\n self.dict_flag = self.__open_json_file__('lang_flag.json')\n self.dict_google = self.__open_json_file__('lang_google.json')\n self.dict_ms = self.__open_json_file__('lang_ms.json')\n self.dict_yandex = self.__open_json_file__('lang_yandex.json')\n\n def __open_json_file__(self, file_name):\n try:\n dir_name = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(dir_name, file_name)\n with open(path) as f:\n return json.load(f)\n except:\n return None\n\n def __print_list_diff__(self, ln):\n if self.dict_flag is None:\n print('dict_flag is None')\n return\n\n if ln is None:\n print('input list is None')\n return\n\n for item in set(self.dict_flag.values()):\n if item not in ln:\n print('{} is not found in flag list.'.format(item))\n\n def get_lang(self, flag):\n if self.dict_flag is None:\n return None\n\n name = self.dict_flag.get(flag)\n if name is None or name == \"\":\n return None\n return name\n\n def get_google(self, lang):\n if self.dict_google is None:\n return None\n\n name = self.dict_google.get(lang)\n if name is None or name == \"\":\n return None\n return name\n\n def get_ms(self, lang):\n if self.dict_ms is None:\n return None\n\n name = self.dict_ms.get(lang)\n if name is None or name == \"\":\n return None\n return name\n\n def get_yandex(self, lang):\n if self.dict_yandex is None:\n return None\n\n name = self.dict_yandex.get(lang)\n if name is None or name == \"\":\n return None\n return name\n","sub_path":"chalicelib/lang.py","file_name":"lang.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562418385","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\n\n\ndef 
scaling_translation(img, a = 1, d = 1, tx=0, ty=0):\n    H, W, C = img.shape\n    \n    # new H and W for output image\n    H_new = int(H*d)\n    W_new = int(W*a)\n    out = np.zeros((H_new, W_new, C))\n    \n    # kernel\n    K = np.matrix([[d, 0],[0, a]])\n    T = np.matrix([tx, ty]).T\n    adbc = a*d - 0*0\n    \n    for j in range(H_new):\n        for i in range(W_new):\n            pixel = (np.matmul(K, np.matrix([i, j]).T)/adbc - T).astype(np.int)\n            if (pixel[1,0] >= 0) & (pixel[0,0] 14:\n        ordinal += 1\n    victim = ordinal % 3\n    name = victims[victim][0]\n    print(str(today) + ' - Assigning chores to ' + name)\n    address = victims[victim][1]\n    msg = MIMEText(payload.format(name))\n    msg['From'] = 'chorebot@gcr.me'\n    msg['To'] = address\n    msg['Subject'] = 'Apartment Cleanup Reminder'\n    with smtplib.SMTP() as sender:\n        sender.connect()\n        sender.send_message(msg)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"chorebot.py","file_name":"chorebot.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"297110787","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport capture\nimport game\nfrom game import Directions\n\nACTIONS = [Directions.STOP, Directions.NORTH, Directions.SOUTH, Directions.WEST, Directions.EAST]\n\ndef getLegalActionsVector(state, agentIndex):\n    legalActions = state.getLegalActions(agentIndex)\n    vector = np.zeros(5)\n    for i in range(vector.size):\n        vector[i] = 0 if ACTIONS[i] in legalActions else -1000\n\n    return vector\n\n\ndef createMapRepresentation(state, agentIndex):\n    \"\"\"\n    Creates an image representation of the state that can be sent as an input to the CNN.\n    One could picture this as a simplified image of the map in a given state, but instead of using\n    multiple pixels for each object in the map (that is, an agent, a wall, ...), it will be represented\n    with a single, one channel (black and white), pixel.
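The inverse-mapping loop in the scaling_translation record above traces each output pixel back through the inverse of the scale matrix and the translation before sampling the source image. OpenCV's warpAffine applies the same backward mapping internally, so under the assumption that img is an HxWxC array, an equivalent sketch is (function name is illustrative):

import cv2
import numpy as np

def scale_translate_cv(img, a=1.0, d=1.0, tx=0.0, ty=0.0):
    h, w = img.shape[:2]
    # forward affine matrix [[a, 0, tx], [0, d, ty]]; warpAffine
    # resamples the output through its inverse, like the manual loop
    m = np.float32([[a, 0, tx], [0, d, ty]])
    return cv2.warpAffine(img, m, (int(w * a), int(h * d)))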
\n \"\"\"\n IMG_ROWS = state.data.layout.height\n\n data = str(state.data).split(\"\\n\")\n data.pop()\n data.pop()\n\n representation = []\n rowIndex = 0\n for rowIndex in range(len(data)):\n representation.append([])\n for char in list(data[rowIndex]):\n representation[rowIndex].append(ord(char))\n\n representation = np.array(representation)\n\n # COLOR AGENTS:\n for agent in range(state.getNumAgents()):\n agentPosition = state.getAgentPosition(agent)\n agentState = state.getAgentState(agent)\n if agent == agentIndex and not agentState.isPacman:\n representation[IMG_ROWS - agentPosition[1] -1][agentPosition[0]] = 200\n elif agent == agentIndex and agentState.isPacman:\n representation[IMG_ROWS - agentPosition[1] -1][agentPosition[0]] = 220 \n elif state.isOnRedTeam(agentIndex) == state.isOnRedTeam(agent):\n representation[IMG_ROWS - agentPosition[1] -1][agentPosition[0]] = 150\n else:\n representation[IMG_ROWS - agentPosition[1] -1][agentPosition[0]] = 80\n \n if agentState.scaredTimer > 0 and not agentState.isPacman:\n representation[IMG_ROWS - agentPosition[1] -1][agentPosition[0]] += 10\n\n # USE THESE LINES IF YOU WANT TO CHECK THE IMAGE REPRESENTATION OF THE STATE,\n # SEEN BY THE AGENT THAT EXECUTES THE FUNCTION\n # plt.imshow(representation)\n # plt.show()\n\n representation = representation.reshape([1, representation.shape[0], representation.shape[1], 1])\n return representation\n","sub_path":"qlearnFunctions.py","file_name":"qlearnFunctions.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"370846423","text":"import csv\nimport os\nimport shutil\n\n\ndef classification():\n root_dir = \"../../data/XXXX_code/\"\n CWE = root_dir + \"CWE/\"\n if not os.path.exists(CWE):\n os.makedirs(CWE)\n\n with open(\"src/flawfinder-result.csv\", \"r\", encoding=\"utf-8\") as result_list:\n reader = csv.reader(result_list)\n for item in reader:\n if reader.line_num == 1:\n continue\n cwes = str(item[9])\n if cwes.find(\"!/\") != -1:\n cwes = cwes.replace(\"!/\", \"#\")\n if not os.path.exists(CWE + cwes):\n os.makedirs(CWE + cwes)\n name = str(item[0])\n shutil.copy(root_dir + name, CWE + cwes)\n elif cwes.find(\",\") != -1:\n for cwes_name in cwes.split(\",\"):\n cwes_name = cwes_name.strip()\n if not os.path.exists(CWE + cwes_name):\n os.makedirs(CWE + cwes_name)\n name = str(item[0])\n shutil.copy(root_dir + name, CWE + cwes_name)\n else:\n if not os.path.exists(CWE + cwes):\n os.makedirs(CWE + cwes)\n name = str(item[0])\n shutil.copy(root_dir + name, CWE + cwes)\n\n\nif __name__ == '__main__':\n classification()\n","sub_path":"CprojectCollection/download/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80952560","text":"\n\nfrom xai.brain.wordbase.nouns._flex import _FLEX\n\n#calss header\nclass _FLEXES(_FLEX, ):\n\tdef __init__(self,): \n\t\t_FLEX.__init__(self)\n\t\tself.name = \"FLEXES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"flex\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_flexes.py","file_name":"_flexes.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198645051","text":"import argparse\nimport os.path as ops\nimport subprocess\n\nimport cv2\nimport numpy as np\nfrom imutils.object_detection import 
non_max_suppression\n\nimport sys, os\nsys.path.insert(0, os.getcwd())\n\nfrom tools.test_shadownet import *\n\nmin_conf = 0.5\nFFMPEG_CMD = \"ffmpeg -f rawvideo -pixel_format yuyv422 -video_size 848x477 -i\"\n\n\n\ndef init_args_recog():\n \"\"\"\n\n :return: parsed arguments and (updated) config.cfg object\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--image', type=str,\n help='image to be read')\n parser.add_argument('--weights_path', type=str,\n help='Path to the pre-trained weights to use')\n parser.add_argument('-c', '--char_dict_path', type=str,\n help='Directory where character dictionaries for the dataset were stored')\n parser.add_argument('-o', '--ord_map_dict_path', type=str,\n help='Directory where ord map dictionaries for the dataset were stored')\n parser.add_argument('-v', '--visualize', type=args_str2bool, nargs='?', const=True,\n help='Whether to display images')\n parser.add_argument(\"--paddingX\", type=float, default=0.0,\n help=\"amount of padding to add to each border of ROI\")\n parser.add_argument(\"--paddingY\", type=float, default=0.0,\n help=\"amount of padding to add to each border of ROI\")\n\n return parser.parse_args()\n\ndef decode_predictions(scores, geometry):\n # grab the number of rows and columns from the scores volume, then\n # initialize our set of bounding box rectangles and corresponding\n # confidence scores\n (numRows, numCols) = scores.shape[2:4]\n rects = []\n confidences = []\n\n # loop over the number of rows\n for y in range(0, numRows):\n # extract the scores (probabilities), followed by the\n # geometrical data used to derive potential bounding box\n # coordinates that surround text\n scoresData = scores[0, 0, y]\n xData0 = geometry[0, 0, y]\n xData1 = geometry[0, 1, y]\n xData2 = geometry[0, 2, y]\n xData3 = geometry[0, 3, y]\n anglesData = geometry[0, 4, y]\n\n # loop over the number of columns\n for x in range(0, numCols):\n # if our score does not have sufficient probability,\n # ignore it\n if scoresData[x] < min_conf:\n continue\n\n # compute the offset factor as our resulting feature\n # maps will be 4x smaller than the input image\n (offsetX, offsetY) = (x * 4.0, y * 4.0)\n\n # extract the rotation angle for the prediction and\n # then compute the sin and cosine\n angle = anglesData[x]\n cos = np.cos(angle)\n sin = np.sin(angle)\n\n # use the geometry volume to derive the width and height\n # of the bounding box\n h = xData0[x] + xData2[x]\n w = xData1[x] + xData3[x]\n\n # compute both the starting and ending (x, y)-coordinates\n # for the text prediction bounding box\n endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))\n endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))\n startX = int(endX - w)\n startY = int(endY - h)\n\n # add the bounding box coordinates and probability score\n # to our respective lists\n rects.append((startX, startY, endX, endY))\n confidences.append(scoresData[x])\n\n # return a tuple of the bounding boxes and associated confidences\n return (rects, confidences)\n\ndef get_text_boxes(image, W, H):\n # define the two output layer names for the EAST detector model that\n # we are interested -- the first is the output probabilities and the\n # second can be used to derive the bounding box coordinates of text\n layerNames = [\n \"feature_fusion/Conv_7/Sigmoid\",\n \"feature_fusion/concat_3\"]\n \n # load the pre-trained EAST text detector\n print(\"[INFO] loading EAST text detector...\")\n net = cv2.dnn.readNet('frozen_east_text_detection.pb')\n \n # construct a blob 
from the image and then perform a forward pass of\n # the model to obtain the two output layer sets\n blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),\n (123.68, 116.78, 103.94), swapRB=True, crop=False)\n net.setInput(blob)\n (scores, geometry) = net.forward(layerNames)\n \n # decode the predictions, then apply non-maxima suppression to\n # suppress weak, overlapping bounding boxes\n (rects, confidences) = decode_predictions(scores, geometry)\n boxes = non_max_suppression(np.array(rects), probs=confidences)\n return boxes\n\nif __name__ == '__main__':\n \"\"\"\n \n \"\"\"\n # init images\n args = init_args_recog()\n\n # detect text\n image_name = args.image\n print(\"Find the text in the image \" + args.image)\n\n file_ext = image_name[-4:]\n if(file_ext=='yuyv'):\n new_name = image_name[:-4]+'png'\n print('converting image to PNG format')\n cmd = FFMPEG_CMD +\" \"+ image_name + \" \"+ new_name\n #p = subprocess.Popen(['ls', '-l', '../'])\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n print(p.communicate())\n image_name = new_name\n\n image = cv2.imread(image_name)\n orig = image.copy()\n\n #Crop the image for Netflix use case 480, 160\n W=480\n H=224\n image = image[0:224, 0:480]\n\n (origH, origW) = image.shape[:2]\n\n # set the new width and height and then determine the ratio in change\n # for both the width and height\n (newW, newH) = (W*2, H*2)\n rW = origW / float(newW)\n rH = origH / float(newH)\n\n # resize the image and grab the new image dimensions\n image = cv2.resize(image, (newW, newH), cv2.INTER_AREA)\n (H, W) = image.shape[:2]\n\n boxes = get_text_boxes(image, W,H)\n\n #save the image\n #Draw rectangles\n text_boxes = image.copy()\n for (startX, startY, endX, endY) in boxes:\n cv2.rectangle(text_boxes, (startX, startY), (endX, endY), (0, 255, 0), 2)\n cv2.imwrite(image_name[:-4]+'_out.jpg', text_boxes)\n\n #Order the boundboxes\n xSorted = boxes[np.argsort(boxes[:, 1]), :]\n print(\"BEFORE:::\")\n print(xSorted)\n\n n_row = 0\n boxes_order = []\n for i,box in enumerate(xSorted):\n if i == 0:\n box = np.concatenate((box,np.array([n_row])))\n print(box)\n boxes_order.append(box)\n continue\n if((box[1] - xSorted[i-1][1] < 4) and (box[1] - xSorted[i-1][1] > -4)):\n box = np.concatenate((box,np.array([n_row])))\n boxes_order.append(box)\n else:\n n_row = n_row+1\n box = np.concatenate((box,np.array([n_row])))\n boxes_order.append(box)\n boxes = sorted(boxes_order , key=lambda k: [k[4], k[0]])\n print(\"AFTER::\")\n print(boxes)\n\n #Read text for each box\n nRow=0\n out = \"\"\n for (startX, startY, endX, endY, row) in boxes:\n #cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)\n print(\"Reading text {}, {} \".format(startX, startY))\n \n #Read Text in the original image\n image = orig\n startX \t= int(startX/2)\n endX \t= int(endX/2)\n startY \t= int(startY/2)\n endY \t= int(endY/2)\n #endY = endY+1\n\n #add margin to boundbox\n dX = int((endX - startX) * args.paddingX)\n dY = int((endY - startY) * args.paddingY)\n \n print(\"Padding {}, {} \".format(dX, dY))\n\n # apply padding to each side of the bounding box, respectively\n startX = max(0, startX - dX)\n startY = max(0, startY - dY)\n endX = min(origW, endX + (dX * 2))\n endY = min(origH, endY + (dY * 2)) \n\n text_read = recognize_image(\n image[startY:endY, startX:endX], \n weights_path=args.weights_path,\n char_dict_path=args.char_dict_path,\n ord_map_dict_path=args.ord_map_dict_path,\n is_vis=args.visualize)\n\n if row == nRow:\n out = out + text_read + \" \"\n else:\n nRow = row\n 
out = out + '\\n' + text_read + \" \"\n\n print(\"OUTPUT\")\n print(out)\n #Draw rectangles\n #for (startX, startY, endX, endY) in boxes:\n # cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)\n\n\n # show the output image\n #cv2.imshow(\"Text Detection\", image)\n #cv2.waitKey(0)\n\n","sub_path":"my_ocr/text_recognition.py","file_name":"text_recognition.py","file_ext":"py","file_size_in_byte":8984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512982402","text":"\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implemenetation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport glob\nimport random\nimport math\nimport datetime\nimport itertools\nimport time\nimport json\nimport re\nimport logging\nfrom collections import OrderedDict\nimport numpy as np\nimport scipy.misc\nimport tensorflow as tf\nimport keras\nimport keras.backend as KB\nimport keras.layers as KL\nimport keras.initializers as KI\nimport keras.engine as KE\nimport keras.models as KM\nimport pprint \npp = pprint.PrettyPrinter(indent=2, width=100)\n\n# def get_layer_output(model, output_layer, model_input, training_flag = True):\n # _mrcnn_class = KB.function([model.input]+[KB.learning_phase()],\n # [model.layers[output_layer].output])\n # output = _mrcnn_class([model_input,training_flag])[0] \n # return output\n \n \ndef get_layer_output_1(model, model_input, output_layer, training_flag = True, verbose = True):\n _my_input = model_input + [training_flag]\n if verbose: \n print('/* Inputs */')\n for i, (input_layer,input) in enumerate(zip(model.input, model_input)):\n print('Input {:2d}: ({:40s}) \\t Input shape: {}'.format(i, input_layer.name, input.shape))\n \n model_outputs = [model.outputs[x] for x in output_layer]\n \n _mrcnn_class = KB.function(model.input, model_outputs) \n output = _mrcnn_class(model_input) \n \n if verbose:\n print('\\n/* Outputs */') \n for i, output_layer, output in zip (output_layer, model_outputs , output):\n print('Output {:2d}: ({:40s}) \\t Output shape: {}'.format(i, output_layer.name, output.shape))\n\n return output\n\n \n \ndef get_layer_output_2(model, model_input, training_flag = True, verbose = True):\n if verbose: \n print('/* Inputs */')\n for i, (input_layer,input) in enumerate(zip(model.input, model_input)):\n print('Input {:2d}: ({:40s}) \\t Input shape: {}'.format(i, input_layer.name, input.shape))\n\n get_output = KB.function(model.input , model.outputs)\n model_output = get_output(model_input) \n \n print(type(model_output))\n if verbose:\n print('\\n/* Outputs */') \n for i, (output_layer, output) in enumerate (zip (model.outputs , model_output)):\n print('Output {:2d}: ({:40s}) \\t Output shape: {}'.format(i, output_layer.name, output.shape))\n return model_output \n\n \nclass MyCallback(keras.callbacks.Callback):\n\n def __init__(self): \n\n return \n \n # , pool_shape, image_shape, **kwargs):\n # super(PyramidROIAlign, self).__init__(**kwargs)\n # self.pool_shape = tuple(pool_shape)\n # self.image_shape = tuple(image_shape)\n\n def on_epoch_begin(self, epoch, logs = {}) :\n print('\\n>>> Start epoch {} \\n'.format(epoch))\n pp = pprint.PrettyPrinter(indent=4)\n return \n\n def on_epoch_end (self, epoch, logs = {}): \n print('\\n>>>End epoch {} \\n'.format(epoch))\n pp = pprint.PrettyPrinter(indent=4) \n return \n\n def on_batch_begin(self, batch, logs = {}):\n print('\\n... 
Start training of batch {} size {} '.format(batch,logs['size']))\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(self.model._feed_inputs)\n k_sess = KB.get_session()\n # self.model._feed_inputs[1].eval(session=k_sess)\n return \n \n def on_batch_end (self, batch, logs = {}): \n print('\\n... End training of batch {} '.format(batch,logs['loss']))\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(logs)\n # i = 229\n # print('\\n shape of output layer: {} '.format(i)) ## , tf.shape(self.model.layers[i].output)))\n # for i in (self.model.input):\n # print('input type: {}'.format(i.get_shape()))\n # print(self.model.layers[229].output.eval())\n # layer_out = get_layer_output(self.model, 229, self.model.input, 1)\n # print('type of layer out is {} shape is {}'.format(type(layer_out), layer_out.shape))\n return \n \n def on_train_begin(self,logs = {}): \n pp = pprint.PrettyPrinter(indent=4)\n # i = 229\n # pp.pprint(self.model.layers[i].__dict__) \n # pp.pprint(self.model.layers[i]._inbound_nodes[0].__dict__) \n # pp.pprint(self.model.layers[i].layer.__dict__) \n # print('size of input {} type of input {}'.format(len(self.model.input), type(self.model.input)))\n\n print('\\n ***** Start of Training {} '.format(time.time()))\n return \n \n def on_train_end (self,logs = {}): \n pp = pprint.PrettyPrinter(indent=4) \n # pp.pprint(self.model.__dict__)\n print('\\n'*3)\n # pp.pprint(dir(self.model))\n print('***** End of Training {} '.format(time.time())) \n return \n","sub_path":"mrcnn/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376819107","text":"# coding: utf-8\n\n# Copyright (C) 2019 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Tests for /query api endpoint.\"\"\"\n\nimport datetime\nimport operator\nfrom ggrc.models import all_models\nfrom freezegun import freeze_time\nfrom integration.ggrc import TestCase\nfrom integration.ggrc.api_helper import Api\nfrom integration.ggrc.models import factories\n\n\nclass TestAuditDeprecated(TestCase):\n \"\"\"Test for correct working field last_deprecated_date \"\"\"\n\n def setUp(self):\n super(TestAuditDeprecated, self).setUp()\n self.api = Api()\n\n def test_redefine_status(self):\n \"\"\"Test create audit and change status to Deprecated\"\"\"\n audit = factories.AuditFactory()\n\n with freeze_time(\"2017-01-25\"):\n self.api.modify_object(audit, {\n \"status\": \"Deprecated\"\n })\n\n audit_result = all_models.Audit.query.filter(\n all_models.Audit.id == audit.id\n ).one()\n\n self.assertEquals(audit_result.last_deprecated_date,\n datetime.date(2017, 1, 25))\n\n def test_keep_date_unchanged(self):\n \"\"\"Test set status audit to Deprecated, and then set status to Planned\"\"\"\n audit = factories.AuditFactory()\n\n with freeze_time(\"2017-01-25\"):\n self.api.modify_object(audit, {\n \"status\": \"Deprecated\"\n })\n\n with freeze_time(\"2017-01-26\"):\n self.api.modify_object(audit, {\n \"status\": \"Planned\"\n })\n\n audit_result = all_models.Audit.query.filter(\n all_models.Audit.id == audit.id\n ).one()\n\n self.assertEquals(audit_result.status, \"Planned\")\n self.assertEquals(audit_result.last_deprecated_date,\n datetime.date(2017, 1, 25))\n\n def test_repeat_deprecated_state(self):\n \"\"\"Test set status audit to Deprecated, then to Planned,\n then to Deprecated and then to Planned\"\"\"\n audit = factories.AuditFactory()\n\n with freeze_time(\"2017-01-25\"):\n 
self.api.modify_object(audit, {\n \"status\": \"Deprecated\"\n })\n\n with freeze_time(\"2017-01-26\"):\n self.api.modify_object(audit, {\n \"status\": \"Planned\"\n })\n with freeze_time(\"2017-02-25\"):\n self.api.modify_object(audit, {\n \"status\": \"Deprecated\"\n })\n with freeze_time(\"2017-02-26\"):\n self.api.modify_object(audit, {\n \"status\": \"Planned\"\n })\n\n audit_result = all_models.Audit.query.filter(\n all_models.Audit.id == audit.id\n ).one()\n\n self.assertEquals(audit_result.status, \"Planned\")\n self.assertEquals(audit_result.last_deprecated_date,\n datetime.date(2017, 2, 25))\n\n def test_filter_by_deprecated_date(self):\n \"\"\"Test filter audits by last deprecated date\"\"\"\n amount_of_audits = 5\n list_of_ids = []\n with factories.single_commit():\n with freeze_time(\"2017-01-25\"):\n for _ in range(amount_of_audits):\n list_of_ids.append(\n factories.AuditFactory(status=\"Deprecated\").id\n )\n\n query_request_data = [{\n \"object_name\": \"Audit\",\n 'filters': {\n 'expression': {\n 'left': 'Last Deprecated Date',\n 'op': {'name': '='},\n 'right': \"2017-01-25\",\n },\n },\n 'type': 'ids',\n }]\n\n result = self.api.send_request(self.api.client.post,\n data=query_request_data,\n api_link=\"/query\")\n self.assertItemsEqual(list_of_ids, result.json[0][\"Audit\"][\"ids\"])\n\n def test_sort_by_deprecated_date(self):\n \"\"\"Test sorting results of filter audits by deprecated date\"\"\"\n dict_of_dates = {}\n date_list = [\"2017-01-25\", \"2017-01-29\", \"2017-01-02\", \"2017-01-26\"]\n with factories.single_commit():\n for date in date_list:\n with freeze_time(date):\n dict_of_dates[factories.AuditFactory(status=\"Deprecated\").id] = date\n\n sorted_dict = sorted(dict_of_dates.items(), key=operator.itemgetter(1))\n sorted_list_ids = [item[0] for item in sorted_dict]\n\n query_request_data = [{\n \"object_name\": \"Audit\",\n 'filters': {\n 'expression': {\n 'left': 'Last Deprecated Date',\n 'op': {'name': '='},\n 'right': \"2017-01\",\n },\n },\n \"order_by\": [{\"name\": \"last_deprecated_date\"}],\n 'type': 'ids',\n }]\n\n result = self.api.send_request(self.api.client.post,\n data=query_request_data,\n api_link=\"/query\")\n\n self.assertItemsEqual(sorted_list_ids, result.json[0][\"Audit\"][\"ids\"])\n","sub_path":"test/integration/ggrc/services/test_audit_deprecated.py","file_name":"test_audit_deprecated.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"143409773","text":"import os\nimport json\nimport logging\nimport pytest\nimport yaml\nfrom tycho.client import TychoClient\nfrom tycho.test.lib import client\nfrom tycho.test.lib import system\nfrom tycho.test.lib import system_request\nfrom unittest import mock\n\nlogger = logging.getLogger (__name__)\n\ndef test_client_start (mocker, system_request, client, request):\n print (f\"{request.node.name}\")\n response = {\n \"status\": \"success\",\n \"result\": {\n \"name\": \"jupyter-ds-caa94baea8a849d89e427bd78cad17eb\",\n \"sid\": \"caa94baea8a849d89e427bd78cad17eb\",\n \"containers\": {\n \"jupyter-datascience\": {\n \"ip_address\": \"127.0.0.1\",\n \"port-1\": 32661\n }\n },\n \"conn_string\": \"\",\n },\n \"message\": \"Started system jupyter-ds-caa94baea8a849d89e427bd78cad17eb\"\n }\n with mock.patch.object(TychoClient, 'request', return_value=response):\n tycho_system = client.start (system_request)\n result = response['result']\n jupyter = result['containers']['jupyter-datascience']\n assert 
tycho_system.status == 'success'\n assert tycho_system.name == result['name']\n assert tycho_system.identifier == result['sid']\n assert tycho_system.services[0].ip_address == jupyter['ip_address']\n assert tycho_system.services[0].port == jupyter['port-1']\n assert tycho_system.message == response['message']\n","sub_path":"tycho/test/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591727456","text":"from datetime import datetime\nfrom time import sleep\n\nimport pandas as pd\n\nimport configs\nfrom web_scrap_data.web_scrap_stats import get_coronavirus_cases_table, save_df_to_json_file\n\n\ndef build_timestamps_dataframe_from_web_archive():\n url = 'http://web.archive.org/cdx/search/cdx?url=https://www.worldometers.info/coronavirus/&output=txt'\n urls_df = pd.read_csv(url, sep=' ')\n urls_df.columns = [0, 'timestamp', 0, 0, 'status_code', 0, 0]\n urls_df = urls_df[['timestamp', 'status_code']]\n urls_df = urls_df[urls_df['status_code'] == '200']\n return urls_df\n\n\ndef get_valid_urls_by_dates_from_timestamps_dataframe(df):\n all_valid_urls_by_date = {}\n df.sort_values(by=['timestamp'], ascending=False, inplace=True)\n\n unique_dates = []\n for (_, row) in df.iterrows():\n timestamp = row[0]\n valid_date = str(timestamp)[:-6]\n if valid_date not in unique_dates:\n unique_dates.append(valid_date)\n\n valid_url = f'https://web.archive.org/web/{valid_date}/https://www.worldometers.info/coronavirus/'\n all_valid_urls_by_date[valid_date] = valid_url\n\n return all_valid_urls_by_date\n\n\ndef scrap_legacy_stats_tables(urls_by_dates):\n length = len(urls_by_dates)\n counter = 1\n for (date, url) in urls_by_dates.items():\n stats_df = get_coronavirus_cases_table(url, legacy=True)\n\n valid_date_for_path = datetime.strptime(date, '%Y%m%d').strftime('%d.%m.%Y')\n json_save_path = f'{configs.json_save_path}\\coronavirus_stats-{valid_date_for_path}.json'\n save_df_to_json_file(stats_df, json_save_path)\n\n print(f'{counter}/{length} ---- json file created for date {valid_date_for_path} - {url}')\n counter += 1\n sleep(0.2)\n print('saving legacy stats json files finished.')\n\n\nif __name__ == '__main__':\n timestamps_df = build_timestamps_dataframe_from_web_archive()\n urls_by_dates_for_scarp_tables = get_valid_urls_by_dates_from_timestamps_dataframe(timestamps_df)\n scrap_legacy_stats_tables(urls_by_dates_for_scarp_tables)\n\n\n\n","sub_path":"web_scrap_data/web_scrap_legacy_stats.py","file_name":"web_scrap_legacy_stats.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"407614893","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'rescuingpaws.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n #url(r'^$', 'pets.views.home'),\n #url(r'^$', HomePageView.as_view(), name='home'),\n #url(r'^formset$', DefaultFormsetView.as_view(), name='formset_default'),\n #url(r'^form$', DefaultFormView.as_view(), name='form_default'),\n #url(r'^form_by_field$', DefaultFormByFieldView.as_view(), name='form_by_field'),\n #url(r'^form_horizontal$', FormHorizontalView.as_view(), name='form_horizontal'),\n #url(r'^form_inline$', FormInlineView.as_view(), name='form_inline'),\n #url(r'^form_with_files$', 
FormWithFilesView.as_view(), name='form_with_files'),\n #url(r'^pagination$', PaginationView.as_view(), name='pagination'),\n #url(r'^misc$', MiscView.as_view(), name='misc'),\n url(r'', include('social.apps.django_app.urls', namespace='social')),\n url(r'^$', 'pets.views.login'),\n url(r'^home/$', 'pets.views.home'),\n url(r'^logout/$', 'pets.views.logout'),\n\n)\n","sub_path":"rescuingpaws/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433696228","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Evaluates one homework.\n\nUsage:\n ./commander.py directory - where directory contains (see submit.py)\n `archive.zip' `tests.zip' `config'\n\n\nThe script parses config and invokes vm_executor with\nthe requiered arguments.\n\nVMExecutor excepts files in `executor_jobs' so it's not safe\nto run two instances of commander simultaneously.\n\nWhen done the vmchecker.callback module passing in files retrieved\nfrom the vm or constructed on the commander.\n\"\"\"\n\nfrom __future__ import with_statement\n\n# Use simplejson or Python 2.6 json, prefer simplejson.\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\nimport ConfigParser\nimport time\nimport sys\nimport os\nfrom subprocess import Popen\n\nfrom . import callback\nfrom . import vmlogging\nfrom .paths import submission_config_file\nfrom .config import CourseConfig\n\n_logger = vmlogging.create_module_logger('commander')\n\n_FILES_TO_SEND = (\n 'vmchecker-stderr.vmr',\n 'build-stdout.vmr',\n 'build-stderr.vmr',\n 'run-stdout.vmr',\n 'run-stderr.vmr',\n 'run-km.vmr',\n 'grade.vmr',)\n_EXECUTOR_OVERHEAD = 300\n\n\n\ndef _run_callback(bundle_dir):\n \"\"\"Runs callback script to upload results\"\"\"\n abs_files = (os.path.join(bundle_dir, f) for f in _FILES_TO_SEND)\n callback.run_callback(submission_config_file(bundle_dir), abs_files)\n\n\n\n\n\ndef _run_executor(json_cfg_fname, executor_job_dir, assignment, timeout):\n \"\"\"Starts a job.\n\n Allthough it would be nicer to import vm-executor and just invoke\n methods from there, the vm-executor can get stuck (due to fauly\n pyvix interaction with the vmware server 1 daemon). We're already\n called directly from queue-manager. If we get stuck too, the whole\n tester-side vmchecker gets stuck.\n\n Because of this, vm-executor is launched in a separate process.\n \"\"\"\n\n # get the current time\n start = time.time()\n deadline = start + int(timeout) + _EXECUTOR_OVERHEAD\n\n\n # first just try to open the process\n try:\n cmd = ['vmchecker-vm-executor', json_cfg_fname]\n _logger.info('Begin homework evaluation. 
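The _run_executor body that continues below wraps a Popen in a poll/sleep loop against a wall-clock deadline, because commander targets Python 2. On Python 3 the standard library covers the same pattern directly; a condensed sketch (the return values are illustrative, not from the record):

import subprocess

def run_with_deadline(cmd, timeout):
    try:
        # blocks until the child exits or the deadline passes
        return subprocess.run(cmd, timeout=timeout).returncode
    except subprocess.TimeoutExpired:
        return None  # caller decides how to report the timeout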
Calling %s', cmd)\n\n popen = Popen(cmd)\n\n with open(os.path.join(executor_job_dir, 'grade.vmr'), 'w') as handler:\n print >> handler, 'ok'\n except OSError:\n _logger.exception('Cannot invoke VMExecutor.')\n with open(os.path.join(executor_job_dir, 'vmchecker-stderr.vmr'), 'a') as handler:\n print >> handler, 'Cannot run VMExecutor.'\n print >> handler, 'Please contact the administrators.'\n # if we cannot open the process, there is nothing more to be done\n return\n\n\n\n\n # waits for the the process to finish\n try:\n counter = 0\n while time.time() < deadline:\n counter += 1\n exit_code = popen.poll()\n\n if exit_code is None:\n # if process has not finished => continue to sleep\n _logger.debug('-- VMExecutor sleeping for 5 seconds, '\n 'exit_code is None: x=%d', counter)\n # polls every 5 seconds\n time.sleep(5)\n else:\n with open(os.path.join(executor_job_dir, 'vmchecker-stderr.vmr'), 'a') as handler:\n print >> handler, 'VMExecutor returned %d (%s)' % (\n exit_code, ['success', 'error'][exit_code < 0])\n\n # no reason to stay in the loop after process exit terminates\n popen = None\n return\n else:\n _logger.error(\"VMChecker timeouted on assignment `%s'\" % assignment)\n with open(os.path.join(executor_job_dir, 'vmchecker-stderr.vmr'), 'a') as handler:\n print >> handler, \"\"\"\\ VMExecutor successfuly started,\n but it's taking too long. Check your sources,\n makefiles, etc and resubmit. If the problem\n persists please contact administrators.\"\"\"\n except:\n _logger.exception('Exception after starting VMExecutor.')\n with open(os.path.join(executor_job_dir, 'vmchecker-stderr.vmr'), 'a') as handler:\n print >> handler, \"\"\"\\ Error after starting VMExecutor.\n If the problem persists please contact\n administrators.\"\"\"\n finally:\n # release any leftover resources\n try:\n if not popen is None:\n import signal\n os.kill(popen.pid, signal.SIGTERM)\n # can't do \"popen.kill()\" here because that's\n # python2.6 speciffic :(\n except:\n _logger.exception('Nested exception in kill(PID=%d)', popen.pid)\n\n\n\n\ndef _check_required_files(path):\n \"\"\"Checks that a set of files required by commander is present in\n the given path.\"\"\"\n found_all = True\n needed_files = ['archive.zip', 'tests.zip', 'submission-config', 'machine-config']\n found_files = os.listdir(path)\n not_found = []\n for need in needed_files:\n if not need in found_files:\n _logger.error('Could not find necessary file [%s] in [%s]' % (\n need, path))\n found_all = False\n not_found.append(need)\n if not found_all:\n raise IOError('Files ' + not_found + ' required for testing missing')\n\n\n\ndef _make_test_config(bundle_dir, assignment, mccfg, asscfg, tester_root_path):\n \"\"\"Returns an object with a configuration suitable for\n vm-executor\"\"\"\n timeout = asscfg.get(assignment, 'Timeout')\n machine = asscfg.get(assignment, 'Machine')\n kernel_messages_str = asscfg.get(assignment, 'KernelMessages')\n\n kernel_messages = True if int(kernel_messages_str) != 0 else False\n return {\n 'km_enable' : kernel_messages,\n 'host' : {\n 'vmx_path' : mccfg.get(machine, 'VMPath'),\n 'vmchecker_root' : tester_root_path,\n 'jobs_path' : bundle_dir,\n 'scripts_path' : bundle_dir},\n 'guest' : {\n 'username' : mccfg.get(machine, 'GuestUser'),\n 'password' : mccfg.get(machine, 'GuestPassword'),\n 'shell' : mccfg.get(machine, 'GuestShellPath'),\n 'root_path' : {\n 'native_style' : mccfg.get(machine, 'GuestBasePath'),\n 'shell_style' : mccfg.get(machine, 'GuestHomeInBash'),\n 'separator' : '/',\n },\n },\n 'test' : 
[\n            {\n                'input'  : ['archive.zip', 'tests.zip'],\n                'script' : ['build.sh'],\n                'output' : ['build-stdout.vmr', 'build-stderr.vmr'],\n                'timeout': int(timeout),\n            },\n            {\n                'input'  : [],\n                'script' : ['run.sh'],\n                'output' : ['run-stdout.vmr', 'run-stderr.vmr'],\n                'timeout': int(timeout)\n            }\n        ]\n    }\n\ndef _write_test_config(dst_file, bundle_dir, assignment, mccfg, asscfg, tester_root_path):\n    \"\"\"Write the test configuration to a json file to be passed in to\n    the vm-executor\"\"\"\n    with open(dst_file, 'w') as handler:\n        testcfg = _make_test_config(bundle_dir, assignment, mccfg, asscfg, tester_root_path)\n        testcfg_str = json.dumps(testcfg)\n        handler.write(testcfg_str)\n\n\ndef _get_assignment_id(bundle_dir):\n    \"\"\"Reads the assignment identifier from the config file of the\n    submission from bundle_dir\"\"\"\n    with open(submission_config_file(bundle_dir)) as handle:\n        config = ConfigParser.RawConfigParser()\n        config.readfp(handle)\n        assignment = config.get('Assignment', 'Assignment')\n    return assignment\n\ndef _get_machine_config(bundle_dir):\n    \"\"\"Returns a parser for the machine-config in the bundle dir\"\"\"\n    return CourseConfig(os.path.join(bundle_dir, 'machine-config'))\n\n\ndef prepare_env_and_test(tester_root_path, bundle_dir):\n    \"\"\"Prepare testing environment for vm-executor, create a config\n    file and run vm-executor\"\"\"\n\n    _check_required_files(bundle_dir)\n\n    assignment = _get_assignment_id(bundle_dir)\n    mccfg = _get_machine_config(bundle_dir)\n    asscfg = mccfg.assignments()\n    timeout = asscfg.get(assignment, 'Timeout')\n\n\n    json_cfg_fname = os.path.join(bundle_dir, 'vm_executor_config.json')\n    _write_test_config(json_cfg_fname, bundle_dir, assignment, mccfg, asscfg, tester_root_path)\n\n    try:\n        _run_executor(json_cfg_fname, bundle_dir, assignment, timeout)\n        _run_callback(bundle_dir)\n    except:\n        _logger.exception('cannot run callback')\n    _logger.info('all done')\n\n\ndef _print_usage():\n    \"\"\"Prints a help string\"\"\"\n    print >> sys.stderr, \"\"\"Usage:\n    ./commander.py course_id directory - where directory contains:\n        `archive.zip' `tests.zip' `config'\"\"\"\n","sub_path":"vmchecker/commander.py","file_name":"commander.py","file_ext":"py","file_size_in_byte":8819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306394220","text":"graph={}# hash table of every node's neighbors\r\ngraph[\"yuepu\"]={}\r\ngraph[\"yuepu\"][\"heijiao\"]=5\r\ngraph[\"yuepu\"][\"haibao\"]=0\r\n\r\ngraph[\"heijiao\"]={}\r\ngraph[\"heijiao\"][\"jita\"]=15\r\ngraph[\"heijiao\"][\"jiazigu\"]=20\r\n\r\n\r\ngraph[\"haibao\"]={}\r\ngraph[\"haibao\"][\"jita\"]=30\r\ngraph[\"haibao\"][\"jiazigu\"]=35\r\n\r\ngraph[\"jita\"]={}\r\ngraph[\"jita\"][\"gangqin\"]=20\r\n\r\ngraph[\"jiazigu\"]={}\r\ngraph[\"jiazigu\"][\"gangqin\"]=10\r\n\r\ngraph[\"gangqin\"]={}\r\n\r\ninfinity=float(\"inf\")# positive infinity\r\n# costs table\r\ncosts={}\r\ncosts[\"heijiao\"]=5\r\ncosts[\"haibao\"]=0\r\ncosts[\"jita\"]=infinity\r\ncosts[\"jiazigu\"]=infinity\r\ncosts[\"gangqin\"]=infinity\r\n\r\n# hash table storing each node's parent\r\nparents={}\r\nparents[\"heijiao\"]=\"yuepu\"\r\nparents[\"haibao\"]=\"yuepu\"\r\nparents[\"jita\"]=None\r\nparents[\"jiazigu\"]=None\r\nparents[\"gangqin\"]=None\r\n\r\nprocessed=[]# records nodes that have already been processed\r\n\r\ndef find_lowest_cost_node(costs):# find the lowest-cost node among the unprocessed ones\r\n\tlowest_cost=float(\"inf\")\r\n\tlowest_cost_node=None\r\n\tfor node in costs:\r\n\t\tcost=costs[node]\r\n\t\tif cost<lowest_cost and node not in processed:\r\n\t\t\tlowest_cost=cost\r\n\t\t\tlowest_cost_node=node\r\n\treturn lowest_cost_node\r\n\r\nnode=find_lowest_cost_node(costs)\r\nwhile node is not None:\r\n\tcost=costs[node]\r\n\tneighbors=graph[node]\r\n\tfor n in neighbors.keys():\r\n\t\tnew_cost=cost+neighbors[n]\r\n\t\tif costs[n]>new_cost:\r\n\t\t\tcosts[n]=new_cost\r\n\t\t\tparents[n]=node\r\n\t\t\t#print(parents[n],costs[n])\r\n\tprocessed.append(node)\r\n\tnode=find_lowest_cost_node(costs)\r\n\t\r\n\tprint(\"graph hash table\")\r\n\tfor k,v in graph.items():\r\n\t\tprint(k,\"\t\",v)\r\n\tprint(\"\\n\")\r\n\t\r\n\tprint(\"costs table\")\r\n\tfor k,v in costs.items():\r\n\t\tprint(k,\"\t\",v)\r\n\tprint(\"\\n\")\r\n\t\r\n\tprint(\"parents table\")\r\n\tfor k,v in parents.items():\r\n\t\tprint(k,\"\t\",v)\r\n\tprint(\"\\n\")","sub_path":"168206244/huangangqin.py","file_name":"huangangqin.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"576245502","text":"import string\ndef cleanword(str):\n    ss = \"\"\n    for letter in str:\n        if letter not in string.punctuation:\n            ss += letter\n    return ss\n\n#print(cleanword(\"what what\"))\n# print(cleanword(\"'now'\"))\n# print(cleanword(\"?+='w-o-r-d!,@$()'\"))\n\ndef has_dashdash(str):\n    length = len(str)\n    for i in range(0, length):\n        if str[i] == '-':\n            if i < length - 1:\n                if str[i + 1] == '-':\n                    return True\n    return False\n\n# print(has_dashdash(\"distance--but\"))\n# print(not has_dashdash(\"several\"))\n# print(has_dashdash(\"spoken--\"))\n# print(not has_dashdash(\"-yo-yo-\"))\n\ndef extract_words(str):\n    ss = \"\"\n    for letter in str:\n        if letter in string.punctuation:\n            ss += \" \"\n        else:\n            ss += letter\n    new_list = ss.split()\n    num = -1\n    for i in new_list:\n        num += 1\n        new_list[num] = i.lower()\n    return new_list\n\n# print(extract_words(\"Now is the time! 'Now', is the time? Yes, now\"))\n# print(extract_words(\"she tried to curtsey as she spoke--fancy\"))\n\ndef wordcount(substr, words):\n    count = 0\n    for i in words:\n        if i == substr:\n            count += 1\n    return count\n\n# ss = [\"now\", \"is\", \"time\", \"is\", \"now\", \"is\", \"is\"]\n# print(wordcount(\"now\", ss))\n# print(wordcount(\"is\", ss))\n# print(wordcount(\"time\", ss))\n# print(wordcount(\"frog\", ss))\n\n\ndef longestword(words):\n    longest = 0\n    sum = 0\n    for i in words:\n        sum = len(i)\n        if sum > longest:\n            longest = sum\n    return longest\n\n# print(longestword([\"a\", \"apple\", \"pear\", \"grape\"]))\n# print(longestword([\"a\", \"am\", \"I\", \"be\"]))\n# print(longestword([\"this\", \"supercalifragilisticexpialidocious\"]))\n# print(longestword([]))\n","sub_path":"wordtools.py","file_name":"wordtools.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10173274","text":"# mmm_talkingUI.py\r\n\r\nfrom appuifw import *\r\nfrom e32 import *\r\nfrom graphics import *\r\nfrom mmm_utils import *\r\n\r\ndef receiveLocation():\r\n    ao_sleep(20)\r\n    app.receivedLocationUI.run()\r\n    app.redraw(None)\r\n    \r\ndef shareLocation():\r\n    app.confirmingShareUI.run()\r\n    #app.loadMapUI.run()\r\n    app.redraw(None)\r\n    \r\nclass TalkingUI:\r\n    def __init__(self):\r\n        \r\n        self.talking_title = u\"Talking\"\r\n        self.talking_image = Image.open(\"C:\\\\Data\\\\Images\\\\talking1.jpg\")\r\n        self.talking_menu = [\r\n            (u\"Mute\", receiveLocation),\r\n            (u\"Loud Speaker\", doNothing),\r\n            (u\"Hold\", doNothing),\r\n            (u\"Share Location\", shareLocation)]\r\n        self.talking_lock = Ao_lock()\r\n        self.loadingMap_image = Image.open(\"C:\\\\Data\\\\Images\\\\loadingMap.jpg\")\r\n\r\n    def run(self):\r\n        def talking_quit():\r\n            self.talking_lock.signal()\r\n        \r\n        def talking_redraw(rect):\r\n            app.body.blit(self.talking_image)\r\n        \r\n        saveState()\r\n        app.screen = 
'normal'\r\n app.title = self.talking_title\r\n app.menu = self.talking_menu\r\n app.redraw = talking_redraw\r\n app.redraw(None)\r\n app.exit_key_handler = talking_quit\r\n self.talking_lock.wait()\r\n restoreState()\r\n \r\n# Prompting\r\n#receivedLocation_image = Image.open(\"C:\\\\Data\\\\Images\\\\promptingToShare.jpg\")\r\n#receivedLocation_menu = [(u\"Confirm\", temp)]\r\n\r\n\r\n#loadingMap_menu = [\r\n# (u\"Mute\", doNothing),\r\n# (u\"Loud Speaker\", doNothing),\r\n# (u\"Hold\", doNothing)]\r\n\r\n","sub_path":"MaraudersMapMobile/release/version 0.5/c/resource/mmm_talkingUI.py","file_name":"mmm_talkingUI.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375846688","text":"import asyncio\nimport json\n\nfrom aiohttp import web\nfrom funcy.colls import project\nfrom funcy.seqs import concat, distinct, flatten\n\nfrom app.sdk import sdk\n\nfrom .utils import prepare_bundle, prepare_varaibles\n\nWHITELISTED_ROOT_ELEMENTS = {\n \"launchContext\": lambda i: i[\"name\"],\n \"mapping\": lambda i: i[\"id\"],\n \"contained\": lambda i: i[\"id\"],\n \"sourceQueries\": lambda i: i.get(\"id\", i[\"localRef\"]),\n \"cqf-library\": lambda i: i[\"expression\"],\n}\n\nPROPOGATE_ELEMENTS = [\"itemContext\"]\n\n\n@sdk.operation([\"GET\"], [\"Questionnaire\", {\"name\": \"id\"}, \"$assemble\"])\nasync def assemble(operation, request):\n questionnaire = await sdk.client.resources(\"Questionnaire\").get(\n id=request[\"route-params\"][\"id\"]\n )\n root_elements = project(dict(questionnaire), WHITELISTED_ROOT_ELEMENTS.keys())\n questionnaire[\"item\"] = await assemble_questionnaire(\n questionnaire, questionnaire[\"item\"], root_elements\n )\n dict.update(questionnaire, root_elements)\n questionnaire.assembledFrom = questionnaire[\"id\"]\n del questionnaire[\"id\"]\n return web.json_response(questionnaire, dumps=lambda a: json.dumps(a, default=list))\n\n\nasync def load_sub_questionanire(root_elements, parent_item, item):\n if \"subQuestionnaire\" in item:\n sub = await sdk.client.resources(\"Questionnaire\").get(id=item.subQuestionnaire)\n\n varaibles = prepare_varaibles(item)\n\n sub = prepare_bundle(sub, varaibles)\n\n propogate = project(dict(sub), PROPOGATE_ELEMENTS)\n dict.update(parent_item, propogate)\n\n root = project(dict(sub), WHITELISTED_ROOT_ELEMENTS.keys())\n for key, value in root.items():\n uniqness = WHITELISTED_ROOT_ELEMENTS[key]\n current = root_elements.get(key, [])\n new = concat(current, value)\n root_elements[key] = distinct(new, uniqness)\n return sub[\"item\"]\n\n return item\n\n\nasync def assemble_questionnaire(parent, questionnaire_items, root_elements):\n with_sub_items = questionnaire_items\n while len([i for i in with_sub_items if \"subQuestionnaire\" in i]):\n with_sub_items_futures = (\n load_sub_questionanire(root_elements, parent, i) for i in with_sub_items\n )\n with_sub_items = list(flatten(await asyncio.gather(*with_sub_items_futures)))\n\n resp = []\n for i in with_sub_items:\n if \"item\" in i:\n i[\"item\"] = await assemble_questionnaire(i, i[\"item\"], root_elements)\n resp.append(i)\n return resp\n","sub_path":"app/operations/assemble.py","file_name":"assemble.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613968831","text":"#!/usr/bin/env python3.7\n# -*- coding: utf-8 -*-\n#\n# RKI Data Parser\n#\n# Requires Python 3.7, pandas and dfply\n# You can install 
pandas and dfply with one command:\n# pip install pandas dfply\n#\n# Usage: python rki-update.py [RKI-filename.csv]\n\nprint(\"RKI Updater script\")\n\ntry:\n    import sys\n    import pandas as pd\n    from dfply import *\nexcept:\n    print(\"Requires pandas and dfply modules.\")\n    print(\"You can install them with: pip install pandas dfply\")\n    sys.exit(1)\n\nif len(sys.argv) != 2:\n    print(\"Usage: python rki-update.py [RKI-filename.csv]\")\n    sys.exit(1)\n\nprint(\"Reading csv:\", sys.argv[1])\n\ncsv = pd.read_csv(sys.argv[1], parse_dates=True)\n\n# split out dates\ncsv >>= separate(\n    X.Refdatum,\n    [\"year\", \"dash1\", \"month\", \"dash2\", \"day\"],\n    sep=[4, 5, 7, 8, 10],\n    convert=True,\n    remove=False,\n) >> select(X.Bundesland, X.Landkreis, X.AnzahlFall, X.year, X.month, X.day)\n\n# Berlin\nfname = \"berlin-cases.csv\"\nprint(fname)\nberlin = (\n    csv\n    >> filter_by(X.Bundesland == \"Berlin\")\n    >> group_by(X.Bundesland, X.year, X.month, X.day)\n    >> summarize(cases=X.AnzahlFall.sum())\n    >> arrange(X.year, X.month, X.day)\n)\n\nberlin.to_csv(fname, index=False, columns=[\"year\", \"month\", \"day\", \"cases\"])\n\n\n# München\nfname = \"munich-cases.csv\"\nprint(fname)\nmunich = (\n    csv\n    >> filter_by(X.Landkreis == \"SK München\")\n    >> group_by(X.Landkreis, X.year, X.month, X.day)\n    >> summarize(cases=X.AnzahlFall.sum())\n    >> arrange(X.year, X.month, X.day)\n)\n\nmunich.to_csv(fname, index=False, columns=[\"year\", \"month\", \"day\", \"cases\"])\n\n\n# Heinsberg\nfname = \"heinsberg-cases.csv\"\nprint(fname)\nheinsberg = (\n    csv\n    >> filter_by(X.Landkreis == \"LK Heinsberg\")\n    >> group_by(X.Landkreis, X.year, X.month, X.day)\n    >> summarize(cases=X.AnzahlFall.sum())\n    >> arrange(X.year, X.month, X.day)\n)\n\nheinsberg.to_csv(fname, index=False, columns=[\"year\", \"month\", \"day\", \"cases\"])\n\nprint(\"--Done!\")\n","sub_path":"scripts/rki-update.py","file_name":"rki-update.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"105791940","text":"\"\"\"\nCMU 48721 Fall 2019\nAssignment 3 part 1: \"The right time\"\nNote:\n\t1: DO NOT use the \"print\" function, it will block the code,\n\t\tuse \"logger.info()\" instead.\n\t2: ob is a python list with the following items:\n\t\t0: hour (0-23)\n\t\t1: min (0-59)\n\t\t2: weekday (0-7, 0 is Monday)\n\t\t3: outdoor air temperature (C)\n\t\t4: outdoor air RH (%)\n\t\t5: diffuse solar radiation (W/m2)\n\t\t6: direct solar radiation (W/m2)\n\t\t7-16: IAT of 10 zones (C)\n\t\t17-26: IAT cooling setpoint of 10 zones (C)\n\t\t27-36: PMV of 10 zones (-999 if not occupied)\n\t\t37: DX heating coil electric demand (W)\n\t\t38: HVAC total electric demand (W)\n\"\"\"\nimport gym, eplus_env, logging\n\nlogger = logging.getLogger('Ctrl-Tester');\nlogger.setLevel(logging.DEBUG);\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG);\nlogger.addHandler(ch);\nlogger.info('Running the simulation test...')\nlogger.info('Environment warm-up may take time.')\nenv = gym.make('CSL-short-cycle-v1');\nob, is_terminal = env.reset();\nlogger.info('Environment warm-up is done.')\ncycle_count = 9999;\n\nwhile is_terminal == False:\n# !!DO NOT change the above lines!!\n# The following lines are the baseline control strategy\n# You should implement your control strategy here\n\tall_pmvs = ob[27:37] # Get all PMVs\n\teff_pmvs = []; # Remove -999 from the PMVs\n\tfor pmv in all_pmvs:\n\t\tif pmv != -999:\n\t\t\teff_pmvs.append(pmv);\n\tmin_pmv = min(eff_pmvs) if len(eff_pmvs) > 0 
else 0; # The min occupied PMV\n\tall_iats = ob[7:17];\n\tmin_iat = min(all_iats); # The min IAT\n\tdx_status = 1 if ob[37] > 0 else 0; # DX heating on/off state\n\tact = dx_status;\n\tif cycle_count >= 3:\n\t\t# Turn on heating if the conditions allow\n\t\tif min_pmv < -0.5 or min_iat < 18:\n\t\t\tact = 1;\n\t\telse:\n\t\t\tact = 0;\n\tif act == dx_status:\n\t\t# Remember cycle number\n\t\tcycle_count += 1;\n\telse:\n\t\tcycle_count = 1;\n\tlogger.info('This observation is: %s'%ob);\n\tob, is_terminal = env.step([act])\n# !!DO NOT change the following lines!!\nenv.end_env();","sub_path":"csl_short_cycle_starter.py","file_name":"csl_short_cycle_starter.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454551569","text":"from django.dispatch import receiver\nfrom django_mailbox import signals as django_mailbox_signals\nfrom .models import Issue\n\nimport logging\nLOG = logging.getLogger(__name__)\n\n@receiver(django_mailbox_signals.message_received)\ndef email_message_received(sender, message, **kwargs):\n conn = sender.connection\n if conn.needs_emptying:\n # Ignore this message as we are waiting for the mailbox to empty\n return\n\n issue = Issue.objects.create(\n message=message,\n connection=conn,\n )\n\n try:\n issue.process()\n except:\n LOG.exception('failed processing issue #{}'.format(issue.id))\n import traceback; traceback.print_exc()\n raise\n","sub_path":"lightr/signalhandlers.py","file_name":"signalhandlers.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"648674616","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 15 10:09:42 2015\n\n@author: jack.gang\n\"\"\"\n\nimport time\nfrom EulerFunctions import isPrime#, findPrimeFactors\nimport numpy as np\n\nstart = time.clock()\n\n#def totient(n, primeList):\n# primeFactors = findPrimeFactors(n, primeList)\n# nonTotientList = []\n# for prime in primeFactors:\n# num = prime\n# multiplier = 2\n# while num < n:\n# if num not in nonTotientList:\n# nonTotientList.append(num)\n# num = multiplier*prime\n# multiplier += 1\n# return n - 1 - len(nonTotientList)\n\nprimeList = []\nanswer = 0\n# don't actually need to calculate quotient, the n's with the largest quotient are just products of prime numbers\nfor n in range(2,1000001):\n if isPrime(n):\n primeList.append(n)\n if np.prod(primeList) > 1000000:\n answer = np.prod(primeList[:-1])\n break\n# ratio = n / totient(n, primeList)\n# if ratio > maxRatio:\n# maxRatio = ratio\n# answer = n\n\nelapsed = time.clock() - start\n \nprint(str(answer) + \" found in \" + str(elapsed) + \" seconds\")\n","sub_path":"069 - Totient matximum.py","file_name":"069 - Totient matximum.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70069548","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n\nfrom typing import List, Union\n\nfrom fsspec import AbstractFileSystem\n\nfrom polyaxon.logger import logger\nfrom polyaxon.utils.list_utils import to_list\nfrom polyaxon.utils.path_utils import check_or_create_path\n\n\ndef download_file_or_dir(\n    fs: AbstractFileSystem,\n    path_from: str,\n    path_to: str,\n    is_file: bool,\n    check_path: bool,\n):\n    if check_path:\n        is_file = fs.isfile(path_from)\n    check_or_create_path(path_to, is_dir=not is_file)\n    fs.download(rpath=path_from, lpath=path_to, recursive=not is_file)\n\n\ndef delete_file_or_dir(\n    fs: AbstractFileSystem,\n    subpath: Union[str, List[str]],\n    is_file: bool = False,\n):\n    subpath = to_list(subpath, check_none=True)\n    for sp in subpath:\n        try:\n            fs.delete(path=sp, recursive=not is_file)\n        except Exception as e:\n            logger.info(\"Could not delete path %s\\nError: %s\", sp, e)\n","sub_path":"core/polyaxon/fs/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"354779439","text":"\"\"\"\r\nGiven an array A of positive integers of size N, where each value represents the number of chocolates in a packet.\r\nEach packet can have a variable number of chocolates. There are M students, and the task is to distribute the\r\nchocolate packets such that:\r\n\r\n1. Each student gets one packet.\r\n2. The difference between the number of chocolates given to the student having the packet with maximum chocolates\r\nand the student having the packet with minimum chocolates is minimum.\r\n\r\nInput:\r\n\r\nThe first line of input contains an integer T, denoting the number of test cases. Then T test cases follow.\r\nEach test case consists of three lines. The first line of each test case contains an integer N denoting the\r\nnumber of packets. The next line contains N space separated values of the array A denoting the values of each\r\npacket. The third line of each test case contains an integer M denoting the number of students.\r\n\r\nOutput:\r\n\r\nFor each test case, print the minimum difference on a new line.\r\n\r\nConstraints:\r\n\r\n1 <= T <= 100\r\n1 <= N <= 10^7\r\n1 <= Ai <= 10^18\r\n1 <= M <= N\r\n\r\nExample:\r\n\r\nInput:\r\n2\r\n8\r\n3 4 1 9 56 7 9 12\r\n5\r\n7\r\n7 3 2 4 9 12 56\r\n3\r\n\r\nOutput:\r\n6\r\n2\r\n\r\nExplanation:\r\nTestcase 1: The minimum difference between maximum chocolates and minimum chocolates is 9-3=6\r\n\r\n\"\"\"\r\n\r\ndef ChocolateDifference(n,m,arr):\r\n    \"\"\"\r\n    Returns the minimum possible difference between the maximum and minimum number of\r\n    chocolates received by the m students.\r\n    Input : n - number of elements in arr\r\n            m - number of students\r\n            arr - the n chocolate packet sizes\r\n    returns : chocolate_diff - minimum difference between the maximum and minimum\r\n              chocolates received by the m students\r\n\r\n    Time Complexity : O(nlogn) # for sorting\r\n    \"\"\"\r\n    chocolate_diff=pow(2,64)-1\r\n    arr=sorted(arr)\r\n    for i in range(len(arr)-m+1):\r\n        current_diff=arr[i+m-1]-arr[i]\r\n        chocolate_diff=min(chocolate_diff,current_diff)\r\n    return chocolate_diff\r\n\r\n\r\n# enter the number of test cases\r\nt=int(input())\r\nwhile t>0:\r\n    n=int(input()) # enter the size of the array\r\n    input_arr=list(map(int, input().split(' ')[:n])) # get the chocolate packet sizes\r\n    m=int(input()) # enter the number of students\r\n    print(ChocolateDifference(n,m,input_arr))\r\n    t-=1","sub_path":"CODING PRACTICE PROBLEMS/Arrays/9. Chocolate Difference.py","file_name":"9. Chocolate Difference.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"632260683","text":"import json\nimport pandas as pd\nimport urllib3\nimport numpy as np\nimport re\n\nhttp = urllib3.PoolManager()\nvotd = json.loads(http.request('GET',\"https://public.tableau.com/api/gallery?page=0&count=10000&galleryType=viz-of-the-day&language=any\").data)\ndf = pd.json_normalize(votd['items'], max_level=0)\n\n# initialise dataframes\nworkbook_df =[]\nattributions_df = []\n\nfor i in df.index:\n    print(i)\n    workbook_url = 'https://public.tableau.com/profile/api/single_workbook/' + votd['items'][i]['workbookRepoUrl']\n    workbook = json.loads(http.request('GET',workbook_url).data)\n    workbook = pd.json_normalize(workbook)\n    \n    if 'error.message' in workbook.columns:\n        source_url = df['sourceUrl'][i]\n        retry = re.search('/views/(.+?)/', source_url)\n        if retry is not None:\n            retry = retry.group(0)[7:-1]\n            workbook_url = 'https://public.tableau.com/profile/api/single_workbook/' + retry\n            workbook = json.loads(http.request('GET',workbook_url).data)\n            workbook = pd.json_normalize(workbook)\n    workbook['workbookRepoUrl'] = votd['items'][i]['workbookRepoUrl']\n\n    if 'error.message' not in workbook.columns:\n        attributions = pd.json_normalize(workbook['attributions'][0])\n        attributions['workbookRepoUrl'] = votd['items'][i]['workbookRepoUrl']\n\n        workbook_df.append(workbook)\n        attributions_df.append(attributions)\n\n# see pd.concat documentation for more info\nworkbook_df = pd.concat(workbook_df)\nattributions_df = pd.concat(attributions_df)\n\n# join VOTD with workbook and attributions dataframes\ndf = pd.merge(df,workbook_df, on='workbookRepoUrl',how='left')\ndf = pd.merge(df,attributions_df, on='workbookRepoUrl',how='left')\n\n# remove columns that have been json_normalized to additional columns\ndel 
df['workbook']\ndel df['attributions']\n\n# if there are error messages remove them\nif 'error.message' in df.columns:\n    del df['error.message']\n    del df['error.id']\n\n# convert lists to comma separated strings\ndf['types'] = [','.join(map(str, l)) for l in df['types']]\ndf['topics'] = [','.join(map(str, l)) for l in df['topics']]\ndf['badges'] = [','.join(map(str, l)) for l in df['badges']]\n\n# rename attribution columns\ndf.rename(columns={'authorProfileName_y':'attributed_authorProfileName'}, inplace=True)\ndf.rename(columns={'workbookName':'attributed_workbookName'}, inplace=True)\ndf.rename(columns={'authorDisplayName':'attributed_authorDisplayName'}, inplace=True)\ndf.rename(columns={'workbookViewName':'attributed_workbookViewName'}, inplace=True)\n\n# rename conflicts between gallery and workbook data\ndf.rename(columns={'authorProfileName_x':'authorProfileName'}, inplace=True)\ndf.rename(columns={'title_x':'gallery_title'}, inplace=True)\ndf.rename(columns={'description_x':'gallery_description'}, inplace=True)\ndf.rename(columns={'title_y':'viz_title'}, inplace=True)\ndf.rename(columns={'description_y':'viz_description'}, inplace=True)\n\ndf = df.drop_duplicates()\n\n# Save locally\n#df.to_csv('data/tableau_public_votd.csv', index=False)\nprint(df)\n","sub_path":"Python/get_votd_data.py","file_name":"get_votd_data.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"533999837","text":"#!/usr/bin/env python\n\nimport os, sys, shutil\nimport subprocess\n\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nDIST_DIR = os.path.join(THIS_DIR, \"dist\")\nsys.path.append(os.path.join(THIS_DIR, \"src\", \"Selenium2Library\"))\nsys.path.append(os.path.join(THIS_DIR, \"doc\"))\nsys.path.append(os.path.join(THIS_DIR, \"demo\"))\n\ndef main():\n    clear_dist_folder()\n    run_doc_gen()\n    run_sdist()\n    run_win_bdist()\n    run_demo_packaging()\n\ndef clear_dist_folder():\n    if os.path.exists(DIST_DIR):\n        shutil.rmtree(DIST_DIR)\n    os.mkdir(DIST_DIR)\n\ndef run_doc_gen():\n    import generate\n    generate.main()\n\ndef run_sdist():\n    subprocess.call([\"python\", os.path.join(THIS_DIR, \"setup.py\"), \"sdist\", \"--formats=gztar,zip\"])\n\ndef run_win_bdist():\n    if os.name == 'nt':\n        subprocess.call([\"python\", os.path.join(THIS_DIR, \"setup.py\"), \"bdist\", \"--formats=wininst\", \"--plat-name=win32\"])\n        subprocess.call([\"python\", os.path.join(THIS_DIR, \"setup.py\"), \"bdist\", \"--formats=wininst\", \"--plat-name=win-amd64\"])\n\ndef run_demo_packaging():\n    import package\n    package.main()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"build_dist.py","file_name":"build_dist.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"88899841","text":"#-*- coding: utf-8 -*-\n#from sparta.lib.base import *\nfrom pylons import request, response, session, tmpl_context as c, url\nfrom pylons.controllers.util import abort, redirect\nfrom sparta.lib.base import BaseController, render\n\nfrom sparta.model import *\nfrom ArchivePages import *\n\nimport json\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\n\nclass ProjectController(BaseController, ArchivePage):\n\t\n\tarchiveName = \"Project\"\n\t\n\t# Columns used by queries on the View page\n\tviewColStr = \"Project.Code,Project.Name,Project.Thumb,Project_Type.Name,Project_Stat.Name,Project.CreateDate,Project.DeadLine,Project.Content\"\n \n\t\n\tdef menu(self, id=None):\n\t\t\"\"\" Left-hand project menu \"\"\"\n
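\t\t# look up the project's name and code, plus the requesting user's access level\n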
메뉴 \"\"\"\n\t\t\n\t\tproInfo = Archive(\"Project\").getValues(id, \"Name, Code\")\n\t\taccLevel = session[\"UserType\"] if CheckNotAdmin() else None\n\t\t\n\t\tmenuRoot = []\n\t\tregFolders = {}\n\t\tsv.Tree.getMenuItems(menuRoot, regFolders, \"Tree.ProjectIDX == %s\" % id)\n\t\t\n\t\t\n\t\t# >> 히든 어떻게좀 해라\n\t\t# 메뉴 구성 시작 --------------------------------------------------------------------\n\t\t#CmdRoot = {\"text\":u\"프로젝트 기능\", \"leaf\":False, \"expanded\":True}\n\t\t#CmdRoot[\"children\"] = [{\"text\" : u\"샷 추가\", \"leaf\":True, \"iconCls\":\"icon-plus\", \"click\":\"openShotWin(null,\"+id+\");\"},\n\t\t#\t\t\t\t\t\t{\"text\" : u\"스캔 인 등록\", \"leaf\":True, \"iconCls\":\"icon-plus\", \"click\":\"openPlugWin(null,\"+id+\");\"}]\n\t\t#menuRoot.append(CmdRoot)\n\t\t\n\t\t\n\t\t## 프로젝트 게시판 ------------------------\n\t\tBoardRoot = {\"text\":u\"프로젝트 게시판\", \"leaf\":False, \"expanded\":True, \"ntype\" : \"Board\"} \n\t\tBoardRoot[\"children\"] = []\n\t\t\n\t\tRDB = Archive(\"AdminBoard\").Search(columns=\"AdminBoard.BCode,AdminBoard.BName\", where=\"AdminBoard.ByProject == %s\" % id, order=\"AdminBoard.SortNumber ASC\", accfield=\"AdminBoard.AccessView\", acclevel=accLevel)\n\t\t\n\t\tfor board in RDB[1]:\n\t\t\tbDic = {\"text\": board[\"AdminBoard_BName\"], \"ntype\" : \"Board\", \"title\":board[\"AdminBoard_BName\"], \"leaf\":True, \"iconCls\":\"icon-comment\", \"taburl\":\"/board/list/\"+board[\"AdminBoard_BCode\"]}\n\t\t\tBoardRoot[\"children\"].append(bDic)\n\t\t\n\t\tif\tlen(BoardRoot[\"children\"]) > 0:\n\t\t\tmenuRoot.append(BoardRoot)\n\t\t\n\t\treturn jsonOutput(menuRoot)\n\t\n\tdef context_menu_tree(self, id):\n\t\t\n\t\tmenuRoot = []\n\t\tregFolders = {}\n\t\tsv.Tree.getMenuItems2(menuRoot, regFolders, \"Tree.ProjectIDX == %s,Tree.IsSystem == 0\" % id)\n\t\t\n\t\treturn jsonOutput(menuRoot)\n\n\n\tdef view2(self, id, id2=True):\n\t\t\n\t\tc.attrStore = self.custom_listup(id)\n\t\t\n\t\t# 게시판 표기\t\t \n\t\t#bData = Archive(\"AdminBoard\").getValues(\"AdminBoard.BType == 3,AdminBoard.ByProject == %s\" % id, \"AdminBoard.BName,AdminBoard.BCode\")\t\t \n\t\t#if (bData):\n\t\t#\t c.BoardID = bData[\"BCode\"] \n\t\t#\t c.BoardName = bData[\"BName\"]\n\t\t\n\t\t# 부모클래스의 원본 함수 실행\t\t\n\t\treturn ArchivePage.view2(self, id, id2)\n\n\t# 현재 사용 가능한(DONE이 아닌) 프로젝트 리스트 반환\n\tdef available(self):\n\t\tlist_q = Archive(\"Project\").getRecords(\"Project.StatCode != DON\", \"Project.IDX,Project.Name\")\n\t\t\n\t\tjsonList = {\"rows\": [{\"projectId\": u\"all\", \"projectName\": \"전체\"}]}\n\t\t\n\t\tfor row in list_q:\n\t\t\tjsonList[\"rows\"].append(dict(\n\t\t\t\tprojectId=row[\"IDX\"],\n\t\t\t\tprojectName=row[\"Name\"],\n\t\t\t))\n\t\t\n\t\treturn json.dumps(jsonList)\n","sub_path":"Sparta/sparta/controllers/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474150377","text":"# author: AI X\n\nfrom agents import BasicAgent\nfrom environment import Environment\nfrom numpy import matrix\nimport json\n\nwith open(\"environment.json\") as json_file:\n data = json.load(json_file)\nmyComplexMazeMatrix = matrix( data['environment'] )\n\nmyInitialAgentRobotPositionArray = [1,1]\nmyEnvironment = Environment( myComplexMazeMatrix, myInitialAgentRobotPositionArray, True)\nmyBasicAgent = BasicAgent()\nfor i in range(100000):\n if myEnvironment.is_the_maze_clean():\n break\n myBasicAgent.play(myEnvironment)\n\nprint('Basic Agent total reward:', 
myBasicAgent.total_reward)\n","sub_path":"vaccum_cleaner_simulation/simu.py","file_name":"simu.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"452197769","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\n\nclass ZoneType1(models.Model):\n _name = 'archives.zone_type1'\n _description = u'无菜单档案:地区分类1'\n\n _sql_constraints = [\n ('name_unique',\n 'UNIQUE(name)',\n \"已存在同名地区分类1\"),\n ]\n\n name = fields.Char(string=u'地区分类1', required=True)\n\n @api.multi\n def copy(self, default=None):\n default = dict(default or {})\n\n copied_count = self.search_count(\n [('name', '=like', u\"Copy of {}%\".format(self.name))])\n if not copied_count:\n new_name = u\"Copy of {}\".format(self.name)\n else:\n new_name = u\"Copy of {} ({})\".format(self.name, copied_count)\n\n default['name'] = new_name\n return super(ZoneType1, self).copy(default)\n","sub_path":"archives/models/no_page/zone_type1.py","file_name":"zone_type1.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"68709101","text":"from __future__ import division\r\nimport numpy\r\nimport matplotlib.pyplot as pyplot\r\nUSER = \"Leilah Mackenzie\"\r\nUSER_ID = \"gmxc22\"\r\n\r\ndef f(x) :\r\n '''The function f (x) = cos(x), as outlined in assessment task 1'''\r\n return numpy.cos(x)\r\ndef g(x) :\r\n '''Function returns the analytical derivative of f(x) w.r.t x'''\r\n return -1*numpy.sin(x)\r\ndef g_bdm(x, dx):\r\n '''Function estimates the derivative of f(x), denoted by f_derivative, w.r.t x using the backwards difference method'''\r\n f_derivative= (f(x) - f(x-dx))/dx\r\n return f_derivative\r\n\r\n#set up graph plot between -2Pi < x < 2Pi\r\nxs = numpy.linspace(-2*numpy.pi, 2*numpy.pi,100)\r\npyplot.figure(figsize=(8,4))\r\npyplot.xlabel(' x ')\r\npyplot.ylabel(' Error in derivative estimate')\r\npyplot.title('Analysis of the error for different dx values using the B.D. method')\r\n\r\n\r\n#dx chosen to be too small, too large and well chosen\r\nys = g_bdm(xs, dx = 1e-16) - g(xs)\r\npyplot.plot(xs, ys, color='blue', label='dx=e-16 (too small) ')\r\n\r\nys = g_bdm(xs, dx = 0.5) - g(xs)\r\npyplot.plot(xs, ys, color='red', label='dx=0.5 (too large)')\r\n\r\nys = g_bdm(xs, dx = 1e-5) - g(xs)\r\npyplot.plot(xs, ys, color='green', label='dx=e-5 (well chosen)')\r\npyplot.legend(prop={'size':9})\r\n\r\npyplot.show()\r\n\r\nANSWER1 = '''Python has a maximum precision of 10^-16. Any numbers stored that are smaller\r\nthan this lose accuracy, which is shown by the 'weird' blue plot. 
The red plot shows a significant\r\nerror when the interval dx is too large since the backward difference method is not representative\r\nof the gradient at that point'''\r\n","sub_path":"1 - backwards difference method derivatives.py","file_name":"1 - backwards difference method derivatives.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47322355","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nThis module test the results on LFR networks\n\nExample:\n Execute the code to test on LFR network::\n\n $ python LFR.py\n\nResearch article is available at:\n http://...\n\n\"\"\"\n\nimport networkx as nx\nimport os\nimport time\nimport pickle\nfrom networkx.algorithms.community.modularity_max import * \nfrom networkx.algorithms.community import LFR_benchmark_graph\nfrom sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score\nfrom heatmap import hist\nimport numpy as np\n\nfrom pylouvain import PyLouvain\nfrom run import multiscale\n\n\ndef LFR(n, tau1, tau2, mu, min_com_size, force = False):\n # enforce regeneration if force==True \n path = \"data/LFR/LFR_%d_%.2f_%.2f_%.2f.gpickle\" %(n, tau1, tau2, mu)\n if not force and os.path.isfile(path): \n G = nx.read_gpickle(path)\n return G\n else:\n G = LFR_benchmark_graph(n, tau1, tau2, mu, average_degree = 8, min_community = min_com_size, seed=0)\n print(\"write gpickle file\", path)\n nx.write_gpickle(G, path)\n return G\n\nif __name__ == \"__main__\":\n print(\"Start!\")\n verbose = True \n\n #=========== Global Parameters ===========#\n _network_size = 5000\n _min_com_size = 5 \n\n #=========== Generate Graph ==============#\n\n G = LFR(n = _network_size, tau1 = 3.0, tau2 = 1.5, mu = 0.25, min_com_size = _min_com_size, force = True)\n\n #G = LFR(n = 100, tau1 = 2.5, tau2 = 1.2, mu = 0.15)\n print(nx.info(G))\n\n print(\"get ground truth\")\n gnc = {frozenset(G.nodes[v]['community']) for v in G}\n map_comm = {v:i for i, c in enumerate(gnc) for v in c}\n gnc_list = [map_comm[k] for k in G.nodes()]\n\n sizes = [len(i) for i in gnc]\n gnc_sizes = sorted(sizes)\n verbose and print(\"ground truth community sizes=\", gnc_sizes)\n\n nodes = list(G.nodes())\n edges = [((a, b), 1) for a, b in G.edges()] \n\n #=========== Benchmark Fast Greedy ===============#\n print(\"Start Fast Greedy community detection\")\n\n #start = time.time()\n #commsFG = greedy_modularity_communities(G)\n #end = time.time()\n\n #commsFG_sizes = sorted([len(commsFG[i]) for i in range(len(commsFG))])\n #verbose and print(commsFG_sizes)\n\n #map_comm = {v:i for i, c in enumerate(commsFG) for v in c}\n #a = [map_comm[k] for k in G.nodes()]\n #print(\"FastGreedy Algorithm ARI=\", adjusted_rand_score(a, b), \"NMI=\", normalized_mutual_info_score(a, b))\n #print(\"which takes\", end - start, \"seconds\")\n\n #=========== Benchmark Louvain ===============#\n '''\n print(\"Start Louvain community detection\")\n\n #x, y1, y2, y3, y4 = [], [], [], [], []\n for gamma in np.linspace(0.5, 6.0, num=12):\n start = time.time()\n pyl = PyLouvain(nodes, edges)\n commsLV, q = pyl.apply_method(gamma)\n end = time.time()\n\n map_comm = {v:i for i, c in enumerate(commsLV) for v in c}\n LV_list = [map_comm[k] for k in G.nodes()]\n print(gamma)\n print(\"Louvain Algorithm ARI=\", adjusted_rand_score(LV_list, gnc_list), \"NMI=\", normalized_mutual_info_score(LV_list, gnc_list))\n print(\"which takes\", end - start, \"seconds\")\n print()\n\n #x.append(gamma)\n 
#y1.append(adjusted_rand_score(LV_list, gnc_list))\n        #y2.append(normalized_mutual_info_score(LV_list, gnc_list))\n        #y3.append(end - start)\n\n    #pickle.dump({'x':x,'y1':y1,'y2':y2,'y3':y3}, open(\"tmp/save_louvain_generalized_modularity.p\", \"wb\" ) )\n    '''\n\n    #=========== Multi-scale Community Detection ===============#\n    print(\"Start Multi-scale Community Detection\")\n\n    x, y1, y2, y3, y4 = [], [], [], [], []\n    for gamma in np.linspace(0.4, 0.9, num=11):\n\n        print(gamma)\n\n        start = time.time()\n        commsMS = multiscale(nodes, edges, gamma, verbose = False)\n        end = time.time()\n\n        commsMS_sizes = sorted([len(commsMS[i]) for i in range(len(commsMS))])\n        verbose and print(commsMS_sizes)\n        verbose and print(len(commsMS_sizes))\n        \n        map_comm = {v:i for i, c in enumerate(commsMS) for v in c}\n        MS_list = [map_comm[k] for k in G.nodes()]\n        print(\"Multi-scale Algorithm ARI=\", adjusted_rand_score(MS_list, gnc_list), \"NMI=\", normalized_mutual_info_score(MS_list, gnc_list))\n        print(\"which takes\", end - start, \"seconds\")\n        print()\n\n        x.append(gamma)\n        y1.append(adjusted_rand_score(MS_list, gnc_list))\n        y2.append(normalized_mutual_info_score(MS_list, gnc_list))\n        y3.append(end - start)\n    \n    pickle.dump({'x':x,'y1':y1,'y2':y2,'y3':y3}, open(\"fig/save_louvain_generalized_modularity.p\", \"wb\" ) )\n    exit(0)\n\n    #============ Plot community sizes ==============#\n    #print(\"Plot histogram of community sizes\")\n    #sizes_distri = {\"Ground Truth\": gnc_sizes, \"Modularity\": commsFG_sizes, \"Multiscale\": commsMS_sizes}\n\n    #pickle.dump(sizes_distri, open('save%d.p' % _network_size, 'wb'))\n    #hist(sizes_distri, _network_size)\n","sub_path":"LFR_generate_result.py","file_name":"LFR_generate_result.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"407499906","text":"import collections\r\n\r\n#####\r\ndef validar1(lista):\r\n    for i in range(0, len(lista)):\r\n        if lista[i] == 1:\r\n            print(\"You cannot sell all the pants of one brand\")\r\n            return False\r\n    return True\r\n\r\ndef validar5(lista):\r\n    for i in range(0, len(lista)):\r\n        if lista[i] >= 5:\r\n            print(\"You cannot sell more than 5 pants of one brand\")\r\n            return False\r\n    return True\r\n\r\n\r\ndef validaciones(lista):\r\n    pantalonesAVender = list(lista)\r\n    Listamarcas=[]\r\n    for i in pantalonesAVender:\r\n        Listamarcas.append(i[0])\r\n\r\n    dicmar = collections.Counter(Listamarcas)\r\n    valpan=list(dicmar.values())\r\n    pasar = validar1(valpan)\r\n\r\n    ###### Check that no more than 5 of the same brand are sold\r\n    pasar = pasar and validar5(valpan)\r\n    return pasar\r\n\r\n\r\n###############################\r\n \r\n\r\n\r\ndef PedirTotales(inputList):\r\n    ######\r\n    N = inputList[0]\r\n    X = inputList[1]\r\n    return N,X\r\n\r\n\r\ndef CrearListaPantalones(N): \r\n    inputPantalones = [] \r\n    for i in range(N):\r\n        try:\r\n            arr = input(\"Brand and price of the pants\\n\").split()\r\n            arr[1] = int(arr[1])\r\n            inputPantalones.append(arr)  # store the parsed entry; the original never added it to the list\r\n        except Exception:\r\n            print(\"Only numbers are allowed for the price\")\r\n\r\n    inputPantalones.sort(key= lambda k: (k[1]), reverse=True )\r\n    return inputPantalones\r\ninputList=[] \r\n\r\ntry:\r\n    inputList = list(map(int,input(\"How many pants do you have and how many do you want to sell?\\n\").split()))\r\nexcept Exception:\r\n    print(\"Only numbers are allowed\")\r\n\r\n\r\n\r\nN,X = PedirTotales(inputList)\r\nlistapantalones = CrearListaPantalones(N)\r\n\r\nif validaciones(listapantalones)== False:\r\n    print(0)\r\nelse:\r\n    suma = 0\r\n    for i in listapantalones[:N-X]:\r\n        suma += i[1]\r\n    count= N-X\r\n    \r\n    print(count, 
suma)\n","sub_path":"ago-dic-2020/ErickEscarcega/MegaPracticaDos.py","file_name":"MegaPracticaDos.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124919529","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport shutil\nimport time\n\n\nclass FileTools():\n\n @classmethod\n def file_exist(cls, fullname, default_path='./'):\n path, name, extension = cls.splitname(fullname)\n name0 = fullname + extension\n name1 = default_path + name\n name2 = default_path + name + extension\n\n if os.path.isfile(fullname):\n return fullname\n elif os.path.isfile(name0):\n return name0\n elif os.path.isfile(name1):\n return name1\n elif os.path.isfile(name2):\n return name2\n\n @classmethod\n def splitname(cls, fullname):\n fullname, extension = os.path.splitext(fullname)\n path, name = os.path.split(fullname)\n return path, name, extension\n\n @classmethod\n def newname(cls, fullname, default='../data/temp.npy'):\n path, name, extension = cls.splitname(fullname)\n dpath, dname, dext = cls.splitname(default)\n\n if extension == '':\n extension = dext\n\n if path == '':\n path = dpath\n\n os.makedirs(path, exist_ok=True)\n\n if name == '':\n name = dname\n\n if os.path.isfile(path + '/' + name + '0' + extension):\n i = 0\n newname = name + str(i)\n while os.path.isfile(path + '/' + newname + extension):\n i += 1\n newname = name + str(i)\n name = newname\n else:\n name = name + '0'\n\n return path + '/' + name + extension\n\n @classmethod\n def lastname(cls, fullname, default='../data/temp.npy'):\n path, name, extension = cls.splitname(cls.newname(fullname, default))\n return path + '/' + name[:-1] + str(int(name[-1]) - 1) + extension\n\n @classmethod\n def move(cls, files, dest, copy=False, verbose=False):\n changes = list()\n newlist = list()\n for fullname in files:\n path, name, extension = cls.splitname(fullname)\n dpath, dname, dextension = cls.splitname(dest)\n\n if (path == dpath and name[:len(dname)] == dname):\n newlist.append(fullname)\n else:\n newname = cls.newname(dest)\n if copy:\n shutil.copyfile(fullname, newname)\n if verbose:\n print(fullname + ' copy to ' + newname)\n else:\n os.rename(fullname, newname)\n if verbose:\n print(fullname + ' move to ' + newname)\n\n newlist.append(newname)\n changes.append([fullname, newname])\n\n return newlist, changes\n\n\nclass PromptTools():\n\n @classmethod\n def select_prompt(cls, options, message='Select from list'):\n print(message)\n for i, option in enumerate(options):\n print('%2d) %s' % (i, option))\n\n prompt = 'Enter option (0-%d): ' % (len(options) - 1)\n number = input(prompt)\n try:\n number = int(number)\n except:\n number = -1\n while 0 > number or number > len(options) - 1:\n number = input(prompt)\n try:\n number = int(number)\n except:\n number = -1\n return number\n\n @classmethod\n def yn_prompt(cls, message, default='y'):\n choices = '[Y]/n' if default.lower() in ('y', 'yes') else 'y/[N]'\n choice = input(\"%s (%s) \" % (message, choices)).lower()\n values = ('y', 'yes', '') if choices == '[Y]/n' else ('y', 'yes')\n while choice not in ('yes', 'y', '', 'no', 'n'):\n choice = input(\"%s (%s) \" % (message, choices)).lower()\n return choice.strip().lower() in values\n\n\nclass LogTools():\n\n def __init__(self, filename, if_exist='a'):\n self._file = filename\n\n def time_stamp(self, message=None, answer=None, style='%X'):\n with open(self._file, 'a') as f:\n if message is not None:\n f.write(time.strftime(style) + \" >> \" + message + 
'\\n')\n if answer is not None:\n f.write(' ' * 8 + ' << {} \\n'.format(answer))\n f.write('\\n')\n\n def annontate(self, comment, style='%X'):\n with open(self._file, 'a') as f:\n f.write(time.strftime(style) + \" ## \" + comment + '\\n')\n f.write('\\n')\n\n def block(self, *args, border='#', inside=' ', align='<', width=70):\n\n template = '{left}{:{i}{a}{w}.{w}}{right}'\n params = {'left': border + ' ',\n 'right': ' ' + border,\n 'i': inside,\n 'a': align,\n 'w': width - 4}\n\n with open(self._file, 'a') as f:\n f.write(border * width + '\\n')\n for line in args:\n f.write(template.format(line, **params) + '\\n')\n f.write(border * width + '\\n\\n')\n\n def underline(self, text, style='-'):\n with open(self._file, 'a') as f:\n f.write(text + '\\n')\n f.write(style * len(text) + '\\n\\n')\n\n def tabulated_lines(self, lines, tab=4, space=' '):\n with open(self._file, 'a') as f:\n for line in lines:\n f.write(space * tab + line + '\\n')\n f.write('\\n')\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87531127","text":"import mxnet as mx\nimport numpy as np\nfrom MultiBoxDetection import myMultiBoxDetection, BB8MultiBoxDetection\n\n\nclass MultiBoxMetric(mx.metric.EvalMetric):\n \"\"\"Calculate metrics for Multibox training \"\"\"\n def __init__(self, eps=1e-8):\n super(MultiBoxMetric, self).__init__('MultiBox')\n self.eps = eps\n self.num = 7\n self.ovp_thresh = 0.5\n self.use_difficult = False\n self.name = ['CrossEntropy', 'loc_SmoothL1', 'loc_MAE', 'loc_MAE_pixel', 'bb8_SmoothL1', 'bb8_MAE', 'bb8_MAE_pixel']\n self.reset()\n\n def reset(self):\n \"\"\"\n override reset behavior\n \"\"\"\n if getattr(self, 'num', None) is None:\n self.num_inst = 0\n self.sum_metric = 0.0\n else:\n self.num_inst = [0] * self.num\n self.sum_metric = [0.0] * self.num\n\n def update(self, labels, preds):\n \"\"\"\n :param preds: [cls_prob, loc_loss, cls_label, bb8_loss, loc_pred, bb8_pred,\n anchors, loc_label, loc_pred_masked, loc_mae, bb8_label, bb8_pred_masked, bb8_mae]\n Implementation of updating metrics\n \"\"\"\n def iou(x, ys):\n \"\"\"\n Calculate intersection-over-union overlap\n Params:\n ----------\n x : numpy.array\n single box [xmin, ymin ,xmax, ymax]\n ys : numpy.array\n multiple box [[xmin, ymin, xmax, ymax], [...], ]\n Returns:\n -----------\n numpy.array\n [iou1, iou2, ...], size == ys.shape[0]\n \"\"\"\n ixmin = np.maximum(ys[:, 0], x[0])\n iymin = np.maximum(ys[:, 1], x[1])\n ixmax = np.minimum(ys[:, 2], x[2])\n iymax = np.minimum(ys[:, 3], x[3])\n iw = np.maximum(ixmax - ixmin, 0.)\n ih = np.maximum(iymax - iymin, 0.)\n inters = iw * ih\n uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \\\n (ys[:, 3] - ys[:, 1]) - inters\n ious = inters / uni\n ious[uni < 1e-12] = 0 # in case bad boxes\n return ious\n\n labels = labels[0].asnumpy()\n # get generated multi label from network\n cls_prob = preds[0]\n loc_loss = preds[1].asnumpy() # smoothL1 loss\n loc_loss_in_use = loc_loss[loc_loss.nonzero()]\n cls_label = preds[2].asnumpy()\n bb8_loss = preds[3].asnumpy()\n loc_pred = preds[4]\n bb8_pred = preds[5]\n anchors = preds[6]\n # anchor_in_use = anchors[anchors.nonzero()]\n bb8dets = BB8MultiBoxDetection(cls_prob, loc_pred, bb8_pred, anchors, nms_threshold=0.5, force_suppress=False,\n variances=(0.1, 0.1, 0.2, 0.2), nms_topk=400)\n bb8dets = bb8dets.asnumpy()\n\n loc_label = preds[7].asnumpy()\n loc_label_in_use = 
loc_label[loc_label.nonzero()]\n loc_pred_masked = preds[8].asnumpy()\n loc_pred_in_use = loc_pred_masked[loc_pred_masked.nonzero()]\n loc_mae = preds[9].asnumpy()\n loc_mae_in_use = loc_mae[loc_mae.nonzero()]\n # loc_mae_pixel = np.abs((bb8dets[:, 0, 2:6] - labels[:, 0, 1:5]) * 300) # need to be refined\n # for each class, only consider the most confident instance\n loc_mae_pixel = []\n bb8_mae_pixel = []\n for sampleDet, sampleLabel in zip(bb8dets, labels):\n for instanceLabel in sampleLabel:\n if instanceLabel[0] < 0:\n continue\n else:\n cid = instanceLabel[0]\n indices = np.where(sampleDet[:, 0] == cid)[0]\n if indices.size > 0:\n instanceDet = sampleDet[indices[0]] # only consider the most confident instance\n loc_mae_pixel.append(np.abs((instanceDet[2:6] - instanceLabel[1:5]) * 300))\n bb8_mae_pixel.append(np.abs((instanceDet[6:22] - instanceLabel[8:24]) * 300))\n loc_mae_pixel = np.array(loc_mae_pixel)\n bb8_mae_pixel = np.array(bb8_mae_pixel)\n bb8_mae_pixel_x = bb8_mae_pixel[:, [0, 2, 4, 6, 8, 10, 12, 14]]\n bb8_mae_pixel_y = bb8_mae_pixel[:, [1, 3, 5, 7, 9, 11, 13, 15]]\n bb8_mae_pixel = np.sqrt(np.square(bb8_mae_pixel_x) + np.square(bb8_mae_pixel_y))\n\n bb8_label = preds[10].asnumpy()\n bb8_label_in_use = bb8_label[bb8_label.nonzero()]\n bb8_pred = preds[11].asnumpy()\n bb8_pred_in_use = bb8_pred[bb8_pred.nonzero()]\n bb8_mae = preds[12].asnumpy()\n bb8_mae_in_use = bb8_mae[bb8_mae.nonzero()]\n # bb8_mae_pixel = np.abs((labels[:, 0, 8:24] - bb8dets[:, 0, 6:22]) * 300) # need to be refined\n\n # loc_mae_pixel = []\n # bb8_mae_pixel = []\n # # independant execution for each image\n # for i in range(labels.shape[0]):\n # # get as numpy arrays\n # label = labels[i]\n # pred = bb8dets[i]\n # loc_mae_pixel_per_image = []\n # bb8_mae_pixel_per_image = []\n # # calculate for each class\n # while (pred.shape[0] > 0):\n # cid = int(pred[0, 0])\n # indices = np.where(pred[:, 0].astype(int) == cid)[0]\n # if cid < 0:\n # pred = np.delete(pred, indices, axis=0)\n # continue\n # dets = pred[indices]\n # pred = np.delete(pred, indices, axis=0)\n #\n # # ground-truths\n # label_indices = np.where(label[:, 0].astype(int) == cid)[0]\n # gts = label[label_indices, :]\n # label = np.delete(label, label_indices, axis=0)\n # if gts.size > 0:\n # found = [False] * gts.shape[0]\n # for j in range(dets.shape[0]):\n # # compute overlaps\n # ious = iou(dets[j, 2:6], gts[:, 1:5])\n # ovargmax = np.argmax(ious)\n # ovmax = ious[ovargmax]\n # if ovmax > self.ovp_thresh:\n # if not found[ovargmax]:\n # loc_mae_pixel_per_image.append(np.abs((dets[j, 2:6] - gts[ovargmax, 1:5]) * 300)) # tp\n # bb8_mae_pixel_per_image.append(np.abs((dets[j, 6:22] - gts[ovargmax, 8:24]) * 300))\n # found[ovargmax] = True\n # else:\n # # duplicate\n # pass # fp\n #\n # loc_mae_pixel.append(np.mean(loc_mae_pixel_per_image, axis=1))\n # bb8_mae_pixel.append(np.mean(bb8_mae_pixel_per_image, axis=1))\n\n\n valid_count = np.sum(cls_label >= 0)\n box_count = np.sum(cls_label > 0)\n # overall accuracy & object accuracy\n label = cls_label.flatten()\n # in case you have a 'other' class\n label[np.where(label >= cls_prob.shape[1])] = 0\n mask = np.where(label >= 0)[0]\n indices = np.int64(label[mask])\n prob = cls_prob.transpose((0, 2, 1)).reshape((-1, cls_prob.shape[1])).asnumpy()\n prob = prob[mask, indices]\n self.sum_metric[0] += (-np.log(prob + self.eps)).sum()\n self.num_inst[0] += valid_count\n # loc_smoothl1loss\n self.sum_metric[1] += np.sum(loc_loss)\n self.num_inst[1] += box_count * 4\n # loc_mae\n self.sum_metric[2] += 
np.sum(loc_mae)\n self.num_inst[2] += box_count * 4\n # loc_mae_pixel\n self.sum_metric[3] += np.sum(loc_mae_pixel)\n self.num_inst[3] += loc_mae_pixel.size\n # bb8_smoothl1loss\n self.sum_metric[4] += np.sum(bb8_loss)\n self.num_inst[4] += box_count * 16\n # bb8_mae\n self.sum_metric[5] += np.sum(bb8_mae)\n self.num_inst[5] += box_count * 16\n # bb8_mae_pixel\n self.sum_metric[6] += np.sum(bb8_mae_pixel)\n self.num_inst[6] += bb8_mae_pixel.size\n\n def get(self):\n \"\"\"Get the current evaluation result.\n Override the default behavior\n\n Returns\n -------\n name : str\n Name of the metric.\n value : float\n Value of the evaluation.\n \"\"\"\n if self.num is None:\n if self.num_inst == 0:\n return (self.name, float('nan'))\n else:\n return (self.name, self.sum_metric / self.num_inst)\n else:\n names = ['%s'%(self.name[i]) for i in range(self.num)]\n values = [x / y if y != 0 else float('nan') \\\n for x, y in zip(self.sum_metric, self.num_inst)]\n return (names, values)\n","sub_path":"train/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":8715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"498706951","text":"#!/usr/bin/env python3\n\n# \n# Copyright © 2019 James A. Dooley\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software \n# and associated documentation files (the \"Software\"), to deal in the Software without restriction, \n# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, \n# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, \n# subject to the following conditions:\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT \n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. 
\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# Licensed under the The MIT License.\n#\n# You may not use this file except in compliance with the License.\n#\n# You may obtain more information about this License at\n#\n# https://tldrlegal.com/license/mit-license\n#\n# File: document_sub_group_datastore.py\n#\n# Description:\n#\n# Author: James Dooley\n#\n# Created: 06.02.20\n#\n# Python Version: 3.7.0\n#\n# History:\n# 06.02.20 - Initial version\n#\n\n\"\"\"document_sub_group_datastore.py: \"\"\"\n\n__author__ = \"James Dooley\"\n__credits__ = []\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"James Dooley\"\n__email__ = \"\"\n__status__ = \"Prototype\"\n\nimport sqlalchemy as db\nfrom typing import List, Dict, Any\nfrom datetime import datetime\nfrom .common import _DatabaseManagerBase\nfrom .common import _LookupRow\nfrom ..model import DocumentSubGroup, XxxDocumentSubGroup\nfrom ..model.audit import AUDIT_ACTION_INSERT, AUDIT_ACTION_UPDATE, AUDIT_ACTION_DELETE\n\nDocumentSubGroupLookupList = List[_LookupRow]\nDocumentSubGroupList = List[DocumentSubGroup]\nAuditList = List[XxxDocumentSubGroup]\nDocumentGroupLookupList = List[_LookupRow]\nRecordData = Dict[str, Any]\n\n\nclass _DocumentSubGroupDataStore(_DatabaseManagerBase):\n __slots__ = ['_document_sub_group_table', '_xxx_document_sub_group_table', '_document_group_table', '_view_query']\n\n def __init__(self, driver: str, host: str, port: int, user: str, password: str, schema: str,\n create_engne: bool = True, echo: bool = False):\n super(_DocumentSubGroupDataStore, self).__init__(driver, host, port, user, password, schema, create_engne, echo)\n\n self._document_sub_group_table = None\n self._xxx_document_sub_group_table = None\n self._document_group_table = None\n\n self._create_document_sub_table()\n self._create_xxx_document_sub_table()\n self._create_document_group_table()\n\n # Build the view query\n view = self._document_sub_group_table.join(self._document_group_table)\n self._view_query = db.select([self._document_sub_group_table,\n self._document_group_table.columns.name.label(\n 'doc_group_name')]).select_from(view)\n\n @staticmethod\n def _map_audit_record(record_data: RecordData) -> XxxDocumentSubGroup:\n return XxxDocumentSubGroup.create(record_data['name'], record_data['name_upper'], record_data['notes'],\n record_data['doc_group_id'], record_data['sort_order'], record_data['icon'],\n record_data['id'], record_data['system_application_id'],\n record_data['logged_at'], record_data['usr'], record_data['action'],\n record_data['record_id'], record_data['version'])\n\n @staticmethod\n def _map_document_sub_group_reord(record_data: RecordData) -> DocumentSubGroup:\n \"\"\"\n Creates a User object from a database record\n\n :param record_data: The database record returned from a call to the database\n\n :return: An instance of the User class with the given values\n \"\"\"\n return DocumentSubGroup.create(record_data['name'], record_data['name_upper'], record_data['notes'],\n record_data['doc_group_id'], record_data['sort_order'], record_data['icon'],\n record_data['id'], record_data['version'], record_data['created_at'],\n record_data['updated_at'], record_data['doc_group_name'])\n\n def _create_document_sub_table(self):\n self._document_sub_group_table = 
db.Table('document_sub_group', self._metadata,\n db.Column('id', db.Integer, primary_key=True, autoincrement=True),\n db.Column('name', db.String(100), nullable=False),\n db.Column('name_upper', db.String(100), nullable=False, unique=True),\n db.Column('notes', db.String(2500)),\n db.Column('doc_group_id', db.Integer,\n db.ForeignKey('document_group.id'), nullable=False),\n db.Column('sort_order', db.SmallInteger, nullable=False, default=0),\n db.Column('icon', db.String(100)),\n db.Column('version', db.SmallInteger, nullable=False, default=1),\n db.Column('created_at', db.TIMESTAMP, nullable=False),\n db.Column('updated_at', db.TIMESTAMP, nullable=False))\n\n def _create_xxx_document_sub_table(self):\n self._xxx_document_sub_group_table = db.Table('xxx_document_sub_group', self._metadata,\n db.Column('id', db.BigInteger, primary_key=True,\n autoincrement=True),\n db.Column('system_application_id', db.SmallInteger),\n db.Column('logged_at', db.TIMESTAMP),\n db.Column('usr', db.String(100), nullable=False),\n db.Column('action', db.String(1), nullable=False),\n db.Column('record_id', db.BigInteger, nullable=False),\n db.Column('version', db.SmallInteger, nullable=False),\n db.Column('name', db.String(100)),\n db.Column('name_upper', db.String(100)),\n db.Column('notes', db.String(2500)),\n db.Column('doc_group_id', db.Integer),\n db.Column('sort_order', db.SmallInteger),\n db.Column('icon', db.String(100)))\n\n def _create_document_group_table(self):\n self._document_group_table = db.Table('document_group', self._metadata,\n db.Column('id', db.Integer, primary_key=True, autoincrement=True),\n db.Column('name', db.String(100), nullable=False),\n db.Column('name_upper', db.String(100), nullable=False, unique=True),\n db.Column('notes', db.String(2500)),\n db.Column('language_id', db.SmallInteger,\n db.ForeignKey('language.id'), nullable=False),\n db.Column('doc_type_id', db.SmallInteger,\n db.ForeignKey('document_type.id'), nullable=False),\n db.Column('sort_order', db.SmallInteger, nullable=False, default=0),\n db.Column('icon', db.String(100)),\n db.Column('version', db.SmallInteger, nullable=False, default=1),\n db.Column('created_at', db.TIMESTAMP, nullable=False),\n db.Column('updated_at', db.TIMESTAMP, nullable=False))\n\n def _insert_audit_record(self, xxx_doc: XxxDocumentSubGroup) -> None:\n values = {'system_application_id': xxx_doc.app_id, 'usr': xxx_doc.user,\n 'action': xxx_doc.action, 'record_id': xxx_doc.audit_record_id,\n 'version': xxx_doc.version, 'name': xxx_doc.name, 'name_upper': xxx_doc.name_upper,\n 'notes': xxx_doc.notes, 'doc_group_id': xxx_doc.doc_group_id, 'sort_order': xxx_doc.sort_order,\n 'icon': xxx_doc.icon}\n\n self._engine.execute(self._xxx_document_sub_group_table.insert().values(values))\n\n def get_last_audit_record(self, record_id: int) -> XxxDocumentSubGroup:\n row = self._engine.execute(self._xxx_document_sub_group_table.select().where(\n self._xxx_document_sub_group_table.columns.record_id == record_id).order_by(\n self._xxx_document_sub_group_table.columns.id.desc())).first()\n\n return self._map_audit_record(row)\n\n def get_audit_records(self, record_id: int) -> AuditList:\n rows = self._engine.execute(self._xxx_document_sub_group_table.select().where(\n self._xxx_document_sub_group_table.columns.record_id == record_id).order_by(\n self._xxx_document_sub_group_table.columns.id.desc())).fetchall()\n\n return [self._map_audit_record(row) for row in rows]\n\n def get_document_group_list(self) -> DocumentGroupLookupList:\n \"\"\"\n Returns a list of the 
document group ids and names\n\n :return: list of document group ids and names\n \"\"\"\n rows = self._engine.execute(db.select(\n [self._document_group_table.columns.id, self._document_group_table.columns.name]).order_by(\n self._document_group_table.columns.sort_order)).fetchall()\n\n return self._map_lookup_list(rows)\n\n def get_lookup_list(self, doc_group_id: int) -> DocumentGroupLookupList:\n \"\"\"\n Returns a list of the document group ids and names\n\n :return: list of document group ids and names\n \"\"\"\n rows = self._engine.execute(db.select(\n [self._document_sub_group_table.columns.id, self._document_sub_group_table.columns.name]).where(\n self._document_sub_group_table.columns.doc_group_id == doc_group_id).order_by(\n self._document_sub_group_table.columns.sort_order)).fetchall()\n\n return self._map_lookup_list(rows)\n\n def get_all(self) -> DocumentSubGroupList:\n rows = self._engine.execute(self._view_query.order_by(\n self._document_sub_group_table.columns.sort_order)).fetchall()\n\n return [self._map_document_sub_group_reord(row) for row in rows]\n\n def get_all_by_doc_group(self, doc_group_id: int) -> DocumentSubGroupList:\n rows = self._engine.execute(self._view_query.where(\n self._document_sub_group_table.columns.doc_group_id == doc_group_id).order_by(\n self._document_sub_group_table.columns.sort_order)).fetchall()\n\n return [self._map_document_sub_group_reord(row) for row in rows]\n\n def get(self, record_id: int) -> DocumentSubGroup:\n row = self._engine.execute(self._view_query.where(\n self._document_sub_group_table.columns.id == record_id)).first()\n\n if row:\n return self._map_document_sub_group_reord(row)\n\n def insert(self, app_id: int, usr: str, name: str, doc_group_id: int, icon: str = None,\n sort_order: int = 0, notes: str = None) -> DocumentSubGroup:\n result = self._engine.execute(self._document_sub_group_table.insert().values(\n {'name': name, 'name_upper': name.upper(), 'notes': notes, 'doc_group_id': doc_group_id,\n 'sort_order': sort_order, 'icon': icon}))\n\n doc_sub_grp = self.get(result.lastrowid)\n\n xxx_sub_doc = XxxDocumentSubGroup.create_from_object(AUDIT_ACTION_INSERT, app_id, usr, doc_sub_grp)\n self._insert_audit_record(xxx_sub_doc)\n\n return doc_sub_grp\n\n def insert_object(self, app_id: int, usr: str, doc_sub_grp: DocumentSubGroup) -> None:\n new_doc_sub_grp = self.insert(app_id, usr, doc_sub_grp.name, doc_sub_grp.doc_group_id, doc_sub_grp.icon,\n doc_sub_grp.sort_order, doc_sub_grp.notes)\n doc_sub_grp.reset(new_doc_sub_grp.record_id, new_doc_sub_grp.version, new_doc_sub_grp.date_updated,\n date_created=new_doc_sub_grp.date_created, doc_group_name=new_doc_sub_grp.doc_group_name)\n\n def update(self, app_id: int, usr: str, doc_sub_grp: DocumentSubGroup) -> bool:\n updated_at = datetime.now()\n new_version = doc_sub_grp.version + 1\n\n values = {'name': doc_sub_grp.name, 'name_upper': doc_sub_grp.name_upper, 'notes': doc_sub_grp.notes,\n 'doc_group_id': doc_sub_grp.doc_group_id, 'sort_order': doc_sub_grp.sort_order,\n 'icon': doc_sub_grp.icon, 'version': new_version, 'updated_at': updated_at}\n\n result = self._engine.execute(self._document_sub_group_table.update().where(db.and_(\n self._document_sub_group_table.columns.id == doc_sub_grp.record_id,\n self._document_sub_group_table.columns.version == doc_sub_grp.version)).values(values))\n\n doc_sub_grp.reset(doc_sub_grp.record_id, new_version, updated_at)\n\n xxx_doc_sub_grp = XxxDocumentSubGroup.create_from_object(AUDIT_ACTION_UPDATE, app_id, usr, doc_sub_grp)\n 
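# persist the audit row that records this update\n        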
self._insert_audit_record(xxx_doc_sub_grp)\n\n return result.rowcount == 1\n\n def delete(self, app_id: int, usr: str, record_id: int, version: int) -> bool:\n doc_sub_grp = self.get(record_id)\n\n if not doc_sub_grp:\n return False\n if (doc_sub_grp.record_id != record_id) or (doc_sub_grp.version != version):\n return False\n\n result = self._engine.execute(self._document_sub_group_table.delete().where(\n db.and_(self._document_sub_group_table.columns.id == record_id,\n self._document_sub_group_table.columns.version == version)))\n\n if result.rowcount == 1:\n xxx_doc = XxxDocumentSubGroup.create_from_object(AUDIT_ACTION_DELETE, app_id, usr, doc_sub_grp)\n self._insert_audit_record(xxx_doc)\n\n return result.rowcount == 1\n","sub_path":"projects/packages/core/datastore/document_sub_group_datastore.py","file_name":"document_sub_group_datastore.py","file_ext":"py","file_size_in_byte":15013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528856446","text":"#! /usr/bin/env python\n\"\"\"\nCopyright 2016 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nAssociation-based semi-supervised training module.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport sys\n\n\nimport semisup\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim.python.slim.nets import inception_v3\n\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\n\nsys.path.insert(0, '/usr/wiss/haeusser/libs/tfmodels/inception')\n\nFLAGS = flags.FLAGS\n\n\nflags.DEFINE_string('architecture', 'svhn', 'Which dataset to work on.')\n\n\n\nflags.DEFINE_integer('sup_per_class', 10,\n 'Number of labeled samples used per class.')\n\nflags.DEFINE_integer('sup_seed', -1,\n 'Integer random seed used for labeled set selection.')\n\nflags.DEFINE_integer('sup_per_batch', 10,\n 'Number of labeled samples per class per batch.')\n\nflags.DEFINE_integer('unsup_batch_size', 1000,\n 'Number of unlabeled samples per batch.')\n\nflags.DEFINE_integer('sup_batch_size', 1000,\n 'Number of labeled samples per batch.')\n\nflags.DEFINE_integer('eval_interval', 500,\n 'Number of steps between evaluations.')\n\nflags.DEFINE_float('learning_rate', 1e-3, 'Initial learning rate.')\n\nflags.DEFINE_float('minimum_learning_rate', 3e-6, 'Final learning rate.')\n\nflags.DEFINE_float('decay_factor', 0.33, 'Learning rate decay factor.')\n\nflags.DEFINE_float('decay_steps', 5000,\n 'Learning rate decay interval in steps.')\n\nflags.DEFINE_float('visit_weight', 1.0, 'Weight for visit loss.')\n\nflags.DEFINE_integer('max_steps', 20000, 'Number of training steps.')\n\nflags.DEFINE_string('logdir', '/tmp/semisup/imagenet', 'Training log path.')\n\nflags.DEFINE_integer('save_summaries_secs', 150,\n 'How often should summaries be saved (in seconds).')\n\nflags.DEFINE_integer('save_interval_secs', 300,\n 'How often should checkpoints be saved (in 
seconds).')\n\nflags.DEFINE_integer('log_every_n_steps', 100,\n 'Logging interval for slim training loop.')\n\nflags.DEFINE_string('master', '',\n 'BNS name of the TensorFlow master to use.')\n\nflags.DEFINE_integer(\n 'ps_tasks', 0,\n 'The number of parameter servers. If the value is 0, then the parameters '\n 'are handled locally by the worker.')\n\nflags.DEFINE_integer(\n 'task', 0,\n 'The Task ID. This value is used when training with multiple workers to '\n 'identify each worker.')\n\n# TODO(haeusser) convert to argparse as gflags will be discontinued\n#flags.DEFINE_multi_float('custom_lr_vals', None,\n# 'For custom lr schedule: lr values.')\n\n#flags.DEFINE_multi_int('custom_lr_steps', None,\n# 'For custom lr schedule: step values.')\n\nFLAGS.custom_lr_vals = None\nFLAGS.custom_lr_steps = None\nFLAGS.data_dir ='/work/haeusser/data/imagenet/shards'\nFLAGS.num_readers = 16\nFLAGS.input_queue_memory_factor = 16\nFLAGS.image_size = 120 #299 # remember to change variable IMAGE_SHAPE\n\n\ndef inception_model(inputs,\n emb_size=128,\n is_training=True):\n _, end_points = inception_v3.inception_v3(inputs, is_training=is_training, reuse=True)\n net = end_points['Mixed_7c']\n net = slim.flatten(net, scope='flatten')\n with slim.arg_scope([slim.fully_connected], normalizer_fn=None):\n emb = slim.fully_connected(net, emb_size, scope='fc')\n return emb\n\ndef main(_):\n from inception.imagenet_data import ImagenetData\n from inception import image_processing\n dataset = ImagenetData(subset='train')\n assert dataset.data_files()\n NUM_LABELS = dataset.num_classes() + 1\n IMAGE_SHAPE = [FLAGS.image_size, FLAGS.image_size, 3]\n graph = tf.Graph()\n with graph.as_default():\n model = semisup.SemisupModel(inception_model, NUM_LABELS,\n IMAGE_SHAPE)\n\n # t_sup_images, t_sup_labels = tools.get_data('train')\n # t_unsup_images, _ = tools.get_data('unlabeled')\n\n images, labels = image_processing.batch_inputs(\n dataset, 32, train=True,\n num_preprocess_threads=FLAGS.num_readers,\n num_readers=FLAGS.num_readers)\n\n t_sup_images, t_sup_labels = tf.train.batch(\n [images, labels],\n batch_size=FLAGS.sup_batch_size,\n enqueue_many=True,\n num_threads=FLAGS.num_readers,\n capacity=1000 + 3 * FLAGS.sup_batch_size,\n )\n\n t_unsup_images, t_unsup_labels = tf.train.batch(\n [images, labels],\n batch_size=FLAGS.sup_batch_size,\n enqueue_many=True,\n num_threads=FLAGS.num_readers,\n capacity=1000 + 3 * FLAGS.sup_batch_size,\n )\n\n # Compute embeddings and logits.\n t_sup_emb = model.image_to_embedding(t_sup_images)\n t_unsup_emb = model.image_to_embedding(t_unsup_images)\n t_sup_logit = model.embedding_to_logit(t_sup_emb)\n\n # Add losses.\n model.add_semisup_loss(\n t_sup_emb, t_unsup_emb, t_sup_labels, visit_weight=FLAGS.visit_weight)\n\n model.add_logit_loss(t_sup_logit, t_sup_labels)\n\n\n t_learning_rate = tf.maximum(\n tf.train.exponential_decay(\n FLAGS.learning_rate,\n model.step,\n FLAGS.decay_steps,\n FLAGS.decay_factor,\n staircase=True),\n FLAGS.minimum_learning_rate)\n\n # Create training operation and start the actual training loop.\n train_op = model.create_train_op(t_learning_rate)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n slim.learning.train(\n train_op,\n logdir=FLAGS.logdir,\n save_summaries_secs=FLAGS.save_summaries_secs,\n save_interval_secs=FLAGS.save_interval_secs,\n master=FLAGS.master,\n is_chief=(FLAGS.task == 0),\n startup_delay_steps=(FLAGS.task * 20),\n log_every_n_steps=FLAGS.log_every_n_steps,\n session_config=config)\n\n\nif __name__ == 
'__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run()\n","sub_path":"semisup/train_imagenet.py","file_name":"train_imagenet.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448633515","text":"\"\"\"\r\nFILENAME: Count_UMI_Length.py\r\nDESCRIPTION: This script reads a fastq file and records the length of each UMI. It can optionally extract every\r\n UMI and records its length.\r\nINPUT: A directory containing fastq files that have UMIs in the their header.\r\nOUTPUT: A txt file that contains the number of 'n'-length UMIs. Optionally, the file will have every UMI\r\n and its corresponding length.\r\nCREATED: January 23, 2018.\r\nLAST MODIFIED: February 10, 2018.\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport argparse\r\nimport glob\r\nimport os\r\nimport re\r\nimport sys\r\nimport time\r\nfrom collections import Counter\r\nfrom datetime import datetime\r\n\r\n\r\ndef define_outputs(path_in, dir_out):\r\n \"\"\"Function to read input filenames and create output files.\"\"\"\r\n base_in = os.path.splitext(os.path.split(path_in)[1])[0]\r\n name_out = \"umi_length-{0}.txt\".format(base_in)\r\n path_out = os.path.join(dir_out, name_out)\r\n write_out = open(path_out, 'w')\r\n return write_out\r\n\r\n\r\ndef count_umi(f_in, dir_out, is_header, is_umi):\r\n \"\"\"Function to count the length of every UMIs in a sample.\r\n Optionally, extract the UMI from each read & its length.\r\n \"\"\"\r\n f_out = define_outputs(f_in, dir_out)\r\n header = \"{}\\t{}\\n\".format(\"UMI Sequence\", \"UMI Length\")\r\n if is_umi and is_header: # If the --header argument & the --umi argument are provided, write the column headers.\r\n f_out.write(header)\r\n line_count, length_list = 0, []\r\n regex_umi = re.compile(r\"\\w{16,}\") # Compile the regex for the UMI, since it'll be used repeatedly.\r\n with open(f_in) as f:\r\n for line in f:\r\n line_count += 1 # Keep track of the number of lines.\r\n if line.startswith(\"@\") and \"UMI:\" in line: # Search for the fastq header of each read.\r\n matched_umi = re.search(regex_umi, line)\r\n if matched_umi: # The UMI has been found.\r\n umi_sequence = matched_umi.group(0) # Extract UMI.\r\n umi_length = len(umi_sequence) # Get the length of the UMI sequence.\r\n length_list.append(umi_length) # Record every length in a list.\r\n if is_umi: # If the user specifies the --umi argument, write every UMI and its length.\r\n out_line = \"{}\\t{}\\n\".format(umi_sequence, umi_length)\r\n f_out.write(out_line)\r\n else: # A UMI has not been found, so just continue to the next line.\r\n print(\"Error! No UMI found for line #{} in file {}! 
This line will be skipped!\".format(line_count,\r\n os.path.basename(\r\n f_in)))\r\n length_counter = Counter(length_list) # Create a counter object for UMIs of every length.\r\n for k, v in length_counter.items(): # k = length of UMI; v = number of times that length appears\r\n counter_summary = \"Number of {} nucleotide UMIs found: {}\\n\".format(k, v)\r\n f_out.write(counter_summary)\r\n print(\"Processed {} lines in file {}.\".format(line_count, os.path.basename(f_in)))\r\n f_out.close()\r\n\r\n\r\ndef main():\r\n cmdline = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\r\n cmdline.add_argument('input_directory', help='the directory containing the input tagged fastq files')\r\n cmdline.add_argument('output_directory', help='the directory that will contain the output of this script')\r\n cmdline.add_argument('-e', '--header', help='write the column headers in the output file; has no effect if --umi '\r\n 'argument is not provided', action='store_true', default=False)\r\n cmdline.add_argument('-u', '--umi', help='write every found UMI and its length in the output file',\r\n action='store_true', default=False)\r\n args = cmdline.parse_args()\r\n\r\n # In my IDE, all system paths passed via the command line must end with a \"/\".\r\n if not args.input_directory.endswith(\"/\") or not args.output_directory.endswith(\"/\"):\r\n print(\"Error! You must end your path arguments with a \\\"/\\\"!\")\r\n sys.exit(1)\r\n\r\n # Record time & date of program start.\r\n start_time = time.time()\r\n print(\"{} program started on {}.\".format(os.path.basename(__file__),\r\n datetime.now().strftime(\"%A, %B %d at %I:%M:%S %p\")))\r\n\r\n if not os.path.exists(args.output_directory):\r\n os.makedirs(args.output_directory)\r\n\r\n # Make a list of all matching input fastq files.\r\n input_files = sorted(glob.glob(args.input_directory + \"*.fastq\"))\r\n\r\n # Check if the list of input files is empty. If it is, warn the user & exit the program.\r\n if not input_files:\r\n print(\"Error! No input fastq files found. Check the following: \\n\"\r\n \"1. The input directory.\\n\"\r\n \"2. The {} script.\".format(os.path.basename(__file__)))\r\n sys.exit(1)\r\n\r\n # Perform counting of UMI length.\r\n for input_file in input_files:\r\n # print(\"Found FASTQ file {}.\".format(input_file))\r\n count_umi(input_file, args.output_directory, args.header, args.umi)\r\n\r\n # Record time & date of program completion.\r\n print(\"{} program finished on {}.\".format(os.path.basename(__file__),\r\n datetime.now().strftime(\"%A, %B %d at %I:%M:%S %p\")))\r\n print(\"Time taken: {} seconds.\".format(time.time() - start_time))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Count_UMI_Length.py","file_name":"Count_UMI_Length.py","file_ext":"py","file_size_in_byte":5772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"232971674","text":"\"\"\"\nCopyright © 2019 Approximator. All rights reserved.\nAuthor: Approximator (alex@nls.la)\n\"\"\"\nimport os\nimport yaml\nimport time\nimport shutil\nimport random\nimport datetime\nfrom pathlib import Path\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom faker import Faker\nfrom pytest import fixture\nfrom tornado_sqlalchemy import make_session_factory, SessionMixin\nfrom apms.lib.db.database import BASE, User, Photo\nfrom apms.lib.config import config\n\nfrom PIL import Image, ImageFilter\n\n# pylint: disable=too-few-public-methods,invalid-name,redefined-outer-name\nfake = Faker(\"en_US\")\n\n\n@fixture\ndef image_maker(test_config):\n class Maker:\n def __init__(self):\n self._current_photo_id = 0\n self._photos = []\n self._photos_dir = Path(\n yaml.safe_load(open(test_config, \"r\"))[\"server\"][\"photos_dir\"]\n )\n\n def make(self, width=200, height=200):\n self._current_photo_id += 1\n image_file = f\"photo_{self._current_photo_id}.png\"\n\n photo = Photo(\n origin_id=self._current_photo_id,\n width=width,\n height=height,\n url=fake.url(),\n dir_name=\"\",\n file_name=image_file,\n date_added=fake.date_time(),\n date_downloaded=datetime.datetime.now(),\n text=fake.catch_phrase(),\n )\n self._photos.append(photo)\n return photo\n\n def generate_image(self, photo):\n img = Image.new(\"L\", [photo.width, photo.height], 255)\n data = img.load()\n\n for x in range(img.size[0]):\n for y in range(img.size[1]):\n data[x, y] = random.randint(1, 255)\n img = img.filter(filter=ImageFilter.BLUR)\n img.save(self._photos_dir / photo.file_name)\n return self._photos_dir / photo.file_name\n\n def save(self):\n print(\"Generating images\")\n start_time = time.time()\n with ThreadPoolExecutor() as executor:\n result = list(executor.map(self.generate_image, self._photos))\n print(\n f\"Generated {len(result)} images. 
Took {int(time.time() - start_time)} sec.\"\n )\n\n return Maker()\n\n\nclass AppHandler(SessionMixin):\n \"\"\"\n This class mimics tornado's RequestHandler\n Needed only for convenient session_factory and SessionMixin usage\n \"\"\"\n\n class App:\n def __init__(self, db_connection_string):\n self._session_factory = make_session_factory(db_connection_string)\n self.settings = {\"session_factory\": self._session_factory}\n BASE.metadata.create_all(self._session_factory.engine)\n\n def __init__(self, db_connection_string):\n self.application = AppHandler.App(db_connection_string)\n\n\n@fixture(scope=\"module\") # will be called once for the entire test module\ndef session_factory(test_config):\n config.load_config(test_config)\n yield AppHandler(config.db_connection_string)\n\n\n@fixture(scope=\"module\")\ndef test_config(tmpdir_factory):\n temp_dir = Path(tmpdir_factory.mktemp(\"apms-test-data\"))\n config_filename = temp_dir / \"config.yml\"\n db_filename = temp_dir / \"db.sqlite\"\n static_dir = temp_dir / \"build\"\n photos_dir = temp_dir / \"photos\"\n\n shutil.copytree(\"../apms-ui/build\", static_dir)\n\n config_data = {\n \"server\": {\n \"db_connection_string\": f\"sqlite:////{db_filename}\",\n \"static_dir\": static_dir,\n \"photos_dir\": f\"{photos_dir}\",\n },\n \"updater\": {\"pause\": True},\n \"api_clients\": {\n \"base\": \"http://127.0.0.1/method\",\n \"version\": \"5.71\",\n \"lang\": \"en\",\n \"token\": \"aaa\",\n \"user\": 111,\n \"user_info_fields\": [\"first_name\", \"last_name\", \"about\",],\n \"group_info_fields\": [\"city\", \"contacts\",],\n \"host_url_base\": \"http://127.0.0.1\",\n },\n }\n\n print(f\"Creating test config: {config_filename}\")\n photos_dir.mkdir(parents=True)\n yaml.dump(config_data, open(config_filename, \"w\"))\n return config_filename\n\n\n# pylint: disable=no-member\n@fixture\ndef fake_user_maker():\n class Maker:\n @classmethod\n def make(cls):\n return User(\n first_name=fake.first_name(),\n last_name=fake.last_name(),\n url=fake.url(),\n date_added=fake.date_time(),\n date_info_updated=datetime.datetime.now(),\n status_str=fake.catch_phrase(),\n )\n\n return Maker\n","sub_path":"apms/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17336839","text":"import argparse\nimport os, sys\n\nsys.path.append('../../weather/')\n\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sacrebleu import corpus_bleu\nfrom nltk.translate import gleu_score\nimport pandas as pd\n\nfrom sklearn.metrics import *\nfrom sklearn.utils.multiclass import type_of_target\nfrom sklearn.utils import check_consistent_length, column_or_1d, assert_all_finite\nfrom sklearn.utils.extmath import stable_cumsum\n\nimport matplotlib\n\nmatplotlib.use('agg')\nimport seaborn as sns\n\nsns.set()\n\nparser = argparse.ArgumentParser(description='Assess translation performance')\nparser.add_argument('path_in', type=str,\n help='Path of directory containing in-domain data.')\nparser.add_argument('path_out', type=str,\n help='Path of directory containing shifted data.')\nparser.add_argument('--save_path', type=str, default='./results.txt',\n help='Path to where to save output.')\nparser.add_argument('--beam_width', type=int, default=5,\n help='Path of directory where to save results.')\nparser.add_argument('--nbest', type=int, default=5,\n help='Path of directory where to save results.')\nparser.add_argument('--ensemble', 
action='store_true',\n help='Whether to load in additional ensemble-based measures.')\n\n\ndef _check_pos_label_consistency(pos_label, y_true):\n # ensure binary classification if pos_label is not specified\n # classes.dtype.kind in ('O', 'U', 'S') is required to avoid\n # triggering a FutureWarning by calling np.array_equal(a, b)\n # when elements in the two arrays are not comparable.\n classes = np.unique(y_true)\n if (pos_label is None and (\n classes.dtype.kind in 'OUS' or\n not (np.array_equal(classes, [0, 1]) or\n np.array_equal(classes, [-1, 1]) or\n np.array_equal(classes, [0]) or\n np.array_equal(classes, [-1]) or\n np.array_equal(classes, [1])))):\n classes_repr = \", \".join(repr(c) for c in classes)\n raise ValueError(\n f\"y_true takes value in {{{classes_repr}}} and pos_label is not \"\n f\"specified: either make y_true take value in {{0, 1}} or \"\n f\"{{-1, 1}} or pass pos_label explicitly.\"\n )\n elif pos_label is None:\n pos_label = 1.0\n\n return pos_label\n\n\ndef _binary_clf_curve_ret(y_true, y_score, pos_label=None, sample_weight=None):\n # Check to make sure y_true is valid\n y_type = type_of_target(y_true)\n if not (y_type == \"binary\" or\n (y_type == \"multiclass\" and pos_label is not None)):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n check_consistent_length(y_true, y_score, sample_weight)\n y_true = column_or_1d(y_true)\n y_score = column_or_1d(y_score)\n assert_all_finite(y_true)\n assert_all_finite(y_score)\n\n if sample_weight is not None:\n sample_weight = column_or_1d(sample_weight)\n\n pos_label = _check_pos_label_consistency(pos_label, y_true)\n\n # make y_true a boolean vector\n y_true = (y_true == pos_label)\n\n # sort scores and corresponding truth values\n desc_score_indices = np.argsort(y_score, kind=\"mergesort\")[::-1]\n y_score = y_score[desc_score_indices]\n y_true = y_true[desc_score_indices]\n if sample_weight is not None:\n weight = sample_weight[desc_score_indices]\n else:\n weight = 1.\n\n # y_score typically has many tied values. Here we extract\n # the indices associated with the distinct values. 
We also\n # concatenate a value for the end of the curve.\n # distinct_value_indices = np.where(np.diff(y_score))[0]\n # threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]\n\n # accumulate the true positives with decreasing threshold\n tps = stable_cumsum(y_true * weight) # [threshold_idxs]\n if sample_weight is not None:\n # express fps as a cumsum to ensure fps is increasing even in\n # the presence of floating point errors\n fps = stable_cumsum((1 - y_true) * weight) # [threshold_idxs]\n else:\n fps = stable_cumsum((1 - y_true)) # [threshold_idxs]\n return fps, tps, y_score # [threshold_idxs]\n\n\ndef _precision_recall_curve_retention(y_true, probas_pred, *, pos_label=None,\n sample_weight=None):\n fps, tps, thresholds = _binary_clf_curve_ret(y_true, probas_pred,\n pos_label=pos_label,\n sample_weight=sample_weight)\n\n precision = tps / (tps + fps)\n precision[np.isnan(precision)] = 0\n recall = tps / tps[-1]\n\n # stop when full recall attained\n # and reverse the outputs so recall is decreasing\n last_ind = tps.searchsorted(tps[-1])\n sl = slice(-1, None, -1)\n return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]\n\n\ndef _acceptable_error(errors, threshold):\n return np.asarray(errors <= threshold, dtype=np.float32)\n\n\ndef _calc_fbeta_rejection_curve(errors, uncertainty, threshold, beta=1.0, group_by_uncertainty=True, eps=1e-10):\n ae = _acceptable_error(errors, threshold)\n pr, rec, _ = _precision_recall_curve_retention(ae, -uncertainty)\n pr = np.asarray(pr)\n rec = np.asarray(rec)\n f_scores = (1 + beta ** 2) * pr * rec / (pr * beta ** 2 + rec + eps)\n\n return f_scores, pr, rec\n\n\ndef f_beta_metrics(errors, uncertainty, threshold, beta=1.0):\n \"\"\"\n\n :param errors: Per sample errors - array [n_samples]\n :param uncertainty: Uncertainties associated with each prediction. array [n_samples]\n :param threshold: The error threshold below which we consider the prediction acceptable\n :param beta: The beta value for the F_beta metric. Defaults to 1\n :return: fbeta_auc, fbeta_95, retention\n \"\"\"\n f_scores, pr, rec = _calc_fbeta_rejection_curve(errors, uncertainty, threshold, beta)\n ret = np.arange(pr.shape[0]) / pr.shape[0]\n\n f_auc = auc(ret[::-1], f_scores)\n f95 = f_scores[::-1][int(0.95 * pr.shape[0])]\n\n return f_auc, f95, f_scores[::-1]\n\n\ndef load_uncertainties(path, n_best=5, beam_width=5):\n eoe = np.loadtxt(os.path.join(path, 'entropy_expected.txt'), dtype=np.float32)\n exe = np.loadtxt(os.path.join(path, 'expected_entropy.txt'), dtype=np.float32)\n mi = np.loadtxt(os.path.join(path, 'mutual_information.txt'), dtype=np.float32)\n epkl = np.loadtxt(os.path.join(path, 'epkl.txt'), dtype=np.float32)\n mkl = np.loadtxt(os.path.join(path, 'mkl.txt'), dtype=np.float32)\n score = np.loadtxt(os.path.join(path, 'score.txt'), dtype=np.float32)\n aep_tu = np.loadtxt(os.path.join(path, 'aep_tu.txt'), dtype=np.float32)\n aep_du = np.loadtxt(os.path.join(path, 'aep_du.txt'), dtype=np.float32)\n npmi = np.loadtxt(os.path.join(path, 'npmi.txt'), dtype=np.float32)\n lprobs = np.loadtxt(os.path.join(path, 'log_probs.txt'), dtype=np.float32)\n sMKL_pe = np.loadtxt(os.path.join(path, 'score_npmi.txt'), dtype=np.float32)\n\n # Expectation of Products Measures\n ep_eoe = np.loadtxt(os.path.join(path, 'ep_entropy_expected.txt'), dtype=np.float32)\n ep_mi = np.loadtxt(os.path.join(path, 'ep_mutual_information.txt'), dtype=np.float32)\n ep_epkl = np.loadtxt(os.path.join(path, 'ep_epkl.txt'), dtype=np.float32)\n ep_mkl = np.loadtxt(os.path.join(path, 'ep_mkl.txt'), dtype=np.float32)\n\n # Heuristic Measures\n var = np.loadtxt(os.path.join(path, 'var.txt'), dtype=np.float32)\n varcombo = np.loadtxt(os.path.join(path, 'varcombo.txt'), dtype=np.float32)\n logvar = np.loadtxt(os.path.join(path, 'logvar.txt'), dtype=np.float32)\n logcombo = np.loadtxt(os.path.join(path, 'logcombo.txt'), dtype=np.float32)\n\n unc_dict = {'Total Uncertainty-PE': eoe,\n 'Total Uncertainty-EP': ep_eoe,\n 'SCR-PE': score,\n 'SCR-EP': aep_tu,\n 'Data Uncertainty': exe,\n 'Mutual Information-PE': mi,\n 'Mutual Information-EP': ep_mi,\n 'EPKL-PE': epkl,\n 'EPKL-EP': ep_epkl,\n 'Reverse Mutual Information': mkl,\n 'ep_MKL': ep_mkl,\n 'sMKL-PE': sMKL_pe,\n 'sMKL-EP': npmi,\n 'var': var,\n 'varcombo': varcombo,\n 'logvar': logvar,\n 'logcombo': logcombo\n }\n\n for key in unc_dict.keys():\n uncertainties = unc_dict[key]\n unc_dict[key] = np.mean(np.reshape(uncertainties, [-1, beam_width])[:, :n_best], axis=1)\n return unc_dict\n\n\ndef eval_ood_detect(in_uncertainties, out_uncertainties, save_path):\n for key in in_uncertainties.keys():\n ood_detect(in_uncertainties[key],\n out_uncertainties[key],\n measure_name=key,\n save_path=save_path)\n\n\ndef ood_detect(in_measure, out_measure, measure_name, save_path):\n scores = np.concatenate((in_measure, out_measure), axis=0)\n scores = np.asarray(scores, dtype=np.float128)\n results_path = save_path\n save_path = os.path.split(save_path)[0]\n\n domain_labels = np.concatenate((np.zeros_like(in_measure, dtype=np.int32),\n np.ones_like(out_measure, dtype=np.int32)), axis=0)\n\n fpr, tpr, thresholds = roc_curve(domain_labels, scores)\n roc_auc = roc_auc_score(domain_labels, scores)\n with open(results_path, 'a') as f:\n f.write('AUROC using ' + measure_name + \": \" + str(np.round(roc_auc * 100.0, 1)) + '\\n')\n\n\ndef eval_predictions(refs, hypos, nlls, nbest=5):\n weights = np.exp(-nlls)\n weights /= weights.sum(axis=1, keepdims=True)\n\n hyposb = [hypo[0] for hypo in hypos]\n bleu = corpus_bleu(sys_stream=hyposb, ref_streams=[refs]).score\n\n refsg = [[ref.split()] for ref in refs]\n gleus = []\n for i in range(nbest):\n hyposg = [hypo[i].split() for hypo in hypos]\n gleu = np.asarray(\n [gleu_score.sentence_gleu(references=r, hypothesis=h) * 100.0 for r, h in zip(refsg, hyposg)])[:,\n np.newaxis]\n gleus.append(gleu)\n gleus = np.concatenate(gleus, axis=1)\n return bleu, gleus, weights\n\n\ndef calc_uncertainty_rejection_curve(errors, uncertainty, group_by_uncertainty=True):\n n_objects = errors.shape[0]\n if group_by_uncertainty:\n data = pd.DataFrame(dict(\n errors=errors,\n uncertainty=uncertainty\n ))\n mean_errors = data.groupby(\"uncertainty\").mean()\n mean_errors.rename(columns={\"errors\": \"mean_errors\"}, inplace=True)\n data = data.join(mean_errors, \"uncertainty\")\n data.drop(\"errors\", axis=1, inplace=True)\n\n uncertainty_order = data[\"uncertainty\"].argsort()\n errors = data[\"mean_errors\"][uncertainty_order]\n else:\n uncertainty_order = uncertainty.argsort()\n errors = errors[uncertainty_order]\n\n error_rates = np.zeros(n_objects + 1)\n error_rates[:-1] = np.cumsum(errors)[::-1] / n_objects\n return error_rates\n\n\ndef calc_aucs(errors, uncertainty):\n uncertainty_rejection_curve = calc_uncertainty_rejection_curve(errors, uncertainty)\n uncertainty_rejection_auc = uncertainty_rejection_curve.mean()\n random_rejection_auc = uncertainty_rejection_curve[0] / 2\n ideal_rejection_auc = calc_uncertainty_rejection_curve(errors, errors).mean()\n\n rejection_ratio = (uncertainty_rejection_auc - random_rejection_auc) / (\n ideal_rejection_auc - random_rejection_auc) * 100.0\n return rejection_ratio, uncertainty_rejection_auc\n\n\ndef load_text(path, beam_width=5):\n refs, hypos = [], []\n with open(os.path.join(path, 'refs.txt'), 'r') as f:\n for line in f.readlines():\n refs.append(line[:-1])\n\n with open(os.path.join(path, 'hypos.txt'), 'r') as f:\n count = 0\n hypos = []\n hypos_joint = []\n for line in f.readlines():\n count += 1\n hypos_joint.append(line[:-1])\n if count % beam_width == 0:\n hypos.append(hypos_joint)\n hypos_joint = []\n\n ids = np.loadtxt(os.path.join(path, 'ref_ids.txt'), dtype=np.int32)\n nlls = -np.loadtxt(os.path.join(path, 'hypo_likelihoods.txt'), dtype=np.float32).reshape([-1, beam_width])\n\n return refs, hypos, ids, nlls\n\n\ndef eval_gleu_retention(errors, in_uncertainties, out_uncertainties, save_path):\n with open(save_path, 'a') as f:\n for key in in_uncertainties.keys():\n uncertainties = np.concatenate([in_uncertainties[key], out_uncertainties[key]], axis=0)\n prr, r_auc = calc_aucs(errors, uncertainties)\n f.write('R-ROC (SCORE) using ' + key + \": \" + str(np.round(r_auc, 3)) + '\\n')\n for key in in_uncertainties.keys():\n uncertainties = np.concatenate([in_uncertainties[key], out_uncertainties[key]], axis=0)\n prr, r_auc = calc_aucs(errors, uncertainties)\n f.write('PRR using ' + key + \": \" + str(np.round(prr, 3)) + '\\n')\n\n\ndef eval_fbeta(errors, threshold, in_uncertainties, out_uncertainties, save_path):\n with open(save_path, 'a') as f:\n for key in in_uncertainties.keys():\n uncertainties = np.concatenate([in_uncertainties[key], out_uncertainties[key]], axis=0)\n f_auc, f95, f_scores = f_beta_metrics(errors, uncertainties, threshold=threshold)\n f.write('F1-AUC using ' + key + \": \" + str(np.round(f_auc, 3)) + '\\n')\n for key in in_uncertainties.keys():\n uncertainties = np.concatenate([in_uncertainties[key], out_uncertainties[key]], axis=0)\n f_auc, f95, f_scores = f_beta_metrics(errors, uncertainties, threshold=threshold)\n 
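# Second pass over the same measures, this time logging F1 at 95% retention.\n 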
f.write('F1@95 using ' + key + \": \" + str(np.round(f95, 3)) + '\\n')\n\n\ndef main():\n args = parser.parse_args()\n\n # Load refs and hypos\n refs_in, hypos_in, ids_in, nlls_in = load_text(args.path_in, beam_width=args.beam_width)\n refs_out, hypos_out, ids_out, nlls_out = load_text(args.path_out, beam_width=args.beam_width)\n\n bleu_in, gleus_in, weights_in = eval_predictions(refs_in, hypos_in, nlls_in)\n bleu_out, gleus_out, weights_out = eval_predictions(refs_out, hypos_out, nlls_out)\n\n refs = refs_in + refs_out\n hypos = [hypo[0] for hypo in hypos_in + hypos_out]\n bleu = corpus_bleu(sys_stream=hypos, ref_streams=[refs]).score\n\n gleus = np.concatenate([gleus_in, gleus_out], axis=0)\n weights = np.concatenate([weights_in, weights_out], axis=0)\n gleu_errors = 100.0 - gleus\n\n egleus = np.mean(np.sum(gleus * weights, axis=1))\n mgleus = np.mean(np.max(gleus, axis=1))\n\n with open(args.save_path, 'a') as f:\n f.write('BLEU dev-in: ' + str(np.round(bleu_in, 2)) + '\\n')\n f.write('BLEU dev-out: ' + str(np.round(bleu_out, 2)) + '\\n')\n f.write('BLEU dev: ' + str(np.round(bleu, 2)) + '\\n')\n f.write('eGLEU dev-in: ' + str(np.round(np.mean(np.sum(gleus_in * weights_in, axis=1)), 2)) + '\\n')\n f.write('eGLEU dev-out: ' + str(np.round(np.mean(np.sum(gleus_out * weights_out, axis=1)), 2)) + '\\n')\n f.write('mGLEU dev-in: ' + str(np.round(np.mean(np.max(gleus_in, axis=1)), 2)) + '\\n')\n f.write('mGLEU dev-out: ' + str(np.round(np.mean(np.max(gleus_out, axis=1)), 2)) + '\\n')\n f.write('eGLEU dev: ' + str(np.round(egleus, 2)) + '\\n')\n f.write('mGLEU dev: ' + str(np.round(mgleus, 2)) + '\\n')\n\n # Load uncertainties\n if args.ensemble:\n uncertainties_in = load_uncertainties(args.path_in, beam_width=args.beam_width, n_best=args.nbest)\n uncertainties_out = load_uncertainties(args.path_out, beam_width=args.beam_width, n_best=args.nbest)\n\n else:\n uncertainties_in = {'NLL': np.mean(nlls_in, axis=1)}\n uncertainties_out = {'NLL': np.mean(nlls_out, axis=1)}\n\n eval_gleu_retention(errors=np.sum(gleu_errors * weights, axis=1),\n in_uncertainties=uncertainties_in,\n out_uncertainties=uncertainties_out,\n save_path=args.save_path)\n\n eval_fbeta(errors=np.sum(gleu_errors * weights, axis=1),\n threshold=60.0,\n in_uncertainties=uncertainties_in,\n out_uncertainties=uncertainties_out,\n save_path=args.save_path)\n\n eval_ood_detect(in_uncertainties=uncertainties_in,\n out_uncertainties=uncertainties_out,\n save_path=args.save_path)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"translation/assessment/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":16407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61252052","text":"from __future__ import annotations\n\nimport copy\nfrom contextlib import contextmanager\nfrom typing import Optional, Dict, Tuple, Any, Union, List\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom compel.embeddings_provider import BaseTextualInversionManager\nfrom diffusers.models import UNet2DConditionModel\nfrom safetensors.torch import load_file\nfrom transformers import CLIPTextModel, CLIPTokenizer\n\nfrom .models.lora import LoRAModel\n\n\n\"\"\"\nloras = [\n (lora_model1, 0.7),\n (lora_model2, 0.4),\n]\nwith LoRAHelper.apply_lora_unet(unet, loras):\n # unet with applied loras\n# unmodified unet\n\n\"\"\"\n\n\n# TODO: rename smth like ModelPatcher and add TI method?\nclass ModelPatcher:\n @staticmethod\n def _resolve_lora_key(model: torch.nn.Module, lora_key: 
str, prefix: str) -> Tuple[str, torch.nn.Module]:\n assert \".\" not in lora_key\n\n if not lora_key.startswith(prefix):\n raise Exception(f\"lora_key with invalid prefix: {lora_key}, {prefix}\")\n\n module = model\n module_key = \"\"\n key_parts = lora_key[len(prefix) :].split(\"_\")\n\n submodule_name = key_parts.pop(0)\n\n while len(key_parts) > 0:\n try:\n module = module.get_submodule(submodule_name)\n module_key += \".\" + submodule_name\n submodule_name = key_parts.pop(0)\n except Exception:\n submodule_name += \"_\" + key_parts.pop(0)\n\n module = module.get_submodule(submodule_name)\n module_key = (module_key + \".\" + submodule_name).lstrip(\".\")\n\n return (module_key, module)\n\n @staticmethod\n def _lora_forward_hook(\n applied_loras: List[Tuple[LoRAModel, float]],\n layer_name: str,\n ):\n def lora_forward(module, input_h, output):\n if len(applied_loras) == 0:\n return output\n\n for lora, weight in applied_loras:\n layer = lora.layers.get(layer_name, None)\n if layer is None:\n continue\n output += layer.forward(module, input_h, weight)\n return output\n\n return lora_forward\n\n @classmethod\n @contextmanager\n def apply_lora_unet(\n cls,\n unet: UNet2DConditionModel,\n loras: List[Tuple[LoRAModel, float]],\n ):\n with cls.apply_lora(unet, loras, \"lora_unet_\"):\n yield\n\n @classmethod\n @contextmanager\n def apply_lora_text_encoder(\n cls,\n text_encoder: CLIPTextModel,\n loras: List[Tuple[LoRAModel, float]],\n ):\n with cls.apply_lora(text_encoder, loras, \"lora_te_\"):\n yield\n\n @classmethod\n @contextmanager\n def apply_sdxl_lora_text_encoder(\n cls,\n text_encoder: CLIPTextModel,\n loras: List[Tuple[LoRAModel, float]],\n ):\n with cls.apply_lora(text_encoder, loras, \"lora_te1_\"):\n yield\n\n @classmethod\n @contextmanager\n def apply_sdxl_lora_text_encoder2(\n cls,\n text_encoder: CLIPTextModel,\n loras: List[Tuple[LoRAModel, float]],\n ):\n with cls.apply_lora(text_encoder, loras, \"lora_te2_\"):\n yield\n\n @classmethod\n @contextmanager\n def apply_lora(\n cls,\n model: torch.nn.Module,\n loras: List[Tuple[LoRAModel, float]],\n prefix: str,\n ):\n original_weights = dict()\n try:\n with torch.no_grad():\n for lora, lora_weight in loras:\n # assert lora.device.type == \"cpu\"\n for layer_key, layer in lora.layers.items():\n if not layer_key.startswith(prefix):\n continue\n\n module_key, module = cls._resolve_lora_key(model, layer_key, prefix)\n if module_key not in original_weights:\n original_weights[module_key] = module.weight.detach().to(device=\"cpu\", copy=True)\n\n # enable autocast to calc fp16 loras on cpu\n # with torch.autocast(device_type=\"cpu\"):\n layer.to(dtype=torch.float32)\n layer_scale = layer.alpha / layer.rank if (layer.alpha and layer.rank) else 1.0\n layer_weight = layer.get_weight(original_weights[module_key]) * lora_weight * layer_scale\n\n if module.weight.shape != layer_weight.shape:\n # TODO: debug on lycoris\n layer_weight = layer_weight.reshape(module.weight.shape)\n\n module.weight += layer_weight.to(device=module.weight.device, dtype=module.weight.dtype)\n\n yield # wait for context manager exit\n\n finally:\n with torch.no_grad():\n for module_key, weight in original_weights.items():\n model.get_submodule(module_key).weight.copy_(weight)\n\n @classmethod\n @contextmanager\n def apply_ti(\n cls,\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTextModel,\n ti_list: List[Tuple[str, Any]],\n ) -> Tuple[CLIPTokenizer, TextualInversionManager]:\n init_tokens_count = None\n new_tokens_added = None\n\n try:\n ti_tokenizer = 
copy.deepcopy(tokenizer)\n ti_manager = TextualInversionManager(ti_tokenizer)\n init_tokens_count = text_encoder.resize_token_embeddings(None).num_embeddings\n\n def _get_trigger(ti_name, index):\n trigger = ti_name\n if index > 0:\n trigger += f\"-!pad-{i}\"\n return f\"<{trigger}>\"\n\n # modify tokenizer\n new_tokens_added = 0\n for ti_name, ti in ti_list:\n for i in range(ti.embedding.shape[0]):\n new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))\n\n # modify text_encoder\n text_encoder.resize_token_embeddings(init_tokens_count + new_tokens_added)\n model_embeddings = text_encoder.get_input_embeddings()\n\n for ti_name, ti in ti_list:\n ti_tokens = []\n for i in range(ti.embedding.shape[0]):\n embedding = ti.embedding[i]\n trigger = _get_trigger(ti_name, i)\n\n token_id = ti_tokenizer.convert_tokens_to_ids(trigger)\n if token_id == ti_tokenizer.unk_token_id:\n raise RuntimeError(f\"Unable to find token id for token '{trigger}'\")\n\n if model_embeddings.weight.data[token_id].shape != embedding.shape:\n raise ValueError(\n f\"Cannot load embedding for {trigger}. It was trained on a model with token dimension {embedding.shape[0]}, but the current model has token dimension {model_embeddings.weight.data[token_id].shape[0]}.\"\n )\n\n model_embeddings.weight.data[token_id] = embedding.to(\n device=text_encoder.device, dtype=text_encoder.dtype\n )\n ti_tokens.append(token_id)\n\n if len(ti_tokens) > 1:\n ti_manager.pad_tokens[ti_tokens[0]] = ti_tokens[1:]\n\n yield ti_tokenizer, ti_manager\n\n finally:\n if init_tokens_count and new_tokens_added:\n text_encoder.resize_token_embeddings(init_tokens_count)\n\n @classmethod\n @contextmanager\n def apply_clip_skip(\n cls,\n text_encoder: CLIPTextModel,\n clip_skip: int,\n ):\n skipped_layers = []\n try:\n for i in range(clip_skip):\n skipped_layers.append(text_encoder.text_model.encoder.layers.pop(-1))\n\n yield\n\n finally:\n while len(skipped_layers) > 0:\n text_encoder.text_model.encoder.layers.append(skipped_layers.pop())\n\n\nclass TextualInversionModel:\n embedding: torch.Tensor # [n, 768]|[n, 1280]\n\n @classmethod\n def from_checkpoint(\n cls,\n file_path: Union[str, Path],\n device: Optional[torch.device] = None,\n dtype: Optional[torch.dtype] = None,\n ):\n if not isinstance(file_path, Path):\n file_path = Path(file_path)\n\n result = cls() # TODO:\n\n if file_path.suffix == \".safetensors\":\n state_dict = load_file(file_path.absolute().as_posix(), device=\"cpu\")\n else:\n state_dict = torch.load(file_path, map_location=\"cpu\")\n\n # both v1 and v2 format embeddings\n # difference mostly in metadata\n if \"string_to_param\" in state_dict:\n if len(state_dict[\"string_to_param\"]) > 1:\n print(\n f'Warn: Embedding \"{file_path.name}\" contains multiple tokens, which is not supported. 
The first token will be used.'\n )\n\n result.embedding = next(iter(state_dict[\"string_to_param\"].values()))\n\n # v3 (easynegative)\n elif \"emb_params\" in state_dict:\n result.embedding = state_dict[\"emb_params\"]\n\n # v4(diffusers bin files)\n else:\n result.embedding = next(iter(state_dict.values()))\n\n if len(result.embedding.shape) == 1:\n result.embedding = result.embedding.unsqueeze(0)\n\n if not isinstance(result.embedding, torch.Tensor):\n raise ValueError(f\"Invalid embeddings file: {file_path.name}\")\n\n return result\n\n\nclass TextualInversionManager(BaseTextualInversionManager):\n pad_tokens: Dict[int, List[int]]\n tokenizer: CLIPTokenizer\n\n def __init__(self, tokenizer: CLIPTokenizer):\n self.pad_tokens = dict()\n self.tokenizer = tokenizer\n\n def expand_textual_inversion_token_ids_if_necessary(self, token_ids: list[int]) -> list[int]:\n if len(self.pad_tokens) == 0:\n return token_ids\n\n if token_ids[0] == self.tokenizer.bos_token_id:\n raise ValueError(\"token_ids must not start with bos_token_id\")\n if token_ids[-1] == self.tokenizer.eos_token_id:\n raise ValueError(\"token_ids must not end with eos_token_id\")\n\n new_token_ids = []\n for token_id in token_ids:\n new_token_ids.append(token_id)\n if token_id in self.pad_tokens:\n new_token_ids.extend(self.pad_tokens[token_id])\n\n return new_token_ids\n\n\nclass ONNXModelPatcher:\n from .models.base import IAIOnnxRuntimeModel\n from diffusers import OnnxRuntimeModel\n\n @classmethod\n @contextmanager\n def apply_lora_unet(\n cls,\n unet: OnnxRuntimeModel,\n loras: List[Tuple[LoRAModel, float]],\n ):\n with cls.apply_lora(unet, loras, \"lora_unet_\"):\n yield\n\n @classmethod\n @contextmanager\n def apply_lora_text_encoder(\n cls,\n text_encoder: OnnxRuntimeModel,\n loras: List[Tuple[LoRAModel, float]],\n ):\n with cls.apply_lora(text_encoder, loras, \"lora_te_\"):\n yield\n\n # based on\n # https://github.com/ssube/onnx-web/blob/ca2e436f0623e18b4cfe8a0363fcfcf10508acf7/api/onnx_web/convert/diffusion/lora.py#L323\n @classmethod\n @contextmanager\n def apply_lora(\n cls,\n model: IAIOnnxRuntimeModel,\n loras: List[Tuple[LoRAModel, float]],\n prefix: str,\n ):\n from .models.base import IAIOnnxRuntimeModel\n\n if not isinstance(model, IAIOnnxRuntimeModel):\n raise Exception(\"Only IAIOnnxRuntimeModel models supported\")\n\n orig_weights = dict()\n\n try:\n blended_loras = dict()\n\n for lora, lora_weight in loras:\n for layer_key, layer in lora.layers.items():\n if not layer_key.startswith(prefix):\n continue\n\n layer.to(dtype=torch.float32)\n layer_key = layer_key.replace(prefix, \"\")\n # TODO: rewrite to pass original tensor weight(required by ia3)\n layer_weight = layer.get_weight(None).detach().cpu().numpy() * lora_weight\n # accumulate when several LoRAs patch the same layer\n if layer_key in blended_loras:\n blended_loras[layer_key] += layer_weight\n else:\n blended_loras[layer_key] = layer_weight\n\n node_names = dict()\n for node in model.nodes.values():\n node_names[node.name.replace(\"/\", \"_\").replace(\".\", \"_\").lstrip(\"_\")] = node.name\n\n for layer_key, lora_weight in blended_loras.items():\n conv_key = layer_key + \"_Conv\"\n gemm_key = layer_key + \"_Gemm\"\n matmul_key = layer_key + \"_MatMul\"\n\n if conv_key in node_names or gemm_key in node_names:\n if conv_key in node_names:\n conv_node = model.nodes[node_names[conv_key]]\n else:\n conv_node = model.nodes[node_names[gemm_key]]\n\n weight_name = [n for n in conv_node.input if \".weight\" in n][0]\n orig_weight = model.tensors[weight_name]\n\n if orig_weight.shape[-2:] == (1, 1):\n if 
lora_weight.shape[-2:] == (1, 1):\n new_weight = orig_weight.squeeze((3, 2)) + lora_weight.squeeze((3, 2))\n else:\n new_weight = orig_weight.squeeze((3, 2)) + lora_weight\n\n new_weight = np.expand_dims(new_weight, (2, 3))\n else:\n if orig_weight.shape != lora_weight.shape:\n new_weight = orig_weight + lora_weight.reshape(orig_weight.shape)\n else:\n new_weight = orig_weight + lora_weight\n\n orig_weights[weight_name] = orig_weight\n model.tensors[weight_name] = new_weight.astype(orig_weight.dtype)\n\n elif matmul_key in node_names:\n weight_node = model.nodes[node_names[matmul_key]]\n matmul_name = [n for n in weight_node.input if \"MatMul\" in n][0]\n\n orig_weight = model.tensors[matmul_name]\n new_weight = orig_weight + lora_weight.transpose()\n\n orig_weights[matmul_name] = orig_weight\n model.tensors[matmul_name] = new_weight.astype(orig_weight.dtype)\n\n else:\n # warn? err?\n pass\n\n yield\n\n finally:\n # restore original weights\n for name, orig_weight in orig_weights.items():\n model.tensors[name] = orig_weight\n\n @classmethod\n @contextmanager\n def apply_ti(\n cls,\n tokenizer: CLIPTokenizer,\n text_encoder: IAIOnnxRuntimeModel,\n ti_list: List[Tuple[str, Any]],\n ) -> Tuple[CLIPTokenizer, TextualInversionManager]:\n from .models.base import IAIOnnxRuntimeModel\n\n if not isinstance(text_encoder, IAIOnnxRuntimeModel):\n raise Exception(\"Only IAIOnnxRuntimeModel models supported\")\n\n orig_embeddings = None\n\n try:\n ti_tokenizer = copy.deepcopy(tokenizer)\n ti_manager = TextualInversionManager(ti_tokenizer)\n\n def _get_trigger(ti_name, index):\n trigger = ti_name\n if index > 0:\n trigger += f\"-!pad-{i}\"\n return f\"<{trigger}>\"\n\n # modify tokenizer\n new_tokens_added = 0\n for ti_name, ti in ti_list:\n for i in range(ti.embedding.shape[0]):\n new_tokens_added += ti_tokenizer.add_tokens(_get_trigger(ti_name, i))\n\n # modify text_encoder\n orig_embeddings = text_encoder.tensors[\"text_model.embeddings.token_embedding.weight\"]\n\n embeddings = np.concatenate(\n (np.copy(orig_embeddings), np.zeros((new_tokens_added, orig_embeddings.shape[1]))),\n axis=0,\n )\n\n for ti_name, ti in ti_list:\n ti_tokens = []\n for i in range(ti.embedding.shape[0]):\n embedding = ti.embedding[i].detach().numpy()\n trigger = _get_trigger(ti_name, i)\n\n token_id = ti_tokenizer.convert_tokens_to_ids(trigger)\n if token_id == ti_tokenizer.unk_token_id:\n raise RuntimeError(f\"Unable to find token id for token '{trigger}'\")\n\n if embeddings[token_id].shape != embedding.shape:\n raise ValueError(\n f\"Cannot load embedding for {trigger}. 
It was trained on a model with token dimension {embedding.shape[0]}, but the current model has token dimension {embeddings[token_id].shape[0]}.\"\n )\n\n embeddings[token_id] = embedding\n ti_tokens.append(token_id)\n\n if len(ti_tokens) > 1:\n ti_manager.pad_tokens[ti_tokens[0]] = ti_tokens[1:]\n\n text_encoder.tensors[\"text_model.embeddings.token_embedding.weight\"] = embeddings.astype(\n orig_embeddings.dtype\n )\n\n yield ti_tokenizer, ti_manager\n\n finally:\n # restore\n if orig_embeddings is not None:\n text_encoder.tensors[\"text_model.embeddings.token_embedding.weight\"] = orig_embeddings\n","sub_path":"invokeai/backend/model_management/lora.py","file_name":"lora.py","file_ext":"py","file_size_in_byte":17468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"181554956","text":"from gensim.models import Word2Vec\nimport pandas as pd\nimport re as re\nimport numpy as np\nimport random as random\nimport matplotlib.pyplot as plt\n\nsize_vec = 100\n\n#read data from a file 'tweets'\ndef read_data(file_name = 'tweets.csv'):\n data = pd.read_csv(file_name)\n texts = data['text'].to_list()\n sentiment = data['airline_sentiment'].map({'positive': 1, 'negative': 0 , 'neutral': 2}).to_list()\n return texts , sentiment\n\n#Data tokenization of tweets\ndef re_sub_data(data_text):\n sentences = []\n for text in data_text:\n tokens = re.sub(r\"[^a-z0-9]+\",\" \" , text.lower()).split()\n sentences.append(tokens)\n return sentences\n\n#Run Word2Vec model in tweets data\ndef build_Word2Vec(sentences):\n model = Word2Vec(sentences , size= size_vec , window = 5 , min_count = 5 , workers= 4 , sg = 0 )\n return model \n\n#get vocab stored in word2Vec model\ndef get_vocab_list(model):\n vocab_list = np.array(list(model.wv.vocab.keys()))\n return vocab_list\n\n#get average of words in certain sentence\ndef get_average_sentence(sentence , model):\n vocabs = get_vocab_list(model)\n words = np.intersect1d(sentence , vocabs)\n\n if words.shape[0] > 0:\n words_add = np.sum(model.wv[words] , axis=0)\n average = np.divide(words_add , float(len(sentence)))\n return average\n else:\n return np.zeros(size_vec).tolist()\n \n#get average of all Sentences \ndef get_average_sentences(all_sentences , model , sentiment):\n all_avg = list()\n index = 0 \n for sen in all_sentences:\n sen_avg = get_average_sentence(sen , model)\n all_avg.append( (sen_avg , sentiment[index]) )\n index += 1\n return all_avg\n\n#get sum of words in a sentence\ndef get_sum_sentence(sentence , model):\n vocabs = get_vocab_list(model)\n words = np.intersect1d(sentence , vocabs)\n\n if words.shape[0] > 0:\n words_add = np.sum(model.wv[words] , axis=0)\n return words_add\n else:\n return np.zeros(size_vec).tolist()\n\n#get sum of all sentences \ndef get_sum_sentences(all_sentences , model , sentiment):\n all_sum = list()\n index = 0 \n for sen in all_sentences:\n sen_sum = get_sum_sentence(sen , model)\n all_sum.append( (sen_sum , sentiment[index]) )\n index += 1\n return all_sum\n\n#dividing data to train and test\ndef divide_data(data):\n random.shuffle(data) \n train_data = data[:int(len(data)*0.8)]\n test_data = data[int(len(train_data)):int(len(data)*1)]\n return train_data , test_data\n\n#split data to data(x) and labels (y)\ndef split_data(X_Y_data):\n X = [row[0] for row in X_Y_data]\n Y = [row[1] for row in X_Y_data]\n return X , Y\n\n#initiate Data to train and test after splitting\ndef init_data(data):\n train_data , test_data = divide_data(data)\n train_X , train_Y = 
split_data(train_data)\n test_X , test_Y = split_data(test_data)\n return train_X , train_Y , test_X , test_Y\n\n#sorting Sentiment by 0,1,2\ndef sort_by_sentiment(sentences):\n sentences.sort(key = lambda x:x[1])\n return sentences\n\n#Plotting statistics of Negative, Positive, Neutral \ndef plot_data_statistics(texts , sentiment):\n d0 = list(item for item in sentiment).count(0)\n d1 = list(item for item in sentiment).count(1)\n d2 = list(item for item in sentiment).count(2)\n objects = ('negative', 'positive', 'neutral')\n performance = [d0 , d1 , d2]\n plt.ylim([0 , d0+d1+d2])\n plt.bar(objects, performance, align='center', alpha=0.5)\n plt.xticks(objects, objects)\n plt.ylabel('data')\n plt.title('data statistics')\n plt.show()\n \ntexts , sentiment = read_data() \nplot_data_statistics(texts , sentiment)","sub_path":"Sentiment_Analysis.py","file_name":"Sentiment_Analysis.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"147183333","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport argparse\nimport pickle\nimport time\nimport json\n\n\nCLASS_NAMES = np.array(['badger', 'bird', 'bobcat', 'car', 'cat', 'coyote', 'deer', 'dog', \n\t\t\t\t\t\t'fox', 'insect', 'lizard', 'mountain_lion', 'opossum', 'rabbit',\n\t\t\t\t\t\t'raccoon', 'rodent', 'skunk', 'squirrel',\n\t\t\t\t\t ])\nIMG_HEIGHT = 160\nIMG_WIDTH = 160\nAUTOTUNE = tf.data.experimental.AUTOTUNE\nBATCH_SIZE = 32\nSHUFFLE_BUFFER_SIZE = 1000\nLEARNING_RATE = 0.0001\nTRAINING_EPOCHS = 10\n\n\ndef process_path(file_path):\n\tparts = tf.strings.split(file_path, os.path.sep)\n\tlabel = parts[-2] == CLASS_NAMES\n\timg = tf.io.read_file(file_path)\n\timg = tf.image.decode_jpeg(img, channels=3)\n\timg = tf.image.convert_image_dtype(img, tf.float32)\n\timg = tf.image.resize(img, [IMG_WIDTH, IMG_HEIGHT])\n\treturn img, label\n\n\ndef build_dataset(file_pattern):\n\tds = tf.data.Dataset.list_files(file_pattern)\n\tds = ds.map(process_path, num_parallel_calls=AUTOTUNE)\n\tds = ds.shuffle(buffer_size=SHUFFLE_BUFFER_SIZE)\n\tds = ds.batch(BATCH_SIZE)\n\tds = ds.prefetch(buffer_size=AUTOTUNE)\n\treturn ds\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--layers', help='number of layers to tune',\n type=int, default=0)\n\tparser.add_argument('--epochs', help='number of epochs to train',\n type=int, default=10)\n\tparser.add_argument('--rate', help='learning rate',\n type=float, default=0.0001)\n\targs = parser.parse_args()\n \n\trun_id = int(time.time())\n \n\ttrain = build_dataset('train/*/*.jpg')\n\ttest = build_dataset('test/*/*.jpg')\n\tIMG_SHAPE = (160, 160, 3)\n\tbase_model = tf.keras.applications.inception_v3.InceptionV3(input_shape=IMG_SHAPE,\n\t\t\t\t\t\t\t\t\t\t\t\t\tinclude_top=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tweights='imagenet')\n\tbase_model.summary()\n\tbase_model.trainable = True\n\tfor layer in base_model.layers[:len(base_model.layers) - args.layers]:\n\t\tlayer.trainable = False\n\tmodel = tf.keras.models.Sequential([\n\t\tbase_model,\n\t\ttf.keras.layers.GlobalAveragePooling2D(),\n\t\ttf.keras.layers.Dense(CLASS_NAMES.shape[0]),\n\t\ttf.keras.layers.Activation('softmax'),\n\t])\n\tmodel.compile(\n\t\toptimizer=tf.keras.optimizers.RMSprop(learning_rate=args.rate),\n\t\tloss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), # the model already ends in softmax\n\t\tmetrics=['accuracy']\n\t)\n\tmodel.summary()\n\thistory = model.fit(train,\n\t\t\t\t\t\tepochs=args.epochs,\n\t\t\t\t\t\tvalidation_data=test)\n\n\twith open('config_%s.json' % run_id, 'w') as fh:\n\t\tjson.dump({\n 'layers': args.layers,\n 'epochs': args.epochs,\n 'rate': args.rate,\n 'history': {\n k: [float(e) for e in v]\n for k, v in history.history.items()\n },\n }, fh)\n \n\tif not os.path.exists('classifier'):\n\t\tos.mkdir('classifier')\n\ttf.saved_model.save(model, 'classifier/')\n\n","sub_path":"train_caltech_classifier.py","file_name":"train_caltech_classifier.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"460908831","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nModule containing the proxytools Client class.\n\"\"\"\nimport asyncio\nimport datetime\nimport itertools\nimport logging\nimport pyppeteer\nimport re\nimport requests\nimport socket\nimport time\nimport yarl\n# Proxytools\nfrom .page import Page\nfrom .proxy import Proxy\n\n# Module vars\n_logger = logging.getLogger(__name__)\n\n\nclass TaskTimeout(Exception):\n \"\"\" Task Timeout Exception \"\"\"\n pass\n\nclass TaskError(Exception):\n \"\"\" Generic Task Exception \"\"\"\n pass\n\nclass ProxyToolError(Exception):\n \"\"\"\n Generic Proxytools exception\n \"\"\"\n pass\n\n\nclass Client:\n \"\"\"\n Proxytools client.\n\n This is the main entry point for proxytools.\n \"\"\"\n def __init__(self, debug=False):\n self.loop = asyncio.get_event_loop()\n self.geoip_url = yarl.URL('http://ip-api.com/json/')\n self.whois_server = 'whois.apnic.net'\n self.debug = debug\n self.loop.set_debug(self.debug)\n\n def _chunker(self, iterable, n, fillvalue=None):\n \"\"\"\n Split `iterable` into chunks of size `n`.\n\n :param iterable: iterator to chunk\n :param n: chunk size\n :param fillvalue: use value as padding\n\n :type iterable: list\n :type n: int\n :type fillvalue: str or int\n\n :returns: iterator\n \"\"\"\n args = [iter(iterable)] * n\n return itertools.zip_longest(*args, fillvalue=fillvalue)\n\n def detect_cloudflare(self, html):\n \"\"\"\n Return True if html is cloudflare.\n \"\"\"\n pattern = '.*Checking your browser before accessing.*'\n if re.search(pattern, html):\n return True\n else:\n return False\n\n async def _async_get_pages(self, urls, tab_concurrency=10, headless=True,\n timeout=10, bin_path=None, chrome_args=[]):\n \"\"\"\n Asynchronously get pages from `urls` using chromium.\n\n :param urls: URLs to get\n :param tab_concurrency: max concurrent chromium tabs\n :param headless: use chrome in headless mode\n :param bin_path: path to chrome executable\n :param chrome_args: headless chrome args\n\n :type urls: list\n :type tab_concurrency: int\n :type headless: bool\n :type bin_path: str\n :type chrome_args: list\n\n :returns: list\n \"\"\"\n kwargs = {\n 'headless': headless,\n 'args': chrome_args\n }\n if bin_path:\n kwargs['executablePath'] = bin_path\n browser = await pyppeteer.launch(kwargs)\n # browser = await pyppeteer.launch({'headless': headless})\n pages = []\n # Create incognito tab\n context = await browser.createIncognitoBrowserContext()\n for chunk in self._chunker(urls, tab_concurrency):\n new_pages = await asyncio.gather(\n *[self.get_page(url, context, timeout=timeout) for url in chunk if url],\n return_exceptions=True)\n pages.extend(new_pages)\n\n # Cleanup\n try:\n await context.close()\n except:\n pass\n\n try:\n await browser.close()\n except:\n pass\n\n return pages\n\n async def _async_get_source_urls(self, num=10, headless=True, 
bin_path=None, chrome_args=[]):\n \"\"\"\n Scrape proxy sources from Google.\n\n :param num: number of results to fetch [1-100]\n :param headless: use chrome in headless mode\n :param bin_path: path to chrome executable\n :param chrome_args: headless chrome args\n\n :type num: int\n :type headless: bool\n :type bin_path: str\n :type chrome_args: list\n\n :returns: list\n \"\"\"\n if num < 1 or num > 100:\n raise ValueError('source `num` must be between 1-100]')\n\n urls = []\n kwargs = {\n 'headless': headless,\n 'args': chrome_args\n }\n if bin_path:\n kwargs['executablePath'] = bin_path\n browser = await pyppeteer.launch(kwargs)\n # browser = await pyppeteer.launch({'headless': headless})\n # Create incognito tab\n context = await browser.createIncognitoBrowserContext()\n tab = await context.newPage()\n await tab._client.send('Emulation.clearDeviceMetricsOverride');\n await tab.goto('https://www.google.com/search?q=free+proxy+list&gws_rd=cr&num={}'.format(num))\n results = await tab.querySelectorAll('div.srg div.r ')\n for result in results:\n link = await result.querySelector('a')\n prop = await link.getProperty('href')\n url = await prop.jsonValue()\n urls.append(url)\n\n # Cleanup\n try:\n await tab.close()\n except:\n pass\n\n try:\n await context.close()\n except:\n pass\n\n try:\n await browser.close()\n except:\n pass\n\n return urls\n\n async def _async_test_proxy(self,\n proxy,\n url,\n headless=True,\n timeout=10,\n bin_path=None,\n chrome_args=[],\n selector=None):\n \"\"\"\n Test `proxy` by attempting to load `url'.\n\n :param proxy: The proxy to test\n :param url: the URL to test against\n :param selector: css selector used to verify page load\n :param headless: run chrome headless mode\n :param timeout: the async task timeout\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n\n :type proxy: proxytools.Proxy\n :type url: yarl.URL\n :type selector: str\n :type headless: bool\n :type timeout: int\n :type bin_path: str\n :type chrome_args: list\n\n :returns: dict\n \"\"\"\n chrome_args.append('--proxy-server=http={}'.format(str(proxy)))\n chrome_args.append('--proxy-server=https={}'.format(str(proxy)))\n\n kwargs = {\n 'headless': headless,\n 'args': chrome_args\n }\n\n if bin_path:\n kwargs['executablePath'] = bin_path\n\n browser = await pyppeteer.launch(kwargs)\n\n # Create incognito tab\n context = await browser.createIncognitoBrowserContext()\n try:\n page = await self.get_page(url, context, timeout=timeout, selector=selector)\n status = 'OK'\n except Exception as e:\n status = str(e)\n\n # Cleanup\n try:\n await context.close()\n except:\n pass\n\n try:\n await browser.close()\n except:\n pass\n\n return {'proxy': str(proxy), 'status': status}\n\n async def _async_test_proxies(self,\n proxies,\n url,\n headless=True,\n timeout=10,\n browser_concurrency=1,\n exit_success_count=None,\n selector=None,\n bin_path=None,\n chrome_args=[]):\n \"\"\"\n Test `proxies` by attempting to load `url' and awaiting `selector`.\n\n :param proxies: list of proxies\n :param url: the URL to test the proxies against\n :param headless: run chrome headless mode\n :param timeout: seconds to wait before quitting each test\n :param browser_concurrency: max concurrent chromium tabs\n :param selector: css selector used to verify page load\n :param exit_success_count: exit when number of working proxies is reached\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n\n :type proxies: list of proxytools.Proxy\n :type url: 
yarl.URL\n :type headless: bool\n :type timeout: int\n :type browser_concurrency: int\n :type selector: str\n :type exit_success_count: int\n :type bin_path: str\n :type chrome_args: list\n\n :returns: dict\n \"\"\"\n results = []\n count = 0\n status_ok_count = 0\n start_ts = datetime.datetime.now()\n for chunk in self._chunker(proxies, browser_concurrency):\n n_results = await asyncio.gather(\n *[self._async_test_proxy(\n proxy, url, headless=headless,\n timeout=timeout, selector=selector, bin_path=bin_path, chrome_args=chrome_args) for proxy in chunk],\n return_exceptions=True)\n count += len(chunk)\n minutes = round((datetime.datetime.now() - start_ts).seconds / 60, 2)\n _logger.info('Tested {} of {} proxies in {} minutes'\n .format(count, len(proxies), minutes))\n for result in n_results:\n results.append(result)\n if isinstance(result, dict):\n if result['status'] == 'OK':\n status_ok_count += 1\n if exit_success_count is not None:\n if status_ok_count == exit_success_count:\n return results\n # results.extend(n_results)\n return results\n\n async def get_page(self, url, context, timeout=10, selector=None):\n \"\"\"\n Asynchronously fetch page from `url` using chromium\n browser `context`.\n\n :param url: the page URL\n :param context: pyppeteer browser context\n :param timeout: seconds to wait before quiting\n :param selector: css selector used to verify page load\n\n :type url: yarl.URL\n :type context: pyppeteer.browser.BrowserContext\n :type timeout: int\n :type selector: str\n\n :returns: Page\n :raises: TaskTimeout\n \"\"\"\n tab = await context.newPage()\n # Fix viewport\n await tab._client.send('Emulation.clearDeviceMetricsOverride');\n _logger.info('Fetching {}'.format(url))\n # Get page html\n # Proxy timeouts don't seem to respect load_timeout, so enforce it with asyncio\n try:\n resp = await asyncio.wait_for(tab.goto(str(url), timeout=timeout*1000), timeout=timeout)\n except asyncio.TimeoutError:\n _logger.warning('Timed out fetching: {}'.format(str(url)))\n raise TaskTimeout('Navigation timed out')\n except Exception as e:\n raise TaskError(str(e))\n\n # Handle cloudlflare\n html = await resp.text()\n\n # Needs work\n # if self.detect_cloudflare(html):\n # _logger.info('Cloudflare detected - awaiting navigation')\n # await asyncio.sleep(12)\n # try:\n # resp = await asyncio.wait_for(tab.reload(), timeout=timeout)\n # except asyncio.TimeoutError:\n # _logger.warning('Timed out fetching: {}'.format(str(url)))\n # raise TaskTimeout('Navigation timed out')\n # except Exception as e:\n # raise TaskError(str(e))\n\n _logger.info('Got {}'.format(str(url)))\n if selector:\n await tab.waitForSelector(selector, timeout=timeout*1000)\n html = await resp.text()\n # Close page tab\n try:\n await tab.close()\n except:\n pass\n page = Page(url=url, html=html)\n return page\n\n def get_pages(self, urls, timeout=10, tab_concurrency=10, headless=True, bin_path=None, chrome_args=[]):\n \"\"\"\n Get pages from `urls` using chromium browser.\n\n Uses async functions to fetch the pages in concurrent browser\n tabs.\n\n :param urls: list of URL strings\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n :param tab_concurrency: max concurrent chromium tabs\n\n :type urls: list\n :type bin_path: str\n :type chrome_args: list\n :type tab_concurrency: int\n\n :type urls: list\n :type bin_path: str\n\n :returns: proxytools.page.Page\n \"\"\"\n # Convert url strings in to yarl.URLs\n urls = [yarl.URL(url) for url in urls]\n results = 
self.loop.run_until_complete(\n self._async_get_pages(urls,\n timeout=timeout,\n headless=headless,\n bin_path=bin_path,\n tab_concurrency=tab_concurrency,\n chrome_args=chrome_args))\n pages = []\n for result in results:\n if isinstance(result, Page):\n pages.append(result)\n else:\n _logger.warning(result)\n\n return pages\n\n def get_source_urls(self, headless=True, num=10, bin_path=None, chrome_args=[]):\n \"\"\"\n Search Google for URLs containing free proxy lists.\n\n :param num: number of proxy sources to get from Google\n :param headless: run chrome headless mode\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n\n :type num: int\n :type headless: bool\n :type bin_path: str\n :type chrome_args: list\n\n :returns: list\n \"\"\"\n _logger.info('Searching Google for proxy sources...')\n return self.loop.run_until_complete(\n self._async_get_source_urls(headless=headless, num=num, bin_path=bin_path, chrome_args=chrome_args))\n\n def get_pages_with_proxies(self, source_num=10, headless=True, tab_concurrency=10, bin_path=None, chrome_args=[]):\n \"\"\"\n Scrape the web for pages containing proxies.\n\n :param source_num: number of proxy sources to get from Google\n :param headless: run chrome headless mode\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n :param tab_concurrency: max concurrent chromium tabs\n\n :type source_num: int\n :type headless: bool\n :type bin_path: str\n :type chrome_args: list\n :type tab_concurrency: int\n\n :returns: list\n \"\"\"\n urls = self.get_source_urls(num=source_num, headless=headless, bin_path=bin_path, chrome_args=chrome_args)\n _logger.info('Found {} source URLs'.format(len(urls)))\n pages = self.get_pages(urls, headless=headless, tab_concurrency=tab_concurrency, bin_path=bin_path, chrome_args=chrome_args)\n _logger.info('Downloaded {} pages'.format(len(pages)))\n proxy_pages = [page for page in pages if page.contains_ips()]\n _logger.info('Found {} pages containing proxies'.format(len(proxy_pages)))\n return proxy_pages\n\n def search_proxies(self, source_num=10, tab_concurrency=10, headless=True, bin_path=None, chrome_args=[]):\n \"\"\"\n Scrape the web for proxies.\n\n :param source_num: number of proxy sources to get from Google\n :param headless: run chrome headless mode\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n :param tab_concurrency: max concurrent chromium tabs\n\n :type source_num: int\n :type headless: bool\n :type bin_path: str\n :type chrome_args: list\n :type tab_concurrency: int\n\n :returns: list\n \"\"\"\n proxies = []\n proxy_pages = self.get_pages_with_proxies(source_num=source_num,\n headless=headless,\n tab_concurrency=tab_concurrency,\n bin_path=bin_path,\n chrome_args=chrome_args)\n for page in proxy_pages:\n proxies.extend(page.proxies())\n _logger.info('Scraped {} proxies'.format(len(proxies)))\n return proxies\n\n def test_proxies(self, proxies, url, timeout=10,\n selector=None, headless=True, browser_concurrency=2,\n exit_success_count=None, bin_path=None, chrome_args=[]):\n \"\"\"\n Test proxies can load page at `url`.\n\n :param proxies: list of proxies\n :param url: the URL to test the proxies against\n :param headless: run chrome headless mode\n :param timeout: seconds to wait before quitting each test\n :param browser_concurrency: max concurrent chromium browsers\n :param selector: css selector used to verify page load\n :param exit_success_count: exit when number of working proxies is 
reached\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n\n :type proxies: list of proxytools.Proxy\n :type url: yarl.URL\n :type headless: bool\n :type timeout: int\n :type browser_concurrency: int\n :type selector: str\n :type exit_success_count: int\n :type bin_path: str\n :type chrome_args: list\n\n :returns: dict\n \"\"\"\n return self.loop.run_until_complete(\n self._async_test_proxies(proxies,\n url,\n timeout=timeout,\n browser_concurrency=browser_concurrency,\n selector=selector,\n exit_success_count=exit_success_count,\n headless=headless,\n bin_path=bin_path,\n chrome_args=chrome_args))\n\n def get_proxies(self, test_url, limit=10, timeout=10,\n selector=None, headless=True, browser_concurrency=2,\n tab_concurrency=10, source_num=10,\n bin_path=None, chrome_args=[]):\n \"\"\"\n Scrape the web for working proxies.\n Test proxies can load `test_url`.\n\n :param proxies: list of proxies\n :param test_url: the URL to test the proxies against\n :param headless: run chrome headless mode\n :param timeout: seconds to wait before quitting each test\n :param browser_concurrency: max number of concurrent chromium browsers\n :param tab_concurrency: max number of concurrent chromium tabs\n :param selector: css selector used to verify proxy is working\n :param source_num: number of proxy sources to get from Google\n :param bin_path: path to chrome executable\n :param chrome_args: headless chromium args\n\n :type proxies: list of proxytools.Proxy\n :type test_url: yarl.URL\n :type headless: bool\n :type timeout: int\n :type browser_concurrency: int\n :type tab_concurrency: int\n :type selector: str\n :type source_num: int\n :type bin_path: str\n :type chrome_args: list\n\n :returns: dict\n \"\"\"\n proxies = self.search_proxies(source_num=source_num,\n headless=headless,\n tab_concurrency=tab_concurrency,\n bin_path=bin_path,\n chrome_args=chrome_args)\n\n results = self.test_proxies(proxies,\n test_url,\n headless=headless,\n browser_concurrency=browser_concurrency,\n selector=selector,\n exit_success_count=limit,\n bin_path=bin_path,\n chrome_args=chrome_args)\n proxies = [r for r in results if r['status'] == 'OK']\n return proxies[0:limit]\n\n def get_geography(self, proxies):\n \"\"\"\n Get geographic location of `proxies`.\n\n :param proxies: list of proxy URLs\n :type proxies: list\n :returns: dict\n \"\"\"\n results = {}\n for p in proxies:\n proxy = Proxy.from_string(p)\n country = proxy.country()\n results[p] = country\n time.sleep(1)\n\n return results\n","sub_path":"proxytools/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":20380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230325527","text":"import re\n\n# from reportes.Reportes import RealizarReportes,Error\nfrom Analisis_Ascendente.reportes.Reportes import RealizarReportes, Error\nfrom Analisis_Ascendente.storageManager.jsonMode import *\nfrom tkinter import messagebox as MessageBox\nfrom Analisis_Ascendente.Tabla_simbolos.TablaSimbolos import TablaDeSimbolos\n\nL_errores_lexicos = []\nL_errores_sintacticos = []\nconsola = []\nexceptions = []\nts_global = TablaDeSimbolos({})\nlista_optimizaciones_C3D = []\ncolumna = 0\n\nfrom graphviz import Digraph\n\nvarGramatical = []\nvarSemantico = []\nreservadas = {\n 'smallint': 'SMALLINT',\n 'integer': 'INTEGER',\n 'bigint': 'BIGINT',\n 'decimal': 'DECIMAL',\n 'numeric': 'NUMERIC',\n 'double': 'DOUBLE',\n 'precision': 'PRECISION',\n 'real': 'REAL',\n 'money': 
'MONEY',\n 'text': 'TEXT',\n 'varying': 'VARYING',\n 'varchar': 'VARCHAR',\n 'character': 'CHARACTER',\n 'char': 'CHAR',\n 'timestamp': 'TIMESTAMP',\n 'date': 'DATE',\n 'time': 'TIME',\n 'interval': 'INTERVAL',\n 'year': 'YEAR',\n 'month': 'MONTH',\n 'day': 'DAY',\n 'hour': 'HOUR',\n 'minute': 'MINUTE',\n 'second': 'SECOND',\n 'to': 'TO',\n 'boolean': 'BOOLEAN',\n 'true': 'TRUE',\n 'false': 'FALSE',\n 'create': 'CREATE',\n 'type': 'TYPE',\n 'as': 'AS',\n 'enum': 'ENUM',\n 'not': 'NOT',\n 'and': 'AND',\n 'or': 'OR',\n 'is': 'IS',\n 'null': 'NULL',\n 'between': 'BETWEEN',\n 'in': 'IN',\n 'ilike': 'ILIKE',\n 'like': 'LIKE',\n 'similar': 'SIMILAR',\n 'table': 'TABLE',\n 'replace': 'REPLACE',\n 'database': 'DATABASE',\n 'databases': 'DATABASES',\n 'show': 'SHOW',\n 'if': 'IF',\n 'exists': 'EXISTS',\n 'alter': 'ALTER',\n 'rename': 'RENAME',\n 'owner': 'OWNER',\n 'mode': 'MODE',\n 'drop': 'DROP',\n 'constraint': 'CONSTRAINT',\n 'unique': 'UNIQUE',\n 'check': 'CHECK',\n 'references': 'REFERENCES',\n 'primary': 'PRIMARY',\n 'key': 'KEY',\n 'foreign': 'FOREIGN',\n 'add': 'ADD',\n 'column': 'COLUMN',\n 'set': 'SET',\n 'select': 'SELECT',\n 'from': 'FROM',\n 'delete': 'DELETE',\n 'where': 'WHERE',\n 'default': 'DEFAULT',\n 'insert': 'INSERT',\n 'into': 'INTO',\n 'values': 'VALUES',\n 'update': 'UPDATE',\n 'count': 'COUNT',\n 'avg': 'AVG',\n 'sum': 'SUM',\n 'distinct': 'DISTINCT',\n 'abs': 'ABS',\n 'cbrt': 'CBRT',\n 'ceil': 'CEIL',\n 'ceiling': 'CEILING',\n 'degrees': 'DEGREES',\n 'div': 'DIV',\n 'exp': 'EXP',\n 'factorial': 'FACTORIAL',\n 'floor': 'FLOOR',\n 'gcd': 'GCD',\n 'lcm': 'LCM',\n 'ln': 'LN',\n 'log': 'LOG',\n 'log10': 'LOG10',\n 'min_scale': 'MIN_SCALE',\n 'mod': 'MOD',\n 'pi': 'PI',\n 'power': 'POWER',\n 'radians': 'RADIANS',\n 'round': 'ROUND',\n 'scale': 'SCALE',\n 'sign': 'SIGN',\n 'sqrt': 'SQRT',\n 'trim_scale': 'TRIM_SCALE',\n 'truc': 'TRUC',\n 'width_bucket': 'WIDTH_BUCKET',\n 'random': 'RANDOM',\n 'setseed': 'SETSEED',\n 'max': 'MAX',\n 'min': 'MIN',\n 'having': 'HAVING',\n 'union': 'UNION',\n 'intersect': 'INTERSECT',\n 'except': 'EXCEPT',\n 'all': 'ALL',\n 'acos': 'ACOS',\n 'acosd': 'ACOSD',\n 'asin': 'ASIN',\n 'asind': 'ASIND',\n 'atan': 'ATAN',\n 'atand': 'ATAND',\n 'atan2': 'ATAN2',\n 'atan2d': 'ATAN2D',\n 'cos': 'COS',\n 'cosd': 'COSD',\n 'cot': 'COT',\n 'cotd': 'COTD',\n 'sin': 'SIN',\n 'sind': 'SIND',\n 'tan': 'TAN',\n 'tand': 'TAND',\n 'sinh': 'SINH',\n 'cosh': 'COSH',\n 'tanh': 'TANH',\n 'asinh': 'ASINH',\n 'acosh': 'ACOSH',\n 'atanh': 'ATANH',\n 'group': 'GROUP',\n 'by': 'BY',\n 'now': 'NOW',\n 'current_date': 'CURRENT_DATE',\n 'current_time': 'CURRENT_TIME',\n 'date_part': 'date_part',\n 'isnull': 'ISNULL',\n 'notnull':'NOTNULL',\n 'unknown': 'UNKNOWN',\n 'extract': 'EXTRACT',\n 'inherits': 'INHERITS',\n 'serial': 'SERIAL',\n 'on': 'ON',\n 'inner': 'INNER',\n 'join': 'JOIN',\n 'left': \"LEFT\",\n 'right': \"RIGHT\",\n 'full': 'FULL',\n 'outer': 'OUTER',\n 'md5': 'MD5',\n 'sing': 'SING',\n 'width_bucket': 'WIDTH_BUCKET',\n 'trunc': 'TRUNC',\n 'length': 'LENGTH',\n 'substring': 'SUBSTRING',\n 'trim': 'TRIM',\n 'sha256': 'SHA256',\n 'substr': 'SUBSTR',\n 'get_byte': 'GET_BYTE',\n 'set_byte': 'SET_BYTE',\n 'convert': 'CONVERT',\n 'encode': 'ENCODE',\n 'decode': 'DECODE',\n 'greatest': 'GREATEST',\n 'least': 'LEAST',\n 'order': 'ORDER',\n 'by': 'BY',\n 'limit': 'LIMIT',\n 'offset': 'OFFSET',\n 'when': 'WHEN',\n 'case': 'CASE',\n 'then': 'THEN',\n 'end': 'END',\n 'use': 'USE',\n 'asc': 'ASC',\n 'desc': 'DESC',\n 'constant':'CONSTANT',\n 'collate':'COLLATE',\n 
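# NOTE: a few keys in this literal ('type', 'end', 'by', 'width_bucket') appear\n # twice; Python keeps only the last occurrence, which is harmless here because\n # every duplicate maps to the same token name.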
'anyelement':'ANYELEMENT',\n 'anycompatible':'ANYCOMPATIBLE',\n 'out':'OUT',\n 'alias':'ALIAS',\n 'for':'FOR',\n 'function':'FUNCTION',\n 'returns':'RETURNS',\n 'language':'LANGUAGE',\n 'plpgsql':'PLPGSQL',\n 'declare':'DECLARE',\n 'begin':'BEGIN',\n 'end':'END',\n 'return':'RETURN',\n 'query':'QUERY',\n 'index' : 'INDEX',\n 'hash' : 'HASH',\n 'using' : 'USING',\n 'nulls' : 'NULLS',\n 'first' : 'FIRST',\n 'last' : 'LAST',\n 'lower' : 'LOWER',\n 'procedure': 'PROCEDURE',\n 'call' : 'CALL',\n 'next' : 'NEXT',\n 'else' : 'ELSE',\n 'elsif' : 'ELSIF',\n 'type': 'TYPE',\n 'rowtype': 'ROWTYPE',\n 'record':'RECORD',\n 'strict':'STRICT',\n 'returning':'RETURNING',\n 'inout':'INOUT',\n 'execute' : 'EXECUTE'\n}\n\ntokens = [\n 'PTCOMA',\n 'COMA',\n 'LLIZQ',\n 'LLDR',\n 'PARIZQ',\n 'PARDR',\n 'IGUAL',\n 'MAS',\n 'MENOS',\n 'GNOT',\n 'MULT',\n 'DIVI',\n 'ANDO',\n 'ORO',\n 'NOTO',\n 'MENOR',\n 'MAYOR',\n 'IGUALIGUAL',\n 'NOIGUAL',\n 'NUMDECIMAL',\n 'ENTERO',\n 'CADENA',\n 'ID',\n 'MODU',\n 'PUNTO',\n 'EXPO',\n 'MAYORIGUAL',\n 'MENORIGUAL',\n 'MENMEN',\n 'MAYMAY',\n 'MENMAY',\n 'CRIZQ',\n 'CRDR',\n 'DOSPT',\n 'DOLAR',\n ] + list(reservadas.values())\n\n# Tokens\nt_PTCOMA = r';'\nt_COMA = r','\nt_PARIZQ = r'\\('\nt_PARDR = r'\\)'\nt_IGUAL = r'='\nt_MAS = r'\\+'\nt_MENOS = r'-'\nt_GNOT = r'~'\nt_MULT = r'\\*'\nt_DIVI = r'/'\nt_ANDO = r'\\&'\nt_ORO = r'\\|'\nt_NOTO = r'!'\nt_MENOR = r'<'\nt_MAYOR = r'>'\nt_IGUALIGUAL = r'=='\nt_MAYORIGUAL = r'>='\nt_MENORIGUAL = r'<='\nt_MENMEN = r'<<'\nt_MAYMAY = r'>>'\nt_NOIGUAL = r'!='\nt_MENMAY = r'<>'\nt_MODU = r'%'\nt_PUNTO = r'\\.'\nt_EXPO = r'\\^'\nt_LLIZQ = r'\\{'\nt_LLDR = r'\\}'\nt_CRIZQ = r'\\['\nt_CRDR = r'\\]'\nt_DOSPT = r':'\nt_DOLAR = r'\\$'\n\ndef t_NUMDECIMAL(t):\n r'\\d+\\.\\d+'\n try:\n t.value = float(t.value)\n global columna\n columna = contador_columas(len(str(t.value)))\n except ValueError:\n ##print(\"Valor no es parseable a decimal %d\", t.value)\n t.value = 0\n return t\n\n\ndef t_ENTERO(t):\n r'\\d+'\n try:\n t.value = int(t.value)\n global columna\n columna = contador_columas(len(str(t.value)))\n except ValueError:\n ##print(\"Valor no es parseable a integer %d\", t.value)\n t.value = 0\n return t\n\n\ndef t_ID(t):\n r'[a-zA-Z_][a-zA-Z_0-9_]*'\n t.type = reservadas.get(t.value.lower(), 'ID')\n global columna\n columna = contador_columas(len(str(t.value)))\n return t\n\n\ndef t_CADENA(t):\n r'(\\\".*?\\\")|(\\'.*?\\')'\n t.value = t.value[1:-1] # remuevo las comillas\n global columna\n columna = contador_columas(len(str(t.value)))\n return t\n\n\ndef t_COMENTARIO_MULTILINEA(t):\n r'/\\*(.|\\n)*?\\*/'\n t.lexer.lineno += t.value.count('\\n')\n global columna\n columna = 0\n\n\n# Comentario simple // ...\ndef t_COMENTARIO_SIMPLE(t):\n r'--.*\\n'\n t.lexer.lineno += 1\n global columna\n columna = 0\n\n\n# t_ignore = \" \\t\"\ndef t_IGNORAR(t):\n r'\\ |\\t'\n global columna\n if t.value == '\\t':\n columna = contador_columas(columna + 9)\n else:\n columna = contador_columas(columna)\n\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n global columna\n columna = 0\n\n\ndef t_error(t):\n global L_errores_lexicos;\n global columna\n\n colum = contador_columas(columna)\n data = Error(str(\"Error Lexico\"), str(t.value[0]), str(t.lexer.lineno), str(colum))\n L_errores_lexicos.append(data)\n ##print(\"Caracter irreconocible! 
'%s'\" % t.value[0])\n t.lexer.skip(1)\n\n\nimport ply.lex as lex\n\nlexer = lex.lex(reflags=re.IGNORECASE)\nlexer.lineno = 1\nlexer.input(\"\")\n# lex.lex(reflags=re.IGNORECASE)\n\n# from expresion import *\n\n# from Instrucciones.expresion import *\n# from Instrucciones.instruccion import *\n# from Instrucciones.Time import Time\n# from Instrucciones.Create.createTable import CreateTable\n# from Instrucciones.Create.createDatabase import CreateReplace,ComplementoCR\n# from Instrucciones.Select.select import Select, Limit, Having, GroupBy\n# from Instrucciones.Select.union import Union\n# from Instrucciones.Use_Data_Base.useDB import Use\n# from Instrucciones.Select.select1 import selectTime\n\n\nfrom Analisis_Ascendente.Instrucciones.expresion import *\nfrom Analisis_Ascendente.Instrucciones.instruccion import *\nfrom Analisis_Ascendente.Instrucciones.Time import Time\nfrom Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable, Acompaniamiento,Campo\nfrom Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace, ComplementoCR\nfrom Analisis_Ascendente.Instrucciones.Select.select import Select, Limit, Having, GroupBy\nfrom Analisis_Ascendente.Instrucciones.Select.union import Union\nfrom Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use\nfrom Analisis_Ascendente.Instrucciones.Select.select1 import selectTime\nfrom Analisis_Ascendente.Instrucciones.Insert.insert import InsertInto\nfrom Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3\nfrom Analisis_Ascendente.Instrucciones.Expresiones.IdAsId import IdAsId\nfrom Analisis_Ascendente.Instrucciones.Expresiones.Trigonometrica import Trigonometrica\nfrom Analisis_Ascendente.Instrucciones.Select import selectInst\nfrom Analisis_Ascendente.Instrucciones.Expresiones.Math import Math_\nfrom Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion\nfrom Analisis_Ascendente.Instrucciones.Expresiones.Binario import Binario\nfrom Analisis_Ascendente.Instrucciones.Drop.drop import Drop\nfrom Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase\nfrom Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable\nfrom Analisis_Ascendente.Instrucciones.Alter.alterTable import Alter\nfrom Analisis_Ascendente.Instrucciones.Update.Update import Update\nfrom Analisis_Ascendente.Instrucciones.Delete.delete import Delete\nfrom Analisis_Ascendente.Instrucciones.Expresiones.Where import Where\nfrom Analisis_Ascendente.Instrucciones.Type.type import CreateType\nfrom Analisis_Ascendente.Instrucciones.Select import SelectDist\nfrom Analisis_Ascendente.Instrucciones.Type.type import CreateType\n\n#----------------------------------Imports FASE2--------------------------\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.Declaracion import Declaracion\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.Alias import Alias\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.plinsert import plinsert\nfrom Analisis_Ascendente.Instrucciones.Index.Index import Index\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import Parametro\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.Return import Return\nfrom Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex\nfrom Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure\nfrom 
Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.Ifpl import Ifpl\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion\nfrom Analisis_Ascendente.Instrucciones.PLPGSQL.SelectCount import SelectCount\n\nprecedence = (\n ('left', 'OR'),\n ('left', 'AND', 'BETWEEN', 'NOT', 'LIKE', 'ILIKE', 'IN', 'ON'),\n ('left', 'ORO'),\n ('left', 'ANDO'),\n ('left', 'NOIGUAL', 'MENMAY', 'IGUALIGUAL'),\n ('left', 'MAYOR', 'MENOR', 'MAYORIGUAL', 'MENORIGUAL'),\n ('left', 'MAYMAY', 'MENMEN'),\n ('left', 'MAS', 'MENOS'),\n ('left', 'MULT', 'DIVI', 'MODU'),\n ('left', 'EXPO'),\n ('left', 'PARIZQ', 'PARDR'),\n ('right', 'UMENOS', 'NEG', 'NB', 'UMAS')\n)\n\n\n# ('left', 'NOTO', 'GNOT'),\n\n\n# varSemantico.append('SEMANTICO')\ndef p_s(t):\n 's : instrucciones'\n t[0] = t[1]\n varGramatical.append('s ::= intrucciones')\n varSemantico.append('. ')\n\n\ndef p_instrucciones(t):\n '''instrucciones : instrucciones instruccion'''\n t[1].append(t[2])\n t[0] = t[1]\n varGramatical.append('s ::= intrucciones')\n varSemantico.append('instrucciones=instruecciones;instrucciones.append(instruccion)')\n\n\ndef p_instruccion(t):\n 'instrucciones : instruccion'\n t[0] = [t[1]]\n varGramatical.append('instrucciones ::= instrucciones instruccion')\n varSemantico.append('intrucciones=[instruccion] ')\n\n\ndef p_useDatabase(t):\n 'instruccion : USE ID PTCOMA'\n t[0] = Use(t[2])\n varGramatical.append('instruccion ::= USE ID PTCOMA')\n varSemantico.append('instruccion = Use(ID) ')\n\n\n# CREATE\ndef p_create(t):\n 'instruccion : CREATE TABLE ID PARIZQ campos PARDR PTCOMA'\n global columna\n t[0] = CreateTable(t[3], t[5], None, lexer.lineno, columna)\n varGramatical.append('instruccion :: = CREATE TABLE ID PARIZQ campos PARDR PTCOMA')\n varSemantico.append('instruccion :: = CreateTable(ID,campos,None) ')\n\n\ndef p_create2(t):\n 'instruccion : CREATE TABLE ID PARIZQ campos PARDR INHERITS PARIZQ ID PARDR PTCOMA'\n global columna\n t[0] = CreateTable(t[3], t[5], t[9], lexer.lineno, columna)\n varGramatical.append('instruccion :: = CREATE TABLE ID PARIZQ campos PARDR INHERITS PARIZQ ID PARDR PTCOMA')\n varSemantico.append('instruccion = CreateTable(ID, campos,ID)')\n\n\ndef p_campos(t):\n '''campos : campos COMA campo'''\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('campos :: = campos COMA campo')\n varSemantico.append('campos = campos; campos.append(campo)')\n\n\ndef p_campos2(t):\n 'campos : campo'\n t[0] = [t[1]]\n varGramatical.append('campos :: = campo')\n varSemantico.append(' campos = campo')\n\n\ndef p_campo(t):\n '''campo : ID tipo acompaniamiento'''\n global columna\n t[0] = Campo(1, t[1], t[2], t[3], None, None, None, lexer.lineno, columna)\n varGramatical.append('campo :: = ID tipo acompaniamiento')\n varSemantico.append('campo = Campo(1,ID,tipo,acompaniamiento,None,None,None) ')\n\n\ndef p_campoSimple(t):\n 'campo : ID tipo'\n global columna\n t[0] = Campo(1, t[1], t[2], None, None, None, None, lexer.lineno, columna)\n varGramatical.append('campo :: = ID tipo')\n varSemantico.append('campo = Campo(1,ID,Tipo,None,None,None,None)')\n\n\ndef p_foreign(t):\n 'campo : CONSTRAINT ID FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR'\n global columna\n t[0] = Campo(2, t[2], None, 
None, t[6], t[9], t[11], lexer.lineno, columna)\n varGramatical.append('campo :: = CONSTRAINT ID FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR')\n varSemantico.append('campo = Campo(2,CONSTRAINT,None,None,listaID,ID,listaID)')\n\n\ndef p_foreign2(t):\n 'campo : FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR'\n global columna\n t[0] = Campo(3, None, None, None, t[4], t[7], t[9], lexer.lineno, columna)\n varGramatical.append('campo :: = FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR')\n varSemantico.append(' campo = Campo(3,None,None,None,listaID,ID,listaID)')\n\n\ndef p_campoTypenotocar(t):\n 'campo : ID ID'\n global columna\n t[0] = Campo(5, t[1], t[2], None, None, None, None, lexer.lineno, columna)\n varGramatical.append('campo :: = ID tipo')\n varSemantico.append(' campo = Campo(5, ID, ID, None, None, None, None,lexer.lineno,columna)')\n\n\ndef p_campoCadenas(t):\n 'campo : CADENA'\n global columna\n t[0] = Primitivo(t[1], lexer.lineno, columna) #\n varGramatical.append('campo :: = CADENA')\n varSemantico.append(' campo = Primitivo(CADENA)')\n\n\ndef p_primary(t):\n 'campo : PRIMARY KEY PARIZQ listaID PARDR'\n global columna\n t[0] = Campo(4, t[4], None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('campo :: = PRIMARY KEY PARIZQ listaID PARDR')\n varSemantico.append('campo = Campo(4, listaID, None, None, None, None, None) ')\n\n\ndef p_listacampo(t):\n '''acompaniamiento : acompaniamiento acom'''\n t[1].append(t[2])\n t[0] = t[1]\n # print(t[0])\n varGramatical.append('acompaniamiento :: = acompaniamiento acom')\n varSemantico.append(' acompaniamiento = acompaniamiento; acompaniamiento.append(acom) ')\n\n\ndef p_listacampo2(t):\n 'acompaniamiento : acom'\n t[0] = [t[1]]\n varGramatical.append('acompaniamiento :: = acom')\n varSemantico.append('acompaniamiento.append([acom]) ')\n\n\ndef p_acompaniamiento(t):\n '''acom : NOT NULL\n | NULL\n | UNIQUE PARIZQ listaID PARDR\n | DEFAULT valores\n | PRIMARY KEY\n | CONSTRAINT ID\n | REFERENCES ID\n | CHECK PARIZQ checkprima PARDR\n '''\n\n if t[1].lower() == 'not':\n\n t[0] = Acompaniamiento('NOTNULL', None, lexer.lineno, columna)\n varGramatical.append('acom :: = NOT NULL')\n varSemantico.append('acom = Acompaniamiento(NOTNULL, None) ')\n elif t[1].lower() == 'null':\n\n t[0] = Acompaniamiento('NULL', None, lexer.lineno, columna)\n varGramatical.append('acom :: = NULL')\n varSemantico.append('acom = Acompaniamiento(NULL, None)')\n elif t[1].lower() == 'unique':\n\n t[0] = Acompaniamiento('UNIQUE', t[3], lexer.lineno, columna)\n varGramatical.append('acom :: = UNIQUE PARIZQ listaID PARDR')\n varSemantico.append('acom = Acompaniamiento(UNIQUE, t[3])')\n elif t[1].lower() == 'default':\n\n t[0] = Acompaniamiento('DEFAULT', t[2], lexer.lineno, columna)\n varGramatical.append('acom :: = DEFAULT valores')\n varSemantico.append('acom = Acompaniamiento(DEFAULT, t[2])')\n elif t[1].lower() == 'primary':\n\n t[0] = Acompaniamiento('PRIMARYKEY', None, lexer.lineno, columna)\n varGramatical.append('acom :: = PRIMARY KEY')\n varSemantico.append('acom = Acompaniamiento(PRIMARYKEY, None)')\n elif t[1].lower() == 'constraint':\n t[0] = Acompaniamiento('CONSTRAINT', t[2], lexer.lineno, columna)\n varGramatical.append('acom :: = CONSTRAINT ID')\n varSemantico.append('acom = Acompaniamiento(CONSTRAINT,t[2]) ')\n elif t[1].lower() == 'references':\n t[0] = Acompaniamiento('REFERENCES', t[2], lexer.lineno, columna)\n varGramatical.append('acom :: = REFERENCES ID')\n 
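# varGramatical / varSemantico just record, per reduction, the production used\n # and a human-readable sketch of its semantic action; they appear to feed the\n # grammar/semantics reports rather than affect execution.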
varSemantico.append('acom = Acompaniamiento(REFERENCES,t[2]) ')\n elif t[1].lower() == 'check':\n t[0] = Acompaniamiento('CHECK', t[3], lexer.lineno, columna)\n varGramatical.append('acom :: = CHECK PARIZQ checkprima PARDR')\n varSemantico.append('acom = Acompaniamiento(CHECK, t[3]) ')\n\n\ndef p_acompaniamiento2(t):\n 'acom : UNIQUE'\n global columna\n t[0] = Acompaniamiento('UNIQUE', None, lexer.lineno, columna)\n varGramatical.append('acom :: = UNIQUE')\n varSemantico.append('acom = Acompaniamiento(UNIQUE, None)')\n\n\ndef p_acompaniamiento3(t):\n 'acom : UNIQUE ID'\n global columna\n t[0] = Acompaniamiento('UNIQUE', Id(t[2], lexer.lineno, columna), lexer.lineno, columna)\n varGramatical.append('acom :: = UNIQUE ID')\n varSemantico.append('acom = Acompaniamiento(UNIQUE, Id(t[2]))')\n\n\ndef p_tipos(t):\n '''tipo : SMALLINT\n | INTEGER\n | BIGINT\n | NUMERIC\n | REAL\n | DOUBLE\n | MONEY\n | TEXT\n | TIMESTAMP\n | DATE\n | TIME\n | INTERVAL\n | BOOLEAN\n | SERIAL'''\n global columna\n t[0] = Tipo(t[1].upper(), None, lexer.lineno, columna)\n varGramatical.append('tipo :: =' + str(t[1]))\n varSemantico.append('tipo = Tipo(' + str(t[1]) + '.upper(), None)')\n\n\n# agregar esto en sus conflictos\ndef p_tipos_1(t):\n ''' tipo : DECIMAL PARIZQ ENTERO COMA ENTERO PARDR '''\n global columna\n t[0] = Tipo(t[1].upper() + \"-\" + str(t[3]) + \"-\" + str(t[5]), None, lexer.lineno, columna)\n varGramatical.append('tipo :: = DECIMAL PARIZQ ENTERO COMA ENTERO PARDR')\n varSemantico.append('tipo = Tipo(DECIMAL-ENTERO-ENTERO,None)')\n\n\n# hasta aqui lo nuevo\ndef p_tiposTexto(t):\n '''tipo : CHARACTER PARIZQ ENTERO PARDR\n | VARCHAR PARIZQ ENTERO PARDR\n | CHAR PARIZQ ENTERO PARDR\n | CHARACTER VARYING PARIZQ ENTERO PARDR'''\n global columna\n if t[2] == '(':\n t[0] = Tipo(str(t[1].upper() + \"-\" + str(t[3])), Primitivo(t[3], lexer.lineno, columna), lexer.lineno, columna)\n else:\n t[0] = Tipo(str(t[1].upper() + t[2].upper() + \"-\" + str(t[4])), Primitivo(t[4], lexer.lineno, columna),\n lexer.lineno, columna)\n\n if t[3] == '(':\n varGramatical.append('tipo :: = ' + str(t[1]) + str(t[2]) + str(t[3]) + str(t[4]) + str(t[5]))\n varSemantico.append('tipo = Tipo(' + str(t[1]) + '- ENTERO' + ',' + 'PRIMITIVO(' + str(t[3]) + ')')\n else:\n varGramatical.append('tipo :: = ' + str(t[1]) + str(t[2]) + str(t[3]) + str(t[4]))\n varSemantico.append('tipo = Tipo(' + str(t[1]) + '- ENTERO' + ',' + 'PRIMITIVO(' + str(t[3]) + ')')\n\n\n# INSERT INTO\ndef p_insertInto(t):\n 'instruccion : INSERT INTO ID PARIZQ listaID PARDR VALUES value PTCOMA'\n global columna\n t[0] = InsertInto(1, t[3], t[5], t[8], lexer.lineno, columna)\n varGramatical.append('instruccion :: = INSERT INTO ID PARIZQ listaID PARDR VALUES value PTCOMA')\n varSemantico.append('instruccion = InsertInto(1,ID, listaID, value)')\n\n\ndef p_insertInto2(t):\n 'instruccion : INSERT INTO ID VALUES value PTCOMA'\n global columna\n t[0] = InsertInto(2, t[3], None, t[5], lexer.lineno, columna)\n varGramatical.append('instruccion :: = INSERT INTO ID VALUES value PTCOMA')\n varSemantico.append('instruccion = InsertInto(2,ID, None, value)')\n\n\n# lista de id\ndef p_listaID(t):\n 'listaID : listaID COMA var'\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('listaID :: = listaID COMA var')\n varSemantico.append('listaID = listaID; listaID.append(var)')\n\n\ndef p_listaID2(t):\n 'listaID : var'\n t[0] = [t[1]]\n varGramatical.append('listaID :: = var')\n varSemantico.append('listaID = var')\n\ndef p_listaID222(t):\n 'listaID : LOWER PARIZQ ID PARDR'\n t[0] = 
[t[3]]\n varGramatical.append('listaID :: = var')\n varSemantico.append('listaID = var')\n\n# quitar values\ndef p_values(t):\n 'values : values COMA value'\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('values :: = values COMA value')\n varSemantico.append('values = values; values.append(value)')\n\n\ndef p_values2(t):\n 'values : value'\n t[0] = [t[1]]\n varGramatical.append('values :: = value')\n varSemantico.append('values.append(value) ')\n\n\n# cambio\ndef p_value(t):\n 'value : PARIZQ listaExpresiones PARDR'\n t[0] = t[2]\n varGramatical.append('value :: = PARIZQ listaExpresiones PARDR')\n varSemantico.append('value = listaExpresiones ')\n\n\n# lista de expresiones\ndef p_listaExpresiones(t):\n 'listaExpresiones : listaExpresiones COMA E'\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('listaExpresiones :: = listaExpresiones COMA E')\n varSemantico.append('listaExpresiones = listaExpresiones; listaExpresiones.append(E)')\n\n\ndef p_listaExpresiones2(t):\n 'listaExpresiones : E'\n t[0] = [t[1]]\n varGramatical.append('listaExpresiones :: = E')\n varSemantico.append('listaExpresiones.append(E)')\n\n\n# lista de valores\ndef p_listaValores(t):\n 'listaValores : listaValores COMA valores'\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('listaValores :: = listaValores COMA valores')\n varSemantico.append(' listaValores = listaValores; listaValores.append(valores)')\n\n\ndef p_listaValores2(t):\n 'listaValores : valores'\n t[0] = [t[1]]\n varGramatical.append('listaValores :: = valores')\n varSemantico.append('listaValores = valores ')\n\n\n# VALORES\ndef p_valores(t):\n '''valores : ENTERO '''\n global columna\n t[0] = Primitivo(t[1], lexer.lineno, columna)\n varGramatical.append('valores ::= ENTERO')\n varSemantico.append('valores = Primitivo(ENTERO) ')\n\n\ndef p_valoresDec(t):\n '''valores : NUMDECIMAL '''\n global columna\n t[0] = Primitivo(t[1], lexer.lineno, columna)\n varGramatical.append('valores ::= NUMDECIMAL')\n varSemantico.append('valores = Primitivo(NUMDECIMAL) ')\n\n\ndef p_valoresCad(t):\n '''valores : CADENA '''\n global columna\n t[0] = Primitivo(t[1], lexer.lineno, columna)\n varGramatical.append('valores ::= CADENA')\n varSemantico.append('valores = Primitivo(CADENA) ')\n\n\n# este es un conjunto de valores o llamada a metodos\n# ejemplo (1,2,3,4,5,6) now() sqrt()\n# def p_valoresCad1(t):\n# '''valores : columna '''\n# t[0] = t[1]\n# ??\n\n\ndef p_valoresCad2(t):\n '''valores : Time'''\n # t[0] = Time(2, None, None, None)\n t[0] = t[1]\n varGramatical.append('valores ::= Time')\n varSemantico.append('valores = Time ')\n\n\n# def p_valores2(t):\n# '''valores2 : valores\n# | var'''\n# t[0] = Primitivo(t[1])\n\n\n# UPDATE\ndef p_update(t):\n 'instruccion : UPDATE ID SET asignaciones PTCOMA'\n global columna\n t[0] = Update(t[2], t[4], None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= UPDATE ID SET asignaciones PTCOMA')\n varSemantico.append('instruccion = Update(ID, asignaciones, None) ')\n\n\ndef p_update2(t):\n 'instruccion : UPDATE ID SET asignaciones WHERE andOr PTCOMA'\n global columna\n t[0] = Update(t[2], t[4], t[6], lexer.lineno, columna)\n varGramatical.append('instruccion ::= UPDATE ID SET asignaciones WHERE andOr PTCOMA')\n varSemantico.append('instruccion = Update(ID,asignaciones,andOr) ')\n\n\ndef p_asignaciones(t):\n 'asignaciones : asignaciones COMA asignacion'\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('asignaciones ::= asignaciones COMA asignacion')\n varSemantico.append('asignaciones = 
asignaciones; asignaciones.append(asignacion) ')\n\n\ndef p_asignaciones2(t):\n 'asignaciones : asignacion'\n t[0] = [t[1]]\n varGramatical.append('asignaciones ::= asignacion')\n varSemantico.append('asignaciones = asignacion ')\n\n\ndef p_where(t):\n '''where : asignacion\n '''\n t[0] = t[1] # sube una clase Expresion\n varGramatical.append('where ::= asignacion')\n varSemantico.append('where = asignacion ')\n\n\ndef p_where7(t):\n '''where : boolean\n '''\n t[0] = t[1] # sube una clase Expresion o Primitivo si fuera False | True\n varGramatical.append('where ::= boolean')\n varSemantico.append('where = boolean ')\n\n\ndef p_whereN(t):\n '''where : columna IN PARIZQ listaValores PARDR\n | columna IN PARIZQ select2 PARDR\n | columna BETWEEN valores AND valores '''\n global columna\n if t[2].upper() == 'IN':\n t[0] = Where(2, None, t[1], t[4], None, None, None, lexer.lineno, columna)\n varGramatical.append('where ::= columna IN PARIZQ select2 PARDR')\n varSemantico.append('where = Where(2, None, columna, select2, None, None, None)')\n elif t[2].upper() == 'BETWEEN':\n t[0] = Where(3, None, t[1], None, t[3], t[5], None, lexer.lineno, columna)\n varGramatical.append('where ::= columna BETWEEN valores AND valores')\n varSemantico.append('where = Where(3, None, columna, None, valores, valores, None) ')\n\n\ndef p_whereN1(t):\n 'where : NOT boolean'\n global columna\n t[0] = Where(1, t[2], None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('where ::= NOT boolean')\n varSemantico.append('where = Where(1, boolean, None, None, None, None, None)')\n\n\ndef p_whereN_1(t):\n '''where : columna ILIKE valores\n | columna LIKE valores\n | '''\n global columna\n if t[2].upper() == 'ILIKE':\n t[0] = Where(4, None, t[1], None, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('where ::= columna LIKE valores')\n varSemantico.append('where = Where(4, None, columna, None, valores, None, None)')\n else:\n t[0] = Where(5, None, t[1], None, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('where ::= columna LIKE valores')\n varSemantico.append('where = Where(4, None, columna, None, valores, None, None)')\n\n\ndef p_where1(t):\n '''where : valores comparisonP2\n | prim comparisonP2\n | boolean comparisonP2\n ''' # guardados en valor1\n global columna\n t[0] = Where(6, None, None, None, t[1], None, t[2], lexer.lineno, columna)\n varGramatical.append('where ::= valores comparisonP2')\n varSemantico.append('where = Where(6, None, None, None, valores, None, comparisonP')\n # boolean puede ser expresion o primitivo\n\n\ndef p_where2(t):\n '''where : var IS NOT DISTINCT FROM valores '''\n global columna\n t[0] = Where(7, None, t[1], None, t[6], None, None, lexer.lineno, columna)\n varGramatical.append('where ::= var IS NOT DISTINCT FROM valores')\n varSemantico.append('where = Where(7, None, var, None, valores, None, None)')\n\n\n# corregir aqui freddy\n\ndef p_where3(t):\n '''where : var IS DISTINCT FROM valores\n '''\n global columna\n t[0] = Where(8, None, t[1], None, t[5], None, None, lexer.lineno, columna)\n varGramatical.append('where ::= var IS DISTINCT FROM valores')\n varSemantico.append('where = Where(8, None, var, None, valores, None, None) ')\n\n\ndef p_where4(t):\n '''where : columna NOT IN PARIZQ select2 PARDR\n | columna NOT IN PARIZQ listaValores PARDR\n '''\n global columna\n t[0] = Where(9, None, t[1], t[5], None, None, None, lexer.lineno, columna)\n varGramatical.append('where ::= columna NOT IN PARIZQ select2 PARDR')\n varSemantico.append('where 
= Where(9, None, columna, select2, None, None, None) ')\n\n\ndef p_whereNE(t):\n '''where : columna NOT EXISTS PARIZQ select2 PARDR\n | columna NOT EXISTS PARIZQ listaValores PARDR'''\n global columna\n t[0] = Where(10, None, t[1], t[5], None, None, None, lexer.lineno, columna)\n varGramatical.append('where ::= NOT EXISTS PARIZQ listaValores PARDR')\n varSemantico.append('where = Where(10, None, None, listaValores, None, None, None) ')\n\n\ndef p_whereE(t):\n '''where : columna EXISTS PARIZQ select2 PARDR\n | columna EXISTS PARIZQ listaValores PARDR'''\n global columna\n t[0] = Where(11, None, t[1], t[4], None, None, None, lexer.lineno, columna)\n varGramatical.append('where ::= EXISTS PARIZQ listaValores PARDR')\n varSemantico.append('where = Where(11, None, None, listaValores, None, None, None)')\n\n\ndef p_ComparisonP(t):\n ''' comparisonP2 : IS TRUE\n | IS FALSE\n | IS UNKNOWN\n '''\n if t[2].upper() == 'TRUE':\n t[0] = 1\n varGramatical.append('comparisonP2 ::= IS TRUE')\n varSemantico.append('comparisonP2 = 1 ')\n elif t[2].upper() == 'FALSE':\n t[0] = 2\n varGramatical.append('comparisonP2 ::= IS FALSE')\n varSemantico.append('comparisonP2 = 2 ')\n elif t[2].upper() == 'UNKNOWN':\n t[0] = 3\n varGramatical.append('comparisonP2 ::= IS UNKNOWN')\n varSemantico.append('comparisonP2 = 3')\n\n\ndef p_ComparisonP1(t):\n ''' comparisonP2 : IS NOT TRUE\n | IS NOT FALSE\n | IS NOT UNKNOWN\n '''\n if t[3].upper() == 'TRUE':\n t[0] = 4\n varGramatical.append('comparisonP2 ::= IS NOT TRUE')\n varSemantico.append('comparisonP2 = 4')\n elif t[3].upper() == 'FALSE':\n t[0] = 5\n varGramatical.append('comparisonP2 ::= IS NOT FALSE')\n varSemantico.append('comparisonP2 = 5')\n elif t[3].upper() == 'UNKNOWN':\n t[0] = 6\n varGramatical.append('comparisonP2 ::= IS NOT UNKNOWN')\n varSemantico.append('comparisonP2 = 6 ')\n\n\ndef p_ComparisonP2(t):\n ''' comparisonP2 : IS NULL\n '''\n t[0] = 7\n varGramatical.append('comparisonP2 ::= IS NULL')\n varSemantico.append('comparisonP2 = 7')\n\n\ndef p_ComparisonP3(t):\n ''' comparisonP2 : IS NOT NULL\n '''\n t[0] = 8\n varGramatical.append('comparisonP2 ::= IS NOT NULL')\n varSemantico.append('comparisonP2 = 8')\n\n\ndef p_ComparisonP4(t):\n ''' comparisonP2 : NOTNULL\n | ISNULL\n '''\n if t[1].upper() == 'NOTNULL':\n t[0] = 9\n varGramatical.append('comparisonP2 ::= ' + str(t[1]))\n varSemantico.append('comparisonP2 = 9')\n else:\n t[0] = 10\n varGramatical.append('comparisonP2 ::= ' + str(t[1]))\n varSemantico.append('comparisonP2 = 10')\n\n\ndef p_andOr(t):\n '''andOr : andOr AND andOr\n | andOr OR andOr\n '''\n global columna\n t[0] = Expresion(t[1], t[3], t[2], lexer.lineno, columna)\n varGramatical.append('andOr ::= andOr ' + str(t[2]) + ' andOr')\n varSemantico.append('andOr = Expresion(andOr, andOr, AND) ')\n\n\ndef p_andOr2(t):\n 'andOr : where'\n t[0] = t[1]\n varGramatical.append('andOr ::= where ')\n varSemantico.append('andOr = where')\n\n\n# LA ASGINACION SE DEJA DE ESTA FORMA PUESTO QUE LA EXPRESION\n# ABSORVE ESTO\n# cambio de produccion asignacion a E\ndef p_asignacion(t):\n '''asignacion : E IGUAL E\n '''\n global columna\n t[0] = Expresion(t[1], t[3], t[2], lexer.lineno, columna)\n #print('=')\n varGramatical.append('asignacion ::= E ' + str(t[2]) + ' E')\n varSemantico.append('asignacion = Expresion(E, E, IGUAL) ')\n\n\n# PARA LLAMAR UNA FUNCION DENTRO DE UNA EXPRESION ******************\ndef p_callfunction(t):\n '''E : ID PARIZQ listaExpresiones PARDR'''\n global columna\n t[0] = Funcion(t[1],t[3],lexer.lineno,columna)\n 
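# e.g. fn_suma(2, 3) reduces here to Funcion('fn_suma', [Primitivo(2), Primitivo(3)]);\n # fn_suma is a hypothetical name - resolving it against the declared functions is\n # presumably deferred until the Funcion node is evaluated.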
varGramatical.append('E ::= ID PARIZQ listaExpresiones PARDR')\n varSemantico.append('E = Funcion(ID,listaExpresiones)')\n\ndef p_callfunction1(t):\n '''E : ID PARIZQ PARDR'''\n global columna\n t[0] = Funcion(t[1],None,lexer.lineno,columna)\n varGramatical.append('E ::= ID PARIZQ listaExpresiones PARDR')\n varSemantico.append('E = Funcion(ID,listaExpresiones)')\n\n#*******************************************************************\n#Para el select Count***********************************************\ndef p_selectCount(t):\n '''E : SELECT COUNT PARIZQ MULT PARDR FROM ID'''\n global columna\n t[0] = SelectCount(t[7])\n varGramatical.append('E ::= SELECT COUNT PARIZQ MULT PARDR FROM ID')\n varSemantico.append('E = SelectCount(ID)')\n\n#*******************************************************************\n#Para aceptar Select en expresion paraun asignacion*****************\ndef p_selectInstruccion(t):\n '''E : instruccion'''\n t[0] = t[1]\n varGramatical.append('E ::= instruccion')\n varSemantico.append('E = instruccion')\n\ndef p_E(t):\n '''E : operando\n\t | boolean\n | unario\n | valores\n | var\n | pnum\n | math\n | asignacion\n | trig\n | bina'''\n t[0] = t[1]\n varGramatical.append('E ::= valores ')\n varSemantico.append('E = valores ')\n\ndef p_E1(t):\n '''E : PARIZQ E PARDR '''\n t[0] = t[2]\n varGramatical.append('E ::= PARIZQ E PARDR')\n varSemantico.append('E = E')\n\n\n# print(\"expresion\")\n# if t[1] == '(' : t[0] = t[2]\n# else : t[0] = t[1]\n\ndef p_E2(t):\n '''boolean : FALSE\n | TRUE'''\n global columna\n t[0] = Primitivo(t[1].upper(), lexer.lineno, columna)\n varGramatical.append('boolean ::= ' + str(t[1]))\n varSemantico.append('boolean = Primitivo(' + str(t[1]) + ')')\n\n\ndef p_oper(t):\n '''operando : E MAS E\n\t | E MENOS E\n\t | E MULT E\n \t | E DIVI E\n | E MODU E\n | E EXPO E\n\t | E MENMEN E\n\t | E MAYMAY E\n\t | E AND E\n\t | E OR E\n\t '''\n global columna\n t[0] = Expresion(t[1], t[3], t[2], lexer.lineno, columna)\n if t[2] == '+':\n varGramatical.append('operando ::= E MAS E')\n varSemantico.append('operando = Expresion(E, E, MAS) ')\n elif t[2] == '-':\n varGramatical.append('operando ::= E MENOS E')\n varSemantico.append('operando = Expresion(E, E, MENOS) ')\n elif t[2] == '*':\n varGramatical.append('operando ::= E MULT E')\n varSemantico.append('operando = Expresion(E, E, MULT) ')\n elif t[2] == '/':\n varGramatical.append('operando ::= E DIVI E')\n varSemantico.append('operando = Expresion(E, E, DIV) ')\n elif t[2] == '%':\n varGramatical.append('operando ::= E MODU E')\n varSemantico.append('operando = Expresion(E, E, MODU) ')\n elif t[2] == '^':\n varGramatical.append('operando ::= E EXPO E')\n varSemantico.append('operando = Expresion(E, E, EXPO) ')\n elif t[2] == '<<':\n varGramatical.append('operando ::= E MENMEN E')\n varSemantico.append('operando = Expresion(E, E, MENMEN) ')\n elif t[2] == '>>':\n varGramatical.append('operando ::= E MAYMAY E')\n varSemantico.append('operando = Expresion(E, E, MAYMAY) ')\n elif t[2] == 'and':\n varGramatical.append('operando ::= E AND E')\n varSemantico.append('operando = Expresion(E, E, AND) ')\n elif t[2] == 'or':\n varGramatical.append('operando ::= E OR E')\n varSemantico.append('operando = Expresion(E, E, OR ) ')\n\n\ndef p_booleanos(t):\n '''boolean : E IGUALIGUAL E\n\t | E NOIGUAL E\n | E MENMAY E\n\t | E MENOR E\n\t | E MAYOR E\n\t | E MENORIGUAL E\n\t | E MAYORIGUAL E'''\n global columna\n t[0] = Expresion(t[1], t[3], t[2], lexer.lineno, columna)\n if t[2] == '==':\n varGramatical.append('boolean ::= E 
IGUALIGUAL E')\n varSemantico.append('boolean = Expresion(E, E, IGUALIGUAL)')\n elif t[2] == '!=':\n varGramatical.append('boolean ::= E NOIGUAL E')\n varSemantico.append('boolean = Expresion(E, E, NOIGUAL)')\n elif t[2] == '<>':\n varGramatical.append('boolean ::= E MENMAY E')\n varSemantico.append('boolean = Expresion(E, E, MENMAY)')\n elif t[2] == '<':\n varGramatical.append('boolean ::= E MENOR E')\n varSemantico.append('boolean = Expresion(E, E, MENOR)')\n elif t[2] == '>':\n varGramatical.append('boolean ::= E MAYOR E')\n varSemantico.append('boolean = Expresion(E, E, MAYOR)')\n elif t[2] == '<=':\n varGramatical.append('boolean ::= E MENORIGUAL E')\n varSemantico.append('boolean = Expresion(E, E, MENORIGUAL)')\n elif t[2] == '>=':\n varGramatical.append('boolean ::= E MAYORIGUAL E')\n varSemantico.append('boolean = Expresion(E, E, MAYORIGUAL)')\n\n\ndef p_unarios(t):\n '''unario : NOTO E %prec NEG\n\t | MENOS E %prec UMENOS\n\t | GNOT E %prec NB\n | MAS E %prec UMAS'''\n global columna\n t[0] = Unario(t[1], t[2], lexer.lineno, columna)\n #(t[1])\n varGramatical.append('unario ::= NOTO E %prec NEG')\n varSemantico.append('unario = Unario(NOTO,E)')\n\n\ndef p_var(t):\n 'var : ID'\n global columna\n t[0] = Id(t[1], lexer.lineno, columna)\n varGramatical.append('var ::= ID')\n varSemantico.append('var = Id(ID)')\n\n\ndef p_alias(t):\n 'var : ID PUNTO ID'\n #print(t[1] + t[2] + t[3])\n global columna\n\n t[0] = IdId(Id(t[1], lexer.lineno, columna), Id(t[3], lexer.lineno, columna), lexer.lineno, columna)\n varGramatical.append('var ::= ID PUNTO ID')\n varSemantico.append('var = IdId(Id(ID, ID)')\n\n\ndef p_alias1notocar(t):\n 'var : ID PUNTO MULT'\n ##print(t[1] + t[2] + t[3])\n global columna\n t[0] = IdId(Id(t[1], lexer.lineno, columna), Id(t[3], lexer.lineno, columna), lexer.lineno, columna)\n varGramatical.append('var ::= ID PUNTO MULT')\n varSemantico.append('var = IdId(Id(ID, MULT)')\n\n\ndef p_pnum2(t):\n '''pnum : PUNTO E'''\n #print('punto')\n varGramatical.append('pnum ::= PUNTO E')\n varSemantico.append('pnum = E ')\n # t[0] = Id(t[1])\n\n\n# DELETE\ndef p_delete(t):\n 'instruccion : DELETE FROM ID WHERE andOr PTCOMA'\n global columna\n t[0] = Delete(1, t[3], t[5], lexer.lineno, columna)\n varGramatical.append('instruccion ::= DELETE FROM ID WHERE andOr PTCOMA')\n varSemantico.append('instruccion = Delete(ID, andOR) ')\n\n\ndef p_delete2(t):\n 'instruccion : DELETE FROM ID PTCOMA'\n global columna\n t[0] = Delete(2, t[3], None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= DELETE FROM ID PTCOMA')\n varSemantico.append('instruccion = Delete(ID, None)')\n\n\n# DROP\ndef p_drop(t):\n '''instruccion : DROP DATABASE ID PTCOMA\n | DROP DATABASE IF EXISTS ID PTCOMA\n | DROP TABLE ID PTCOMA'''\n global columna\n if t[2].upper() == 'TABLE':\n t[0] = Drop(2, False, t[3], lexer.lineno, columna)\n varGramatical.append('instruccion ::= DROP TABLE ID PTCOMA')\n varSemantico.append('instruccion = Drop(2, False, ID) ')\n elif t[3].upper() == 'IF':\n t[0] = Drop(1, True, t[5], lexer.lineno, columna)\n varGramatical.append('instruccion ::= DROP DATABASE IF EXISTS ID PTCOMA')\n varSemantico.append('instrucciones = Drop(1, True, ID) ')\n else:\n t[0] = Drop(1, False, t[3], lexer.lineno, columna)\n varGramatical.append('instruccion ::= DROP DATABASE ID PTCOMA')\n varSemantico.append('instrucciones = Drop(1, False, ID)')\n\n\n# CREATE or REPLACE DATABASE\ndef p_createDB(t):\n '''instruccion : opcionCR IF NOT EXISTS ID PTCOMA\n | opcionCR ID PTCOMA'''\n global columna\n if 
t[2].upper() == 'IF':\n t[0] = CreateReplace(t[1], True, t[5], None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= opcionCR IF NOT EXISTS ID PTCOMA')\n varSemantico.append('instruccion = CreateReplace(opcionCR, True, ID, None)')\n else:\n t[0] = CreateReplace(t[1], False, t[2], None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= opcionCR ID PTCOMA')\n varSemantico.append('instruccion = CreateReplace(opcionR, False, ID, None)')\n\n\ndef p_createDB2(t):\n '''instruccion : opcionCR ID complemento PTCOMA\n | opcionCR IF NOT EXISTS ID complemento PTCOMA'''\n global columna\n if t[2].upper() == 'IF':\n t[0] = CreateReplace(t[1], True, t[5], t[6], lexer.lineno, columna)\n varGramatical.append('instruccion ::= opcionCR IF NOT EXISTS ID complemento PTCOMA')\n varSemantico.append('instruccion = CreateReplace(opcionCR, True, complemento, PTCOMA)')\n else:\n t[0] = CreateReplace(t[1], False, t[2], t[3], lexer.lineno, columna)\n varGramatical.append('instruccion ::= opcionCR ID complemento PTCOMA')\n varSemantico.append('instruccion = CreateReplace(opcionCR, False, ID, complemento)')\n\n\ndef p_opcionCR(t):\n '''opcionCR : CREATE DATABASE\n | CREATE OR REPLACE DATABASE'''\n if t[2].upper() == 'OR':\n t[0] = 2\n varGramatical.append('opcionCR ::= CREATE OR REPLACE DATABASE')\n varSemantico.append('opcionCR = 2')\n else:\n t[0] = 1\n varGramatical.append('opcionCR ::= CREATE DATABASE')\n varSemantico.append('opcionCR = 1')\n\n\ndef p_complementoCR(t):\n '''complemento : OWNER IGUAL ID\n | OWNER ID\n | OWNER IGUAL CADENA'''\n global columna\n if t[2] == '=':\n t[0] = ComplementoCR(t[3], None, lexer.lineno, columna)\n varGramatical.append('complemento ::= OWNER IGUAL CADENA')\n varSemantico.append('complemento = ComplementoCR(ID, None)')\n else:\n t[0] = ComplementoCR(t[2], None, lexer.lineno, columna)\n varGramatical.append('complemento ::= OWNER ID')\n varSemantico.append('complemento = ComplementoCR(ID, None)')\n\n\ndef p_complementoCR2(t):\n '''complemento : OWNER IGUAL ID MODE IGUAL ENTERO\n | OWNER ID MODE IGUAL ENTERO\n | OWNER IGUAL ID MODE ENTERO\n | OWNER ID MODE ENTERO\n | OWNER IGUAL CADENA MODE IGUAL ENTERO\n '''\n global columna\n if t[2] == '=':\n if t[5] == '=':\n t[0] = ComplementoCR(t[3], t[6], lexer.lineno, columna)\n varGramatical.append('complemento ::= OWNER IGUAL ID MODE IGUAL ENTERO')\n varSemantico.append('complemento = ComplementoCR(ID, ENTERO) ')\n else:\n t[0] = ComplementoCR(t[3], t[5], lexer.lineno, columna)\n varGramatical.append('complemento ::= OWNER IGUAL ID MODE ENTERO')\n varSemantico.append('complemento = ComplementoCR(ID, ENTERO)')\n else:\n if t[4] == '=':\n t[0] = ComplementoCR(t[2], t[5], lexer.lineno, columna)\n varGramatical.append('complemento ::= OWNER ID MODE IGUAL ENTERO')\n varSemantico.append('complemento = ComplementoCR(ID, ENTERO) ')\n else:\n t[0] = ComplementoCR(t[2], t[4], lexer.lineno, columna)\n varGramatical.append('complemento ::= OWNER ID MODE ENTERO')\n varSemantico.append('complemento = ComplementoCR(ID, ENTERO) ')\n\n\n# SHOW\ndef p_showDB(t):\n 'instruccion : SHOW DATABASES PTCOMA'\n global columna\n t[0] = Show(True, lexer.lineno, columna)\n varGramatical.append('instruccion ::= SHOW DATABASES PTCOMA')\n varSemantico.append('instruccion = Show(True) ')\n\n\ndef p_showDB1(t):\n 'instruccion : SHOW DATABASES LIKE CADENA PTCOMA'\n t[0] = t[1]\n varGramatical.append('instruccion ::= SHOW DATABASES LIKE CADENA PTCOMA')\n varSemantico.append('instruccion = SHOW ')\n\n\n# ALTER\ndef p_alterDB(t):\n '''instruccion : 
ALTER DATABASE ID RENAME TO ID PTCOMA\n | ALTER DATABASE ID OWNER TO valores PTCOMA'''\n global columna\n if t[4].upper() == 'RENAME':\n t[0] = AlterDatabase(1, t[3], t[6], lexer.lineno, columna)\n varGramatical.append('instruccion ::= ALTER DATABASE ID RENAME TO ID PTCOMA')\n varSemantico.append('instruccion = AlterDatabase(1, ID, ID)')\n else:\n t[0] = AlterDatabase(2, t[3], t[6], lexer.lineno, columna)\n varGramatical.append('instruccion ::= ALTER DATABASE ID OWNER TO valores PTCOMA')\n varSemantico.append('instruccion = AlterDatabase(2, ID, valores)')\n\n\ndef p_alterT(t):\n '''instruccion : ALTER TABLE ID lalterprima PTCOMA\n '''\n global columna\n t[0] = AlterTable(t[3], t[4], lexer.lineno, columna)\n varGramatical.append('instruccion ::= ALTER TABLE ID lalterprima PTCOMA')\n varSemantico.append('instruccion = AlterTable(ID,lalterprima ) ')\n\n\ndef p_alterT8notocar(t):\n 'lalterprima : lalterprima alterprima'\n t[1].append(t[2])\n t[0] = t[1]\n varGramatical.append('lalterprima ::= lalterprima alterprima')\n varSemantico.append('lalterprima = lalterprima ; lalterprima.append(alterprima)')\n\n\ndef p_alterT9notocar(t):\n 'lalterprima : alterprima'\n t[0] = [t[1]]\n varGramatical.append('lalterprima ::= alterprima')\n varSemantico.append('lalterprima = [alterprima]')\n\n\ndef p_alterT10notocar(t):\n 'alterprima : ADD COLUMN listaID tipo '\n global columna\n t[0] = Alter(1, 'ADD', ' COLUMN', t[3], t[4], None, None, None, None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= ADD COLUMN listaID tipo')\n varSemantico.append(' alterprima = Alter(1,ADD, COLUMN, listaID, tipo, None, None, None, None)')\n\n\ndef p_alterT11notocar(t):\n 'alterprima : DROP COLUMN listaID'\n global columna\n t[0] = Alter(2, 'DROP', ' COLUMN', t[3], None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= DROP COLUMN listaID')\n varSemantico.append('alterprima = Alter(2,DROP, COLUMN, listaID, None, None, None, None, None) ')\n\n\ndef p_alterT12notocar(t):\n # NOTE: PLY reads the rule from the function docstring, so the rule string\n # must be the first statement here, ahead of 'global columna'.\n 'alterprima : ADD CHECK PARIZQ checkprima PARDR'\n global columna\n t[0] = Alter(3, 'ADD', ' CHECK', None, None, t[4], None, None, None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= ADD CHECK PARIZQ checkprima PARDR')\n varSemantico.append('alterprima = Alter(3,ADD, CHECK, None, None, checkprima, None, None, None)')\n\n\ndef p_alterT13notocar(t):\n 'alterprima : DROP CONSTRAINT ID'\n global columna\n t[0] = Alter(4, 'DROP', ' CONSTRAINT', t[3], None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= DROP CONSTRAINT ID')\n varSemantico.append('alterprima = Alter(4,DROP, CONSTRAINT, ID, None, None, None, None, None) ')\n\n\ndef p_alterT15notocar(t):\n 'alterprima : ADD FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR '\n global columna\n t[0] = Alter(5, 'ADD', ' FOREIGN KEY', t[5], None, None, t[8], None, t[10], lexer.lineno, columna)\n varGramatical.append('alterprima ::= ADD FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR')\n varSemantico.append('alterprima = Alter(5,ADD, FOREIGN KEY, listaID, None, None, ID, None, listaID)')\n\n\ndef p_alterT16notocar(t):\n 'alterprima : ALTER COLUMN ID TYPE tipo'\n global columna\n t[0] = Alter(6, 'ALTER', ' COLUMN', t[3], t[5], None, None, 'TYPE', None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= ALTER COLUMN ID TYPE tipo')\n varSemantico.append('alterprima = Alter(6,ALTER, COLUMN, ID, Tipo, None, None, TYPE, None)')\n\n\ndef p_alterT17notocar(t):\n 'alterprima : ALTER COLUMN ID 
SET NOT NULL'\n global columna\n t[0] = Alter(7, 'ALTER', ' COLUMN', t[3], None, None, None, 'SET NOT NULL', None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= ALTER COLUMN ID SET NOT NULL')\n varSemantico.append('alterprima = Alter(8,ALTER, COLUMN, ID, None, None, None, SET NOT NULL, None)')\n\n\ndef p_alterT20notocar(t):\n 'alterprima : ADD PRIMARY KEY PARIZQ listaID PARDR '\n global columna\n t[0] = Alter(8, 'PRIMARY', ' KEY', t[5], None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= ADD PRIMARY KEY PARIZQ listaID PARDR')\n varSemantico.append('alterprima = Alter(9,PRIMARY, KEY, None, None, None, None, None, None)')\n\n\ndef p_alterT21notocar(t):\n 'alterprima : ADD CONSTRAINT ID PRIMARY KEY PARIZQ listaID PARDR '\n global columna\n t[0] = Alter(9, 'ADD', 'CONSTRAINT:' + str(t[3]), t[7], None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= ADD CONSTRAINT ID PRIMARY KEY PARIZQ listaID PARDR ')\n varSemantico.append('alterprima = Alter(9,ADD, listaID, None, None, None, None, None, None)')\n\n\ndef p_alterT22notocar(t):\n 'alterprima : ADD CONSTRAINT ID FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR '\n global columna\n t[0] = Alter(10, 'ADD', 'CONSTRAINT:' + str(t[3]), t[7], None, None, t[10], None, t[12], lexer.lineno, columna)\n varGramatical.append(\n 'alterprima ::= ADD CONSTRAINT ID FOREIGN KEY PARIZQ listaID PARDR REFERENCES ID PARIZQ listaID PARDR')\n varSemantico.append('alterprima = Alter(10,ADD, FOREIGN KEY, KEY, None, None, PARDR, None, ID)')\n\n\ndef p_alterT23notocar(t):\n 'alterprima : ADD CONSTRAINT ID UNIQUE PARIZQ listaID PARDR'\n global columna\n t[0] = Alter(11, 'ADD', 'CONSTRAINT:' + str(t[3]), t[6], None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('alterprima ::= ADD CONSTRAINT ID UNIQUE PARIZQ listaID PARDR')\n varSemantico.append('alterprima = Alter(11, ADD, CONSTRAINT: ID, listaID, None, None, None, None, None)')\n\n\n##################################################################\n# SELECT\ndef p_selectTime(t):\n ''' instruccion : SELECT Time PTCOMA'''\n global columna\n t[0] = Select(1, False, t[2], None, None, None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= SELECT Time PTCOMA')\n varSemantico.append('instruccion = Select(1, False, Time, None, None, None, None, None, None) ')\n\n\ndef p_selectTime2(t):\n ''' Time : EXTRACT PARIZQ momento FROM TIMESTAMP CADENA PARDR\n '''\n global columna\n t[0] = Time(1, t[3], t[6], None, lexer.lineno, columna)\n varGramatical.append('Time ::= EXTRACT PARIZQ momento FROM TIMESTAMP CADENA PARDR')\n varSemantico.append('Time = Time(1, momento, CADENA, None)')\n\n\ndef p_selectTime0(t):\n ''' Time : date_part PARIZQ CADENA COMA INTERVAL CADENA PARDR\n '''\n global columna\n t[0] = Time(3, None, t[3], t[6], lexer.lineno, columna)\n varGramatical.append('Time ::= date_part PARIZQ CADENA COMA INTERVAL CADENA PARDR')\n varSemantico.append('Time = Time(3, None, CADENA, CADENA)')\n\n\ndef p_selectTime3(t):\n ''' Time : NOW PARIZQ PARDR\n | TIMESTAMP CADENA\n '''\n global columna\n if t[1].upper() == 'NOW':\n t[0] = Time(2, None, None, None, lexer.lineno, columna)\n varGramatical.append('Time ::= NOW PARIZQ PARDR')\n varSemantico.append('Time = Time(2, None, None, None)')\n else:\n t[0] = Time(6, None, t[2], None, lexer.lineno, columna)\n varGramatical.append('Time ::= TIMESTAMP CADENA')\n varSemantico.append('Time = Time(6, None, CADENA, None) 
')\n\n\ndef p_selectTime4(t):\n ''' Time : CURRENT_TIME\n | CURRENT_DATE\n '''\n global columna\n if t[1].upper() == 'CURRENT_TIME':\n t[0] = Time(5, None, None, None, lexer.lineno, columna)\n varGramatical.append('Time ::= CURRENT_TIME')\n varSemantico.append('Time = Time(5, None, None, None)')\n else:\n t[0] = Time(4, None, None, None, lexer.lineno, columna)\n varGramatical.append('Time ::= CURRENT_DATE')\n varSemantico.append('Time = Time(4, None, None, None) ')\n\n\ndef p_momento(t):\n ''' momento : YEAR\n | MONTH\n | DAY\n | HOUR\n | MINUTE\n | SECOND\n '''\n t[0] = t[1].upper()\n varGramatical.append('momento ::= ' + str(t[1]))\n varSemantico.append('momento = ' + str(t[1].upper()))\n\n\n# ESTE SELECT SIRVE PARA HACER UNA LLAMADA A UNA CONSULTA QUE POSIBLEMENTE USE LA UNION\n# INTERSECT U OTRO\n# def p_instruccionSELECT(t):\n# '''instruccion : PARIZQ select2 PARDR inst_union\n# '''\n# t[0]=t[1]\n\n# SELECT SENCILLO QUE LLAMA FUNCIONES\ndef p_instruccionSELECT2(t):\n '''instruccion : select2 PTCOMA\n '''\n t[0] = t[1]\n varGramatical.append('instruccion ::= select2 PCOMA')\n varSemantico.append('instruccion = select2')\n\n\n# SELECT AUXILIAR QUE PROCEDE HACER EL UNION\ndef p_union2(t):\n '''instruccion : PARIZQ select2 PARDR UNION ALL PARIZQ select2 PARDR PTCOMA\n '''\n global columna\n t[0] = Union('UNION', True, t[2], t[7], lexer.lineno, columna)\n\n\n# SELECT AUXILIAR QUE PROCEDE HACER EL INTERSECT CON OTRO QUERY\ndef p_union3(t):\n '''instruccion : PARIZQ select2 PARDR INTERSECT ALL PARIZQ select2 PARDR PTCOMA\n '''\n global columna\n t[0] = Union('INTERSECT', True, t[2], t[7], lexer.lineno, columna)\n varGramatical.append('instruccion ::= PARIZQ select2 PARDR INTERSECT ALL PARIZQ select2 PARDR PTCOMA')\n varSemantico.append('instruccion = Union(INTERSECT, True, select2, select2)')\n\n\n# SELECT AUXILIAR QUE PROCEDE HACER EL EXCEP CON OTRO QUERY\ndef p_union4(t):\n '''instruccion : PARIZQ select2 PARDR EXCEPT ALL PARIZQ select2 PARDR PTCOMA\n '''\n global columna\n t[0] = Union('EXCEPT', True, t[2], t[7], lexer.lineno, columna)\n varGramatical.append('instruccion ::= PARIZQ select2 PARDR EXCEPT ALL PARIZQ select2 PARDR PTCOMA')\n varSemantico.append('instruccione = Union(EXCEPT, True, select2, select2) ')\n\n\n# ESTOS HACEN LO MISMO SIN LA PALABRA RESERVADA ALL\ndef p_union5(t):\n '''instruccion : PARIZQ select2 PARDR UNION PARIZQ select2 PARDR PTCOMA\n '''\n global columna\n t[0] = Union('UNION', False, t[2], t[6], lexer.lineno, columna)\n varGramatical.append('instruccion ::= PARIZQ select2 PARDR UNION PARIZQ select2 PARDR PTCOMA')\n varSemantico.append('instruccion = Union(UNION, False, select2, select2) ')\n\n\ndef p_union6(t):\n '''instruccion : PARIZQ select2 PARDR INTERSECT PARIZQ select2 PARDR PTCOMA\n '''\n global columna\n t[0] = Union('INTERSECT', False, t[2], t[6], lexer.lineno, columna)\n varGramatical.append('instruccion ::= PARIZQ select2 PARDR INTERSECT PARIZQ select2 PARDR PTCOMA')\n varSemantico.append('instruccion = Union(INTERSECT, False, select2, select2)')\n\n\ndef p_union7(t):\n '''instruccion : PARIZQ select2 PARDR EXCEPT PARIZQ select2 PARDR PTCOMA\n '''\n global columna\n t[0] = Union('EXCEPT', False, t[2], t[6], lexer.lineno, columna)\n varGramatical.append('instruccion ::= PARIZQ select2 PARDR EXCEPT PARIZQ select2 PARDR PTCOMA')\n varSemantico.append('instruccion = Union(EXCEPT, False, select2, select2)')\n\n\ndef p_groupBy(t):\n '''compSelect : list\n '''\n t[0] = t[1]\n varGramatical.append('compSelect ::= list')\n varSemantico.append('compSelect = 
list')\n\n\ndef p_groupBy1(t):\n '''compSelect : list GROUP BY compGroup\n '''\n global columna\n t[0] = GroupBy(t[1], t[4], None, lexer.lineno, columna)\n varGramatical.append('compSelect ::= list GROUP BY compGroup')\n varSemantico.append('compSelect = GroupBy(list, compGroup, None)')\n\n\ndef p_groupBy2(t):\n '''compSelect : GROUP BY compGroup\n '''\n global columna\n t[0] = GroupBy(None, t[3], None, lexer.lineno, columna)\n varGramatical.append('compSelect ::= GROUP BY compGroup')\n varSemantico.append('compSelect = GroupBy(None, compGroup, None)')\n\n\ndef p_having(t):\n '''compGroup : list ordenar\n '''\n global columna\n t[0] = Having(t[1], t[2], None, lexer.lineno, columna)\n varGramatical.append('compGroup ::= list ordenar')\n varSemantico.append('compGroup = Having(list, ordenar, None)')\n\n\ndef p_having1(t):\n '''compGroup : list ordenar HAVING andOr\n '''\n global columna\n t[0] = Having(t[1], t[2], t[4], lexer.lineno, columna)\n varGramatical.append('compGroup ::= list ordenar HAVING andOr')\n varSemantico.append('compGroup = Having(list, ordenar, andOr)')\n\n\n# here come the ascending or descending modes that may or may not accompany the group by\ndef p_ordenar1(t):\n '''ordenar : DESC'''\n t[0] = 'DESC'\n varGramatical.append('ordenar ::= DESC')\n varSemantico.append('ordenar = DESC')\n\n\ndef p_ordenar2(t):\n '''ordenar : ASC'''\n t[0] = 'ASC'\n varGramatical.append('ordenar ::= ASC')\n varSemantico.append('ordenar = ASC')\n\n\ndef p_ordenar3(t):\n '''ordenar : '''\n t[0] = None\n varGramatical.append('ordenar ::= ')\n varSemantico.append('ordenar = ')\n\n\n# --------------------------------------------------------------\n# here begin the selects that come without union, intersect or except\n# select 's\ndef p_instselect(t):\n '''select2 : SELECT DISTINCT select_list FROM inner orderby\n '''\n global columna\n t[0] = Select(2, True, None, t[3], None, t[5], t[6], None, None, None, lexer.lineno, columna)\n varGramatical.append('select2 ::= SELECT DISTINCT select_list FROM inner orderby')\n varSemantico.append('select2 = Select(2, True, None, select_list, None, inner, orderby, None, None) ')\n\n\ndef p_instselect2(t):\n '''select2 : SELECT select_list FROM subquery inner orderby limit\n '''\n global columna\n t[0] = Select(3, False, None, t[2], t[4], t[5], t[6], t[7], None, None, lexer.lineno, columna)\n varGramatical.append('select2 ::= SELECT select_list FROM subquery inner orderby limit')\n varSemantico.append('select2 = Select(3, False, None, select_list, subquery, inner, orderby, limit, None, None)')\n\ndef p_instselect3(t):\n '''select2 : SELECT select_list\n '''\n global columna\n t[0] = Select(4, False, None, t[2], None, None, None, None, None, None, lexer.lineno, columna)\n varGramatical.append('select2 ::= SELECT select_list')\n varSemantico.append('select2 = Select(4, False, None, select_list, None, None, None, None, None) ')\n\n\ndef p_instselect4(t):\n '''select2 : SELECT select_list FROM subquery inner WHERE complemSelect orderby limit\n '''\n global columna\n t[0] = Select(5, False, None, t[2], t[4], t[5], t[8], None, t[9], t[7], lexer.lineno, columna)\n varGramatical.append('select2 ::= SELECT select_list FROM subquery inner WHERE complemSelect orderby limit')\n varSemantico.append(\n 'select2 = Select(5, False, None, select_list, subquery, inner, orderby, limit, complemSelect) ')\n\n\ndef p_instselect7(t):\n '''select2 : SELECT DISTINCT select_list FROM subquery inner WHERE complemSelect orderby limit\n '''\n global columna\n t[0] = 
Select(6, True, None, t[3], t[5], t[6], t[9], None, t[10], t[8], lexer.lineno, columna)\n varGramatical.append(\n 'select2 ::= SELECT DISTINCT select_list FROM subquery inner WHERE complemSelect orderby limit')\n varSemantico.append('select2 = Select(6, True, None, select_list, subquery, inner, orderby, limit, complemSelect)')\n\n\n# ------------------------------------------------------------------------\ndef p_order_by(t):\n '''orderby : ORDER BY listaID opc_Order\n '''\n dictionary = {\n \"lista\": t[3],\n \"mode\": t[4]\n }\n # works\n t[0] = dictionary\n varGramatical.append('orderby ::= ORDER BY listaID opc_Order')\n varSemantico.append('orderby = listaID ')\n\n\ndef p_order_by_2(t):\n 'orderby : '\n t[0] = None\n varGramatical.append('orderby ::= ')\n varSemantico.append('orderby = ')\n\n\ndef p_order_limit(t):\n '''limit : LIMIT ENTERO\n | LIMIT ALL\n '''\n global columna\n if str(t[2]).upper() == 'ALL':\n t[0] = Limit(True, None, None, lexer.lineno, columna)\n varGramatical.append('limit ::= LIMIT ALL')\n varSemantico.append('limit = Limit(True, None, None)')\n else:\n t[0] = Limit(False, t[2], None, lexer.lineno, columna)\n varGramatical.append('limit ::= LIMIT ENTERO')\n varSemantico.append('limit = Limit(False, t[2], None) ')\n\n\ndef p_order_limit_1(t):\n '''limit : LIMIT ENTERO OFFSET ENTERO\n '''\n global columna\n t[0] = Limit(False, t[2], t[4], lexer.lineno, columna)\n varGramatical.append('limit ::= LIMIT ENTERO OFFSET ENTERO')\n varSemantico.append('limit = Limit(False, ENTERO, ENTERO)')\n\n\ndef p_order_limit_2(t):\n 'limit : '\n t[0] = None\n varGramatical.append('limit ::= ')\n varSemantico.append('limit = ')\n\n\ndef p_subquery(t):\n '''subquery : PARIZQ select2 PARDR\n '''\n t[0] = t[2]\n varGramatical.append('subquery ::= PARIZQ select2 PARDR')\n varSemantico.append('subquery = select2')\n\n\ndef p_subquery2(t):\n 'subquery : '\n t[0] = None\n varGramatical.append('subquery ::= ')\n varSemantico.append('subquery = ')\n\n\ndef p_innerjoin(t):\n '''inner : list '''\n t[0] = t[1]\n varGramatical.append('inner ::= list')\n varSemantico.append('inner = list')\n\n\ndef p_innerjoin1(t):\n '''inner : compSelect '''\n t[0] = t[1]\n varGramatical.append('inner ::= compSelect')\n varSemantico.append('inner = compSelect ')\n\n\n# up to this point no inner join is involved\n\ndef p_innerjoin2(t):\n '''inner : list INNER JOIN columna ON asignacion '''\n varGramatical.append('inner ::= list INNER JOIN columna ON asignacion')\n varSemantico.append('iv62 ')\n\n\ndef p_innerjoin3(t):\n '''inner : list INNER JOIN columna ON asignacion complemSelect '''\n varGramatical.append('inner ::= list INNER JOIN columna ON asignacion complemSelect')\n varSemantico.append('iv62 ')\n\n\n# here an inner join does appear, but without a where\n\n\ndef p_instselect5(t):\n '''complemSelect : andOr\n '''\n t[0] = t[1]\n varGramatical.append('complemSelect ::= andOr')\n varSemantico.append('complemSelect = andOr')\n\n\n# compGroup complements the group by when it carries the having\ndef p_instselect6(t):\n '''complemSelect : andOr GROUP BY compGroup ordenar\n '''\n global columna\n t[0] = GroupBy(t[1], t[4], t[5], lexer.lineno, columna)\n varGramatical.append('complemSelect ::= andOr GROUP BY compGroup ordenar')\n varSemantico.append('complemSelect = GroupBy(andOr, compGroup, ordenar)')\n\n\ndef p_selectList(t):\n '''select_list : MULT\n | list'''\n t[0] = t[1]\n if t[1] == '*':\n varGramatical.append('select_list ::= MULT')\n varSemantico.append('select_list = MULT')\n else:\n varGramatical.append('select_list ::= list')\n 
varSemantico.append('select_list = list ')\n\n\ndef p_list2(t):\n '''list : list COMA columna '''\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('list ::= list COMA columna')\n varSemantico.append('list = list; list.append(columna) ')\n\n\ndef p_list3(t):\n '''list : columna '''\n t[0] = [t[1]]\n varGramatical.append('list ::= columna')\n varSemantico.append('list = [columna]')\n\n\ndef p_cases(t):\n '''columna : CASE cases END ID\n '''\n # ahora en columna puede venir:\n global columna\n t[0] = ColCase(t[2], t[4], lexer.lineno, columna)\n varGramatical.append('columna ::= CASE cases END ID')\n varSemantico.append('columna = ColCase(cases, ID) ')\n\n\ndef p_cases1(t):\n '''cases : cases case\n '''\n t[1].append(t[2])\n t[0] = t[1]\n varGramatical.append('cases ::= cases case')\n varSemantico.append('cases = cases; cases.append(case) ')\n\n\ndef p_cases2(t):\n '''cases : case\n '''\n t[0] = [t[1]]\n varGramatical.append('cases ::= case')\n varSemantico.append('cases = [case] ')\n\n\ndef p_cases3(t):\n '''case : WHEN asignacion THEN valores '''\n global columna\n t[0] = Case(t[2], t[4], lexer.lineno, columna)\n varGramatical.append('case ::= WHEN asignacion THEN valores')\n varSemantico.append('case = Case(asignacion, valores) ')\n\n\n# prim [as] seg\ndef p_prim(t):\n '''prim : var\n | math\n | trig\n | bina\n | Time\n | E\n '''\n t[0] = t[1]\n varGramatical.append('prim ::= var')\n varSemantico.append('prim = var ')\n\n\ndef p_prim2(t):\n 'prim : PARIZQ select2 PARDR'\n t[0] = t[2]\n varGramatical.append('prim ::= PARIZQ select2 PARDR')\n varSemantico.append('prim = select2')\n\n\ndef p_seg(t):\n '''seg : ID\n '''\n global columna\n t[0] = Id(t[1], lexer.lineno, columna)\n varGramatical.append('seg ::= ID')\n varSemantico.append('seg = Id(ID) ')\n\n\ndef p_seg2(t):\n 'seg : CADENA'\n global columna\n t[0] = Primitivo(t[1], lexer.lineno, columna)\n varGramatical.append('seg ::= CADENA')\n varSemantico.append('seg = Primitivo(CADENA) ')\n\n\ndef p_columna0(t):\n '''columna : prim AS seg'''\n global columna\n t[0] = IdAsId(t[1], t[3], lexer.lineno, columna)\n varGramatical.append('columna ::= prim AS seg')\n varSemantico.append('columna = IdAsId(prim, seg)')\n\n\ndef p_columna1(t):\n '''columna : prim seg'''\n global columna\n t[0] = IdAsId(t[1], t[2], lexer.lineno, columna)\n varGramatical.append('columna ::= prim seg')\n varSemantico.append('columna = IdAsId(prim, seg) ')\n\n\ndef p_columna2(t):\n 'columna : prim'\n t[0] = t[1]\n varGramatical.append('columna ::= prim')\n varSemantico.append('columna = prim')\n\n\ndef p_math2(t):\n ''' math : ABS PARIZQ E PARDR\n | CBRT PARIZQ E PARDR\n | CEIL PARIZQ E PARDR\n | CEILING PARIZQ E PARDR\n | DEGREES PARIZQ E PARDR\n | EXP PARIZQ E PARDR\n | FACTORIAL PARIZQ E PARDR\n | FLOOR PARIZQ E PARDR\n | LCM PARIZQ E PARDR\n | LN PARIZQ E PARDR\n | LOG PARIZQ E PARDR\n | LOG10 PARIZQ E PARDR\n | RADIANS PARIZQ E PARDR\n | ROUND PARIZQ E PARDR\n | SIGN PARIZQ E PARDR\n | SQRT PARIZQ E PARDR\n | TRUC PARIZQ E PARDR\n | WIDTH_BUCKET PARIZQ E PARDR\n | SETSEED PARIZQ E PARDR\n | SUM PARIZQ E PARDR\n | MD5 PARIZQ E PARDR\n | SING PARIZQ E PARDR\n | WIDTH_BUCKET PARIZQ listaValores PARDR\n | AVG PARIZQ E PARDR\n | COUNT PARIZQ E PARDR\n | MIN PARIZQ E PARDR\n | MAX PARIZQ E PARDR\n | TRUNC PARIZQ E PARDR\n '''\n global columna\n t[0] = Math_(t[1].upper(), t[3], None, lexer.lineno, columna)\n varGramatical.append('math ::= ' + str(t[1]) + ' ' + str(t[2]) + ' E ' + str(t[4]))\n varSemantico.append('math = Math_(' + str(t[1].upper() + '), E, None) 
'))\n\n#def p_mathnotocar(t):\n# 'math : COUNT PARIZQ MULT PARDR'\n# global columna\n#\n# t[0] = Math_(t[1].upper(), Id(str(t[3]),lexer.lineno,columna), None, lexer.lineno, columna)\n# varGramatical.append('math ::= ' + str(t[1]) + ' ' + str(t[2]) + ' E ' + str(t[4]))\n# varSemantico.append('math = Math_(' + str(t[1].upper() + '), E, None) '))\n\n\ndef p_math3(t):\n ''' math : DIV PARIZQ E COMA E PARDR\n | GCD PARIZQ E COMA E PARDR\n | MOD PARIZQ E COMA E PARDR\n | POWER PARIZQ E COMA E PARDR\n '''\n global columna\n t[0] = Math_(t[1].upper(), t[3], t[5], lexer.lineno, columna)\n varGramatical.append('math ::= ' + str(t[1]) + ' ' + str(t[2]) + ' E ' + str(t[4]) + ' E ' + str(t[6]))\n varSemantico.append('math = Math_(' + str(t[1].upper()) + ', E, E)')\n\n\ndef p_math4(t):\n ''' math : PI PARIZQ PARDR\n | RANDOM PARIZQ PARDR\n '''\n global columna\n t[0] = Math_(t[1].upper(), None, None, lexer.lineno, columna)\n varGramatical.append('math ::= ' + str(t[1]) + ' ' + str(t[2]) + ' ' + str(t[3]))\n varSemantico.append('math = Math_(' + str(t[1].upper()) + ', None, None)')\n\n\ndef p_math6(t):\n ''' math : MIN_SCALE\n | SCALE\n | TRIM_SCALE\n '''\n global columna\n t[0] = Math_(t[1].upper(), None, None, lexer.lineno, columna)\n varGramatical.append('math ::= ' + str(t[1]))\n varSemantico.append('math = Math_(' + str(t[1].upper()) + ', None, None)')\n\n\ndef p_binarios(t):\n '''bina : LENGTH PARIZQ E PARDR\n | SHA256 PARIZQ E PARDR\n | ENCODE PARIZQ E PARDR\n | DECODE PARIZQ E PARDR\n '''\n global columna\n if t[1].upper() == 'LENGTH':\n t[0] = Binario(1, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('bina ::= LENGTH PARIZQ E PARDR')\n varSemantico.append('bina = Binario(1, E, None, None)')\n elif t[1].upper() == 'SHA256':\n t[0] = Binario(2, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('bina ::= SHA256 PARIZQ E PARDR')\n varSemantico.append('bina = Binario(2, E, None, None)')\n elif t[1].upper() == 'ENCODE':\n t[0] = Binario(3, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('bina ::= ENCODE PARIZQ E PARDR')\n varSemantico.append('bina = Binario(3, E, None, None)')\n elif t[1].upper() == 'DECODE':\n t[0] = Binario(4, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('bina ::= DECODE PARIZQ E PARDR')\n varSemantico.append('bina = Binario(4, E, None, None)')\n\n\ndef p_binarios2(t):\n '''bina : SUBSTRING PARIZQ var COMA ENTERO COMA ENTERO PARDR\n | SUBSTR PARIZQ var COMA ENTERO COMA ENTERO PARDR'''\n global columna\n t[0] = Binario(5, t[3], t[5], t[7], lexer.lineno, columna)\n if t[1].lower() == 'substring':\n varGramatical.append('bina ::= SUBSTRING PARIZQ var COMA ENTERO COMA ENTERO PARDR')\n varSemantico.append('bina = Binario(5, var, ENTERO, ENTERO)')\n else:\n varGramatical.append('bina ::= SUBSTR PARIZQ var COMA ENTERO COMA ENTERO PARDR')\n varSemantico.append('bina = Binario(5, var, ENTERO, ENTERO)')\n\n\ndef p_binarios3(t):\n '''bina : TRIM PARIZQ CADENA FROM columna PARDR'''\n global columna\n t[0] = Binario(6, t[3], t[5], None, lexer.lineno, columna)\n varGramatical.append('bina ::= TRIM PARIZQ CADENA FROM columna PARDR')\n varSemantico.append('bina = Binario(6, CADENA, columna, None)')\n\n\ndef p_binarios4(t):\n '''bina : GET_BYTE PARIZQ CADENA COMA ENTERO PARDR'''\n global columna\n t[0] = Binario(7, t[3], t[5], None, lexer.lineno, columna)\n varGramatical.append('bina ::= GET_BYTE PARIZQ CADENA COMA ENTERO PARDR')\n varSemantico.append('bina = Binario(7, CADENA, ENTERO, None)')\n\n\ndef p_binarios5(t):\n '''bina : 
SET_BYTE PARIZQ CADENA COMA ENTERO COMA ENTERO PARDR'''\n global columna\n t[0] = Binario(8, t[3], t[5], t[7], lexer.lineno, columna)\n varGramatical.append('bina ::= SET_BYTE PARIZQ CADENA COMA ENTERO COMA ENTERO PARDR')\n varSemantico.append('bina = Binario(8, CADENA, ENTERO, ENTERO)')\n\n\ndef p_binarios6(t):\n '''bina : CONVERT PARIZQ CADENA AS tipo PARDR'''\n global columna\n t[0] = Binario(9, t[3], t[5], None, lexer.lineno, columna)\n varGramatical.append('bina ::= CONVERT PARIZQ CADENA AS tipo PARDR')\n varSemantico.append('bina = Binario(9, CADENA, tipo, None)')\n\n\ndef p_funcionesAgregadas(t):\n '''bina : GREATEST PARIZQ listaValores PARDR'''\n global columna\n t[0] = Binario(10, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('bina ::= GREATEST PARIZQ listaValores PARDR')\n varSemantico.append('bina = Binario(10, listaValores, None, None)')\n\n\ndef p_funcionesAgregadas1(t):\n '''\n bina : LEAST PARIZQ listaValores PARDR'''\n global columna\n t[0] = Binario(11, t[3], None, None, lexer.lineno, columna)\n varGramatical.append('bina ::= LEAST PARIZQ listaValores PARDR')\n varSemantico.append('bina = Binario(11, listaValores, None, None)')\n\n\ndef p_trig2(t):\n ''' trig : ACOS PARIZQ E PARDR\n | ACOSD PARIZQ E PARDR\n | ASIN PARIZQ E PARDR\n | ASIND PARIZQ E PARDR\n | ATAN PARIZQ E PARDR\n | ATAND PARIZQ E PARDR\n | COS PARIZQ E PARDR\n | COSD PARIZQ E PARDR\n | COT PARIZQ E PARDR\n | COTD PARIZQ E PARDR\n | SIN PARIZQ E PARDR\n | SIND PARIZQ E PARDR\n | TAN PARIZQ E PARDR\n | TAND PARIZQ E PARDR\n | SINH PARIZQ E PARDR\n | COSH PARIZQ E PARDR\n | TANH PARIZQ E PARDR\n | ASINH PARIZQ E PARDR\n | ACOSH PARIZQ E PARDR\n | ATANH PARIZQ E PARDR '''\n global columna\n\n t[0] = Trigonometrica(t[1].upper(), t[3], None, lexer.lineno, columna)\n varGramatical.append('trig ::= ' + str(t[1]) + ' ' + str(t[2]) + ' E ' + str(t[4]))\n varSemantico.append('trig = Trigonometrica(' + str(t[1].upper()) + ', E) ')\n\n\ndef p_trig2_2(t):\n ''' trig : ATAN2 PARIZQ E COMA E PARDR\n | ATAN2D PARIZQ E COMA E PARDR'''\n global columna\n t[0] = Trigonometrica(t[1].upper(), t[3], t[5], lexer.lineno, columna)\n varGramatical.append('trig ::= ' + str(t[1]) + ' ' + str(t[2]) + ' E ' + str(t[4]) + ' E ' + str(t[6]))\n varSemantico.append('trig = Trigonometrica(ATAN2, E, E)')\n\n\ndef p_instruccion_createEnum(t):\n ''' instruccion : CREATE TYPE ID AS ENUM PARIZQ listaExpresiones PARDR PTCOMA\n '''\n global columna\n t[0] = CreateType(t[3], t[7], lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE TYPE ID AS ENUM PARIZQ listaExpresiones PARDR PTCOMA')\n varSemantico.append('instruccion = CreateType(ID, listaExpresiones) ')\n\n\ndef p_checkopcional(t):\n ''' checkprima : listaValores\n | E '''\n t[0] = t[1]\n varGramatical.append('checkprima ::= listaValores')\n varSemantico.append(' checkprima.append(listaValores)')\n\n# --------------------------- Fase 2 ------------------------------------------------\n# PLDECLA ********************************************\ndef p_pldecla(t):\n '''pldecla : ID CONSTANT tipo COLLATE CADENA NOT NULL plasig PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],True,t[3],t[5],True,t[8],lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo COLLATE CADENA NOT NULL plasig PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,CADENA,True,plasig)')\n\ndef p_pldecla1(t):\n '''pldecla : ID tipo COLLATE CADENA NOT NULL plasig PTCOMA'''\n global columna\n t[0] = 
Declaracion(t[1],False,t[2],t[4],True,t[7],lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID tipo COLLATE CADENA NOT NULL plasig PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,False,tipo,CADENA,True,plasig)')\n\ndef p_pldecla2(t):\n '''pldecla : ID CONSTANT tipo NOT NULL plasig PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],True,t[3],None,True,t[6],lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo NOT NULL plasig PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,None,True,plasig)')\n\ndef p_pldecla3(t):\n '''pldecla : ID CONSTANT tipo COLLATE CADENA plasig PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],True,t[3],t[5],False,t[6],lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo COLLATE CADENA plasig PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,CADENA,False,plasig)')\n\ndef p_pldecla4(t):\n '''pldecla : ID CONSTANT tipo COLLATE CADENA NOT NULL PTCOMA'''\n global columna\n t[0] = Declaracion(t[1], True, t[3], t[5],True,None, lexer.lineno, columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo COLLATE CADENA NOT NULL PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,CADENA,True,None)')\n\ndef p_pldecla5(t):\n '''pldecla : ID tipo NOT NULL plasig PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],False,t[2],None,True, t[5], lexer.lineno, columna)\n varGramatical.append('pldecla ::= ID tipo NOT NULL plasig PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,False,tipo,None,True,plasig)')\n\ndef p_pldecla6(t):\n '''pldecla : ID CONSTANT tipo plasig PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],True,t[3],None,False,t[4],lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo plasig PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,None,False,plasig)')\n\ndef p_pldecla7(t):\n '''pldecla : ID CONSTANT tipo NOT NULL PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],True,t[3],None,True,None,lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo NOT NULL PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,None,True,None)')\n\ndef p_pldecla8(t):\n '''pldecla : ID CONSTANT tipo COLLATE CADENA PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],True,t[3],t[5],False,None,lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo COLLATE CADENA PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,CADENA,False,None)')\n\ndef p_pldecla9(t):\n '''pldecla : ID tipo plasig PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],False,t[2],None,False,t[3],lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID tipo plasig PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,False,tipo,None,False,plasig)')\n\ndef p_pldecla10(t):\n '''pldecla : ID tipo NOT NULL PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],False,t[2],None,True,None,lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID tipo NOT NULL PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,False,tipo,None,True,None)')\n\ndef p_pldecla11(t):\n '''pldecla : ID CONSTANT tipo PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],True,t[3],None,False,None,lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID CONSTANT tipo PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,True,tipo,None,False,None)')\n\ndef p_pldecla12(t):\n '''pldecla : ID tipo COLLATE CADENA PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],False,t[2],t[4],False,None,lexer.lineno,columna)\n 
varGramatical.append('pldecla ::= ID tipo COLLATE CADENA PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,False,tipo,CADENA,False,None)')\n\ndef p_pldecla13(t):\n '''pldecla : ID tipo PTCOMA'''\n global columna\n t[0] = Declaracion(t[1],False,t[2],None,False,None,lexer.lineno,columna)\n varGramatical.append('pldecla ::= ID tipo PTCOMA')\n varSemantico.append('pldecla = Declaracion(ID,False,tipo,None,False,None)')\n\n#PLASIG ********************************\ndef p_plasig(t):\n '''plasig : DEFAULT E\n | DOSPT IGUAL E\n | IGUAL E'''\n if t[1].lower() == 'default':\n t[0] = t[2]\n varGramatical.append('plasig ::= DEFAULT E')\n varSemantico.append('plasig = E')\n elif t[1] == '=':\n t[0] = t[2]\n varGramatical.append('plasig ::= IGUAL E')\n varSemantico.append('plasig = E')\n else:\n t[0] = t[3]\n varGramatical.append('plasig ::= DOSPT IGUAL E')\n varSemantico.append('plasig = E')\n\n#PLALIAS ******************************\ndef p_plalias(t):\n '''plalias : ID ALIAS FOR DOLAR ENTERO PTCOMA\n | ID ALIAS FOR ID PTCOMA'''\n global columna\n if t[4] == '$':\n t[0] = Alias(t[1],t[5],None,lexer.lineno,columna)\n varGramatical.append('plalias ::= ID ALIAS FOR DOLAR ENTERO PTCOMA')\n varSemantico.append('plalias = Alias(ID,ENTERO,None)')\n else:\n t[0] = Alias(t[1],None,t[4],lexer.lineno,columna)\n varGramatical.append('plalias ::= ID ALIAS FOR ID PTCOMA')\n varSemantico.append('plalias = Alias(ID,None,ID)')\n\n#L_PARAM ********************************\ndef p_l_param(t):\n '''l_param : l_param COMA param '''\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('l_param ::= l_param COMA param')\n varSemantico.append('l_param.append(param); l_param = l_param')\n\ndef p_l_param1(t):\n 'l_param : param'\n t[0] = [t[1]]\n varGramatical.append('l_param ::= param')\n varSemantico.append('l_param = param')\n\ndef p_l_param11(t):\n 'l_param : '\n t[0] = None\n varGramatical.append('l_param ::= ')\n varSemantico.append('l_param = None')\n\ndef p_param(t):\n ''' param : ID typeparam '''\n t[0] = Parametro(t[1], t[2])\n varGramatical.append('param ::= ID typeparam')\n varSemantico.append('param = Parametro(1,ID,typeparam)')\n\ndef p_typeparam(t):\n '''typeparam : tipo'''\n t[0] = t[1]\n varGramatical.append('typeparam ::= tipo')\n varSemantico.append('typeparam = tipo')\n\n#PLRETURNS ******************************************\ndef p_plreturns(t):\n '''plreturns : typeparam'''\n t[0] = t[1]\n varGramatical.append('plreturns ::= typeparam')\n varSemantico.append('plreturns = typeparam')\n\n#PLASIGNACION ******************************************\ndef p_plasignacion(t):\n '''plasignacion : ID pasigvalor PTCOMA '''\n t[0] = Plasignacion(t[1],t[2])\n varGramatical.append('plasignacion ::= ID pasigvalor PTCOMA')\n varSemantico.append('plasignacion = Plasignacion(ID,pasigvalor)')\n\ndef p_pasigvalor(t):\n '''pasigvalor : DOSPT IGUAL E\n | IGUAL E'''\n if t[1] == ':':\n t[0] = t[3]\n varGramatical.append('pasigvalor ::= DOSPT IGUAL E')\n varSemantico.append('pasigvalor = E')\n else:\n t[0] = t[2]\n varGramatical.append('pasigvalor ::= IGUAL E')\n varSemantico.append('pasigvalor = E')\n\n# FUNCTION *********************************************\ndef p_plfunction(t):\n '''instruccion : CREATE FUNCTION ID PARIZQ l_param PARDR RETURNS plreturns AS DOLAR DOLAR blodecla blobegin DOLAR DOLAR LANGUAGE PLPGSQL PTCOMA '''\n t[0] = CreateFunction(t[3],t[5],t[8],t[12],t[13])\n varGramatical.append('instruccion ::= CREATE FUNCTION ID PARIZQ l_param PARDR RETURNS plreturns AS DOLAR DOLAR blodecla blobegin DOLAR DOLAR 
LANGUAGE PLPGSQL PTCOMA')\n varSemantico.append('instruccion = CreateFunction(ID,l_param,plreturns,blodecla,blobegin')\n\ndef p_plfunction1(t):\n '''instruccion : CREATE FUNCTION ID PARIZQ PARDR RETURNS plreturns AS DOLAR DOLAR blodecla blobegin DOLAR DOLAR LANGUAGE PLPGSQL PTCOMA '''\n t[0] = CreateFunction(t[3],None,t[7],t[11],t[12])\n varGramatical.append('instruccion ::= CREATE FUNCTION ID PARIZQ PARDR RETURNS plreturns AS DOLAR DOLAR blodecla blobegin DOLAR DOLAR LANGUAGE PLPGSQL PTCOMA')\n varSemantico.append('instruccion = CreateFunction(ID,None,plreturns,blodecla,blobegin')\n\ndef p_plfunction2(t):\n '''instruccion : CREATE FUNCTION ID PARIZQ l_param PARDR RETURNS plreturns AS DOLAR DOLAR blobegin DOLAR DOLAR LANGUAGE PLPGSQL PTCOMA '''\n t[0] = CreateFunction(t[3],t[5], t[8],None, t[12])\n varGramatical.append('instruccion ::= CREATE FUNCTION ID PARIZQ l_param PARDR RETURNS plreturns AS DOLAR DOLAR blobegin DOLAR DOLAR LANGUAGE PLPGSQL PTCOMA')\n varSemantico.append('instruccion = CreateFunction(ID,l_param,plreturns,None,blobegin')\n\ndef p_plfunction3(t):\n '''instruccion : CREATE FUNCTION ID PARIZQ PARDR RETURNS plreturns AS DOLAR DOLAR blobegin DOLAR DOLAR LANGUAGE PLPGSQL PTCOMA '''\n t[0] = CreateFunction(t[3],None, t[7], None, t[11])\n varGramatical.append('instruccion ::= CREATE FUNCTION ID PARIZQ PARDR RETURNS plreturns AS DOLAR DOLAR blobegin DOLAR DOLAR LANGUAGE PLPGSQL PTCOMA')\n varSemantico.append('instruccion = CreateFunction(ID,None,plreturns,None,blobegin')\n\n#DROP FUNCTION *********************************************\ndef p_dropfunction(t):\n 'instruccion : DROP FUNCTION ID PTCOMA'\n t[0] = DropFunction(t[3])\n varGramatical.append('instruccion ::= DROP FUNCTION ID PTCOMA')\n varSemantico.append('instruccion = DropFunction(ID)')\n\ndef p_dropfunction1(t):\n 'instruccion : DROP FUNCTION IF EXISTS ID PTCOMA'\n t[0] = DropFunction(t[5])\n varGramatical.append('instruccion ::= DROP FUNCTION IF EXISTS ID PTCOMA')\n varSemantico.append('instruccion = DropFunction(ID)')\n\n#BLODECLA *************************************\ndef p_blodecla(t):\n '''blodecla : DECLARE l_pldeclare'''\n t[0] = t[2]\n varGramatical.append('blodecla ::= DECLARE l_pldeclare')\n varSemantico.append('blodecla ::= l_pldeclare')\n\ndef p_blodecla1(t):\n '''blodecla : '''\n t[0] = None\n varGramatical.append('blodecla ::= ')\n varSemantico.append('blodecla ::= None')\n\ndef p_l_pldeclare(t):\n '''l_pldeclare : l_pldeclare pldecla'''\n t[1].append(t[2])\n t[0] = t[1]\n varGramatical.append('l_pldeclare ::= pldecla')\n varSemantico.append('l_pldeclare.append(pldecla); l_pldeclare = pldecla')\n\ndef p_l_pldeclare1(t):\n '''l_pldeclare : pldecla'''\n t[0] = [t[1]]\n varGramatical.append('l_pldeclare ::= pldecla')\n varSemantico.append('l_pldeclare = pldecla')\n\n#BLOBEGIN ***********************************\ndef p_blobegin(t):\n '''blobegin : BEGIN l_plsen END PTCOMA'''\n t[0] = t[2]\n varGramatical.append('blobegin ::= BEGIN l_plsen END PTCOMA')\n varSemantico.append('blobegin = l_plsen')\n\ndef p_l_plsen(t):\n '''l_plsen : l_plsen plsen '''\n t[1].append(t[2])\n t[0] = t[1]\n varGramatical.append('l_plsen ::= l_plsen plsen')\n varSemantico.append('l_plsen.append(plsen); l_plsen = plsen')\n\ndef p_l_plsen1(t):\n '''l_plsen : plsen '''\n t[0] = [t[1]]\n varGramatical.append('l_plsen ::= plsen')\n varSemantico.append('l_plsen = [plsen]')\n\n#PLSEN *************************************\ndef p_plsen(t):\n '''plsen : plasignacion\n | plretu\n | plIf\n | pl_Case\n | plCall\n | instruccion\n | blobegin '''\n 
t[0] = t[1]\n varGramatical.append('plsen ::= plinstruccion')\n varSemantico.append('plsen = plinstruccion')\n\n#PLRETU *************************************\ndef p_plretu(t):\n '''plretu : RETURN E PTCOMA'''\n t[0] = Return(t[2])\n varGramatical.append('plretu ::= RETURN E PTCOMA')\n varSemantico.append('plretu = Return(E)')\n\n# --------------- Instrucciones Query -------------------------\n# PLSELECT**************************\ndef p_plselect(t):\n '''plsen : SELECT select_list INTO ID FROM subquery inner WHERE complemSelect orderby limit PTCOMA'''\n\ndef p_plselect1(t):\n '''plsen : SELECT select_list INTO STRICT ID FROM subquery inner WHERE complemSelect orderby limit PTCOMA'''\n\ndef p_plselect2(t):\n '''plsen : SELECT select_list INTO ID FROM subquery inner orderby limit PTCOMA'''\n\ndef p_plselect3(t):\n '''plsen : SELECT select_list INTO STRICT ID FROM subquery inner orderby limit PTCOMA'''\n\n# PLINSERT**************************\ndef p_plinsert(t):\n 'plsen : INSERT INTO ID PARIZQ listaID PARDR VALUES value RETURNING plreturning INTO ID PTCOMA'\n global columna\n t[0] = plinsert(1,t[3],t[5],t[8],t[10],t[12],lexer.lineno,columna)\n varGramatical.append('plsen ::= INSERT INTO ID PARIZQ listaID PARDR VALUES value RETURNING plreturning INTO ID PTCOMA')\n varSemantico.append('plsen = plinsert(1,ID,listaID,value,plreturning,ID)')\n\ndef p_plinsert1(t):\n 'plsen : INSERT INTO ID PARIZQ listaID PARDR VALUES value RETURNING plreturning INTO STRICT ID PTCOMA'\n global columna\n t[0] = plinsert(1,t[3],t[5],t[8],t[10],t[13],lexer.lineno,columna)\n varGramatical.append('plsen ::= INSERT INTO ID PARIZQ listaID PARDR VALUES value RETURNING plreturning INTO ID PTCOMA')\n varSemantico.append('plsen = plinsert(1,ID,listaID,value,plreturning,ID)')\n\ndef p_plinsert2(t):\n 'plsen : INSERT INTO ID VALUES value RETURNING plreturning INTO ID PTCOMA'\n global columna\n t[0] = plinsert(2,t[3],None,t[5],t[7],t[9],lexer.lineno,columna)\n varGramatical.append('plsen ::= INSERT INTO ID VALUES value RETURNING plreturning INTO ID PTCOMA')\n varSemantico.append('plsen = plinsert(2,ID,None,value,plreturning,ID)')\n\ndef p_plinsert3(t):\n 'plsen : INSERT INTO ID VALUES value RETURNING plreturning INTO STRICT ID PTCOMA'\n global columna\n t[0] = plinsert(2,t[3],None,t[5],t[7],t[10],lexer.lineno,columna)\n varGramatical.append('plsen ::= INSERT INTO ID VALUES value RETURNING plreturning INTO STRICT ID PTCOMA')\n varSemantico.append('plsen = plinsert(2,ID,None,value,plreturning,ID)')\n\n# PLUPDATE**************************\ndef p_pludapte(t):\n 'plsen : UPDATE ID SET asignaciones WHERE where andOr RETURNING plreturning INTO ID PTCOMA'\n\ndef p_pludapte1(t):\n 'plsen : UPDATE ID SET asignaciones WHERE where andOr RETURNING plreturning INTO STRICT ID PTCOMA'\n\n# PLDELETE *************************\ndef p_pldelete(t):\n 'plsen : DELETE FROM ID WHERE where andOr RETURNING plreturning INTO ID PTCOMA'\n\ndef p_pldelete1(t):\n 'plsen : DELETE FROM ID WHERE where andOr RETURNING plreturning INTO STRICT ID PTCOMA'\n\n# PLRETURNING ************************\ndef p_plreturning(t):\n '''plreturning : l_plid\n | MULT '''\n t[0] = t[1]\n if t[1] == '*':\n varGramatical.append('plreturning ::= MULT')\n varSemantico.append('plreturning = MULT')\n else:\n varGramatical.append('plreturning ::= l_plid')\n varSemantico.append('plreturning = l_plid')\n\ndef p_l_plid(t):\n '''l_plid : l_plid COMA plid '''\n t[1].append(t[3])\n t[0] = t[1]\n varGramatical.append('l_plid ::= l_plid COMA plid')\n varSemantico.append('l_plid.append(plid); 
l_plid = l_plid')\n\ndef p_l_plid1(t):\n '''l_plid : plid '''\n t[0] = [t[1]]\n varGramatical.append('l_plid ::= plid')\n varSemantico.append('l_plid = plid ')\n\ndef p_plid(t):\n '''plid : ID\n | ID AS ID'''\n global columna\n if len(t) == 4:\n t[0] = IdAsId(t[1],t[3],lexer.lineno,columna)\n varGramatical.append('plid ::= ID AS ID')\n varSemantico.append('plid = IdAsId(ID,ID)')\n else:\n t[0] = Id(t[1],lexer.lineno,columna)\n varGramatical.append('plid ::= ID')\n varSemantico.append('plid = Id(ID)')\n\n# ------------- Control Structures ---------------------\n\ndef p_Call1(t):\n ''' instruccion : plCall\n | plIf\n | pl_Case '''\n t[0] = t[1]\n\n# Call *************************\ndef p_CAll(t):\n 'plCall : EXECUTE ID PARIZQ l_plval PARDR PTCOMA'\n global columna\n t[0] = plCall(t[2],t[4],lexer.lineno,columna)\n varGramatical.append('plCall ::= EXECUTE ID PARIZQ l_plval PARDR PTCOMA')\n varSemantico.append('plCall = ')\n\ndef p_l_plval(t):\n ' l_plval : l_plval COMA plval '\n t[0] = t[1] + [t[3]]\n varGramatical.append('l_plval ::= l_plval COMA plval')\n varSemantico.append('l_plval = t[1] + [t[3]]')\n\ndef p_l_plval1(t):\n ' l_plval : '\n t[0] = None\n varGramatical.append('l_plval ::= ')\n varSemantico.append('l_plval = None')\n\ndef p_l_plval2(t):\n '''l_plval : plval '''\n t[0] = [t[1]]\n varGramatical.append('l_plval ::= plval')\n varSemantico.append('l_plval = [t[1]]')\n\ndef p_plval(t):\n '''plval : valores\n | TRUE\n | FALSE\n | ID'''\n t[0] = t[1]\n varGramatical.append('plval ::= valores')\n varSemantico.append('plval = valores ')\n\n# IF ****************************\ndef p_If(t):\n ''' plIf : IF E THEN l_plsen END IF PTCOMA '''\n global columna\n t[0] = Ifpl(1, t[2], t[4], None, None, lexer.lineno, columna)\n varGramatical.append('plIf ::= IF E THEN plsen END IF PTCOMA')\n varSemantico.append('plIf = If(1, t[2], [t[4]], None, None, None, lexer.lineno, columna) ')\n\ndef p_If1(t):\n ''' plIf : IF E THEN l_plsen ELSE l_plsen END IF PTCOMA '''\n global columna\n t[0] = Ifpl(3, t[2], t[4], None, t[6], lexer.lineno, columna)\n varGramatical.append('plIf ::= IF E THEN plsen ELSE plsen END IF PTCOMA')\n varSemantico.append('plIf = If(3, t[2], [t[4]], None, None, [t[6]], lexer.lineno, columna) ')\n\ndef p_If2(t):\n ''' plIf : IF E THEN l_plsen plelsif ELSE l_plsen END IF PTCOMA '''\n global columna\n t[0] = Ifpl(2, t[2], t[4], t[5], t[7], lexer.lineno, columna)\n varGramatical.append('plIf ::= IF E THEN plsen plelsif ELSE plsen END IF PTCOMA')\n varSemantico.append('plIf = If(5, str(t[3]), str(t[5]), t[9], None, lexer.lineno, columna) ')\n\ndef p_plelsif(t):\n ''' plelsif : plelsif elsif '''\n t[0] = t[1]+[t[2]]\n\ndef p_plelsif1(t):\n ''' plelsif : elsif '''\n t[0] = [t[1]]\n\ndef p_elsif(t):\n 'elsif : ELSIF E THEN plsen'\n t[0] = {'exp':t[2],'sent':t[4]}\n\n# Case *******************\ndef p_case(t):\n 'pl_Case : CASE ID opcCase elseCase END CASE PTCOMA'\n global columna\n t[0] = CasePL(1,t[2], t[3], t[4],lexer.lineno,columna)\n varGramatical.append('pl_Case ::= CASE ID opcCase elseCase END CASE PTCOMA')\n varSemantico.append('pl_Case = ')\n\ndef p_case1(t):\n 'pl_Case : CASE opcCase elseCase END CASE PTCOMA'\n global columna\n t[0] = CasePL(2,None,t[2],t[3],lexer.lineno,columna)\n varGramatical.append('pl_Case ::= CASE opcCase elseCase END CASE PTCOMA')\n varSemantico.append('pl_Case = ')\n\ndef p_case2(t):\n '''opcCase : opcCase case '''\n t[0] = t[1]+[t[2]]\n varGramatical.append('opcCase ::= opcCase case')\n varSemantico.append(' opcCase = ')\n\ndef p_casee2(t):\n 
'''opcCase : case '''\n t[0] = [t[1]]\n varGramatical.append('opcCase ::= case')\n varSemantico.append('opcCase = ')\n\ndef p_case3(t):\n ''' case : WHEN listaExpresiones THEN l_plsen'''\n t[0] = {'exp': t[2], 'sent': t[4]}\n varGramatical.append('case ::= WHEN listaExpresiones THEN plsen ')\n varSemantico.append('case = ')\n\ndef p_case4(t):\n ''' elseCase : ELSE l_plsen '''\n t[0] = t[2]\n varGramatical.append('elseCase ::= ELSE plsen ')\n varSemantico.append('elseCase = ')\n\ndef p_case41(t):\n ''' elseCase : '''\n t[0] = []\n varGramatical.append('elseCase ::= ')\n varSemantico.append('elseCase = ')\n\n# -------------- Transaction Managament ---------------------\n\ndef p_Procedure(t):\n 'instruccion : CREATE PROCEDURE ID PARIZQ l_param PARDR LANGUAGE PLPGSQL AS DOLAR DOLAR blodecla blobegin DOLAR DOLAR'\n global columna\n t[0] = CreateProcedure(t[3],t[5],t[12],t[13],lexer.lineno,columna)\n varGramatical.append(' instruccion ::= CREATE PROCEDURE ID PARIZQ opc_param PARDR LANGUAGE PLPGSQL AS DOLAR DOLAR bloq1 BEGIN bloq3 END PTCOMA DOLAR DOLAR ')\n varSemantico.append(' instruccion = ')\n\ndef p_dropProcedure(t):\n ' instruccion : DROP PROCEDURE ID PARIZQ PARDR PTCOMA '\n global columna\n t[0] = DropProcedure(t[3], lexer.lineno, columna)\n varGramatical.append('instruccion ::= DROP PROCEDURE ID PARIZQ PARDR PTCOMA')\n varSemantico.append('instruccion = DropProcedure(t[3], lexer.lineno, columna) ')\n\n# ----- INDICES ------------------------------------\ndef p_Indice(t):\n 'instruccion : CREATE INDEX ID ON ID PARIZQ listaID PARDR whereIndice'\n global columna\n t[0] = Index(1, str(t[3]), str(t[5]), t[7], t[9], None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE INDEX ID ON ID PARIZQ listaID PARDR whereIndice')\n varSemantico.append('instruccion = Index(1,t[3],t[5],t[7],t[9], lexer.lineno, columna) ')\n\ndef p_IndiceHash(t):\n 'instruccion : CREATE INDEX ID ON ID USING HASH PARIZQ listaID PARDR PTCOMA'\n global columna\n t[0] = Index(2, str(t[3]), str(t[5]), t[9], None, None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE INDEX ID ON ID USING HASH PARIZQ listaID PARDR PTCOMA')\n varSemantico.append('instruccion = Index(2, str(t[3]), str(t[5]), t[9], None, lexer.lineno, columna) ')\n\ndef p_IndiceUnique(t):\n 'instruccion : CREATE UNIQUE INDEX ID ON ID PARIZQ listaID PARDR PTCOMA'\n global columna\n t[0] = Index(3, str(t[4]), str(t[6]), t[8], None, None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE UNIQUE INDEX ID ON ID PARIZQ listaID PARDR PTCOMA')\n varSemantico.append('instruccion = Index(3, str(t[4]), str(t[6]), t[8], None, lexer.lineno, columna) ')\n\ndef p_IndiceOrderBY(t):\n 'instruccion : CREATE INDEX ID ON ID PARIZQ ID opc_Order PARDR PTCOMA'\n global columna\n t[0] = Index(4, str(t[3]), str(t[5]), t[7], None, t[8], lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE INDEX ID ON ID PARIZQ ID opc_Order PARDR PTCOMA')\n varSemantico.append('instruccion = Index(4, str(t[3]), str(t[5]), t[7], None, lexer.lineno, columna) ')\n\ndef p_opc_Order(t):\n '''opc_Order : ASC\n | DESC '''\n if t[1].lower() == 'asc':\n t[0] = 'asc'\n varGramatical.append('opc_Order ::= ASC')\n varSemantico.append('opc_Order = ')\n else:\n t[0] = 'desc'\n varGramatical.append('opc_Order ::= DESC')\n varSemantico.append('opc_Order = ')\n\ndef p_opc_Order1(t):\n ''' opc_Order : ASC NULLS FIRST\n | DESC NULLS FIRST '''\n if t[1].lower() == 'asc':\n t[0] = 'asc nulls first'\n varGramatical.append('opc_Order ::= ASC NULLS 
FIRST')\n varSemantico.append('opc_Order = ')\n else:\n t[0] = 'desc nulls first'\n varGramatical.append('opc_Order ::= DESC NULLS FIRST')\n varSemantico.append('opc_Order = ')\n\ndef p_opc_Order2(t):\n ''' opc_Order : ASC NULLS LAST\n | DESC NULLS LAST '''\n if t[1].lower() == 'asc' :\n t[0] = 'asc nulls last'\n varGramatical.append('opc_Order ::= ASC NULLS LAST')\n varSemantico.append('opc_Order = ')\n else:\n t[0] = 'desc nulls last'\n varGramatical.append('opc_Order ::= DESC NULLS LAST')\n varSemantico.append('opc_Order = ')\n\ndef p_opc_Order3(t):\n ''' opc_Order : NULLS LAST\n | NULLS FIRST '''\n if t[2].lower() == 'last' :\n t[0] = 'nulls last'\n varGramatical.append('opc_Order ::= NULLS LAST')\n varSemantico.append('opc_Order = ')\n else:\n t[0] = 'nulls first'\n varGramatical.append('opc_Order ::= NULLS FIRST')\n varSemantico.append('opc_Order = ')\n\ndef p_IndiceLower(t):\n 'instruccion : CREATE INDEX ID ON ID PARIZQ LOWER PARIZQ ID PARDR PARDR PTCOMA'\n global columna\n t[0] = Index(5, str(t[3]), str(t[5]), t[9], None, None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE INDEX ID ON ID PARIZQ LOWER PARIZQ ID PARDR PARDR PTCOMA')\n varSemantico.append('instruccion = Index(5, str(t[3]), str(t[5]), t[9], None, lexer.lineno, columna) ')\n\ndef p_IndiceLower1(t):\n 'instruccion : CREATE INDEX ID ON ID PARIZQ PARIZQ LOWER PARIZQ ID PARDR PARDR PARDR PTCOMA'\n global columna\n t[0] = Index(5, str(t[3]), str(t[5]), t[10], None, None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE INDEX ID ON ID PARIZQ LOWER PARIZQ ID PARDR PARDR PTCOMA')\n varSemantico.append('instruccion = Index(5, str(t[3]), str(t[5]), t[9], None, lexer.lineno, columna) ')\n\ndef p_IndiceLower2(t):\n 'instruccion : CREATE INDEX ID ON ID PARIZQ listaID PARDR PTCOMA'\n global columna\n t[0] = Index(5, str(t[3]), str(t[5]), t[7], None, None, lexer.lineno, columna)\n varGramatical.append('instruccion ::= CREATE INDEX ID ON ID PARIZQ LOWER PARIZQ ID PARDR PARDR PTCOMA')\n varSemantico.append('instruccion = Index(5, str(t[3]), str(t[5]), t[9], None, lexer.lineno, columna) ')\n\ndef p_IndiceWhere2(t):\n 'whereIndice : PTCOMA'\n t[0] = None\n\ndef p_IndiceWhere3(t):\n 'whereIndice : WHERE NOT PARIZQ E valores AND E valores PARDR PTCOMA'\n t[0] = t[4]\n varGramatical.append('whereIndice ::= WHERE NOT PARIZQ E valores AND E valores PARDR PTCOMA')\n varSemantico.append('whereIndice = Expresion(t[4],t[7]) ')\n\ndef p_IndiceWhere4(t):\n 'whereIndice : WHERE PARIZQ E valores AND E valores PARDR PTCOMA'\n t[0] = t[3]\n varGramatical.append('whereIndice ::= WHERE PARIZQ E valores AND E valores PARDR PTCOMA')\n varSemantico.append('whereIndice = Expresion(t[3], t[6]) ')\n\ndef p_IndiceWhere(t):\n 'whereIndice : WHERE where PTCOMA'\n t[0] = t[2]\n varGramatical.append('whereIndice ::= WHERE where PTCOMA')\n varSemantico.append('whereIndice = Where(t[2]) ')\n\ndef p_DropIndice(t):\n 'instruccion : DROP INDEX ID PTCOMA '\n global columna\n t[0] = DropIndex(t[3],lexer.lineno, columna)\n varGramatical.append('instruccion ::= DROP INDEX ID PTCOMA')\n varSemantico.append('instruccion = DropIndex(t[2],lexer.lineno, columna) ')\n\ndef p_AlterIndice111(t):\n 'instruccion : ALTER INDEX ifIndice ID ALTER COLUMN ID ID PTCOMA'\n global columna\n t[0] = AlterIndex(1,t[3],t[4],t[6],t[7],lexer.lineno,columna)\n varGramatical.append('instruccion ::= ALTER INDEX ifIndice name ALTER ID ENTERO PTCOMA ')\n varSemantico.append('instruccion = ')\n\ndef p_AlterIndice(t):\n 'instruccion : ALTER INDEX ifIndice ID 
ALTER COLUMN ID ENTERO PTCOMA'\n global columna\n t[0] = AlterIndex(2,t[3],t[4],t[6],t[7],lexer.lineno,columna)\n varGramatical.append('instruccion ::= ALTER INDEX ifIndice name ALTER ID ENTERO PTCOMA ')\n varSemantico.append('instruccion = ')\n\ndef p_ifIndice(t):\n 'ifIndice : IF EXISTS'\n t[0] = 'true'\n varGramatical.append('ifIndice ::= IF EXISTS')\n varSemantico.append('ifIndice = if exists ')\n\ndef p_IfIndice1(t):\n 'ifIndice : '\n t[0] = 'false'\n varGramatical.append('ifIndice ::= epsilon')\n varSemantico.append(' ifIndice = []')\n\n\n# MODO PANICO ***************************************\ndef p_error(t):\n if not t:\n #print(\"Fin del Archivo!\")\n return\n\n global L_errores_sintacticos\n #print(\"Error sintáctico en '%s'\" % t.value)\n colum = contador_columas(columna)\n #print(\"Columna \", colum)\n #print(\"columna lexer pos \", lexer.lexpos)\n data = Error(str(\"Error Sintactico\"), str(t.value), str(t.lexer.lineno), str(colum))\n L_errores_sintacticos.append(data)\n\n # Read ahead looking for a closing '}'\n '''while True:\n tok = parser.token() # Get the next token\n if not tok or tok.type == 'PTCOMA':\n #print(\"Se recupero con ;\")\n break'''\n\n # Read ahead looking for a terminating \";\"\n while True:\n tok = parser.token() # Get the next token\n if not tok or tok.type == 'PTCOMA': break\n parser.errok()\n\n # Return SEMI to the parser as the next lookahead token\n return tok\n # parser.restart()\n\n\ndef contador_columas(args):\n columna = args + 3\n return columna\n\n\ndef graphstack(stack, stack2):\n varGramatical.append('PRODUCCIONES')\n\n varSemantico.append('SEMANTICO')\n\n s = Digraph('structs', filename='reporteGramatica.gv', node_attr={'shape': 'plaintext'})\n u = len(stack)\n g = 'stack [label = <'\n for x in range(0, u):\n g += '' + '\\n' + '' + '\\n' + '' + '\\n' + ''\n\n g += '
           ' + str(stack.pop()) + '' + str(\n stack2.pop()) + '
          >, ];'\n\n # s.node( g + \"}\")\n s.body.append(g)\n s.render('reporteGramatica.gv', view=False)\n # s.view()\n\n\nimport ply.yacc as yacc\n\n# import reportes.AST.AST as AST\n# import Tabla_simbolos.TablaSimbolos as TS\nimport Analisis_Ascendente.reportes.AST.AST as AST\nfrom Analisis_Ascendente.Tabla_simbolos.TablaSimbolos import Simbolo\nfrom Analisis_Ascendente.Instrucciones.Select.Select3 import Selectp4\nfrom Analisis_Ascendente.Instrucciones.Select.Select4 import Selectp7\n\nparser = yacc.yacc()\n\n\n# analisis semantico\ndef procesar_instrucciones(instrucciones, ts):\n ## lista de instrucciones recolectadas\n global consola\n global exceptions\n\n if instrucciones == None:\n MessageBox.showinfo(\"Errores Sintacticos\", \"Revisa el reporte de errores sintacticos\")\n return\n\n for instr in instrucciones:\n if isinstance(instr, CreateReplace):\n CreateReplace.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr, Select):\n if instr.caso == 1:\n consola.append('caso 1')\n selectTime.ejecutar(instr, ts, consola,exceptions,True)\n elif instr.caso == 2:\n consola.append('caso 2')\n variable = SelectDist.Select_Dist()\n SelectDist.Select_Dist.ejecutar(variable, instr, ts, consola, exceptions)\n elif instr.caso == 3:\n consola.append('caso 3')\n variable = selectInst.Select_inst()\n selectInst.Select_inst.ejecutar(variable, instr, ts, consola, exceptions)\n elif instr.caso == 4:\n consola.append('caso 4')\n Selectp3.ejecutar(instr, ts, consola, exceptions,True)\n elif instr.caso == 5:\n consola.append('caso 5')\n Selectp4.ejecutar(instr, ts, consola, exceptions,True)\n elif instr.caso == 6:\n consola.append('caso 6')\n elif isinstance(instr, CreateTable):\n CreateTable.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr, Use):\n Use.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr, InsertInto):\n InsertInto.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr, Drop):\n Drop.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr, AlterDatabase):\n AlterDatabase.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr, AlterTable):\n AlterTable.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr, Delete):\n Delete.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr, Update):\n Update.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr,CreateType):\n CreateType.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr,Show):\n Show.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr, Index):\n Index.ejecutar(instr, ts, consola, exceptions)\n elif isinstance(instr,CreateFunction):\n CreateFunction.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr,DropFunction):\n DropFunction.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr,DropIndex):\n DropIndex.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr,AlterIndex):\n AlterIndex.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr,DropProcedure):\n DropProcedure.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr, CreateProcedure):\n CreateProcedure.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr, Ifpl):\n Ifpl.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr, CasePL):\n CasePL.ejecutar(instr,ts,consola,exceptions)\n elif isinstance(instr,plCall):\n plCall.ejecutar(instr,ts,consola,exceptions)\n else:\n return\n\ndef generar_Codigo3D(instrucciones, ts, codigo_3d_generado):\n if instrucciones is not None:\n for instruccion in 
instrucciones:\n if isinstance(instruccion, Select):\n if instruccion.caso == 1:\n consola.append('caso 1')\n codigo_3d_generado += selectTime.getC3D(instruccion, ts, lista_optimizaciones_C3D)\n elif instruccion.caso == 2:\n consola.append('caso 2')\n variable = SelectDist.Select_Dist()\n SelectDist.Select_Dist.ejecutar(variable, instruccion, ts, lista_optimizaciones_C3D)\n elif instruccion.caso == 3:\n variable = selectInst.Select_inst()\n codigo_3d_generado += selectInst.Select_inst.get3D(variable, instruccion, ts, lista_optimizaciones_C3D)\n elif instruccion.caso == 4:\n codigo_3d_generado += Selectp3.get3D(instruccion, ts, lista_optimizaciones_C3D)\n elif instruccion.caso == 5:\n codigo_3d_generado += Selectp4.getC3D(instruccion, ts, lista_optimizaciones_C3D)\n elif instruccion.caso == 6:\n consola.append('caso 6')\n else:\n codigo_3d_generado += instruccion.getC3D(lista_optimizaciones_C3D)\n return codigo_3d_generado\n\n\n\ndef ejecutarAnalisis(entrada):\n global L_errores_lexicos\n global L_errores_sintacticos\n global consola\n global exceptions\n global lexer\n global ts_global\n # limpiar\n lexer.input(\"\")\n lexer.lineno = 0\n #dropAll()\n consola = []\n exceptions = []\n L_errores_lexicos = []\n L_errores_sintacticos = []\n # realiza analisis lexico y semantico\n instrucciones = parser.parse(entrada)\n reporte = AST.AST(instrucciones)\n reporte.ReportarAST()\n\n procesar_instrucciones(instrucciones, ts_global)\n '''print(\"-----------------------------------\")\n print(\"Simbolos: \\n\",ts_global)\n for simbolo in ts_global.simbolos:\n print(ts_global.simbolos.get(simbolo).id)\n entorno = ts_global.simbolos.get(simbolo).Entorno\n print(entorno)\n if entorno != None:\n for data in entorno.simbolos:\n print(\" -> \",data)'''\n\n #print(\"Lista Lexico\\n\", L_errores_lexicos)\n #rint(\"Lista Sintactico\\n\", L_errores_sintacticos)\n # Reporte de analisis lexico y sintactico\n reportes = RealizarReportes()\n reportes.generar_reporte_lexicos(L_errores_lexicos)\n reportes.generar_reporte_sintactico(L_errores_sintacticos)\n reportes.generar_reporte_tablaSimbolos(ts_global.simbolos)\n reportes.generar_reporte_semanticos(exceptions)\n\n #print(\"Fin de analisis\")\n #print(\"Realizando reporte gramatical\")\n graphstack(varGramatical, varSemantico)\n return consola\n\n\ndef crear_Codido3D(entrada):\n global L_errores_lexicos\n global L_errores_sintacticos\n global consola\n global code3d\n global exceptions\n global lexer\n global ts_global\n global lista_optimizaciones_C3D\n # limpiar\n lexer.input(\"\")\n lexer.lineno = 0\n #dropAll()\n consola = []\n exceptions = []\n L_errores_lexicos = []\n L_errores_sintacticos = []\n lista_optimizaciones_C3D = []\n # realiza analisis lexico y semantico\n instrucciones = parser.parse(entrada) #\n reporte = AST.AST(instrucciones)\n reporte.ReportarAST()\n\n code3d = generar_Codigo3D(instrucciones, ts_global, '')\n '''print(\"-----------------------------------\")\n print(\"Simbolos: \\n\",ts_global)\n for simbolo in ts_global.simbolos:\n print(ts_global.simbolos.get(simbolo).id)\n entorno = ts_global.simbolos.get(simbolo).Entorno\n print(entorno)\n if entorno != None:\n for data in entorno.simbolos:\n print(\" -> \",data)'''\n\n #print(\"Lista Lexico\\n\", L_errores_lexicos)\n #rint(\"Lista Sintactico\\n\", L_errores_sintacticos)\n # Reporte de analisis lexico y sintactico\n reportes = RealizarReportes()\n reportes.generar_reporte_lexicos(L_errores_lexicos)\n reportes.generar_reporte_sintactico(L_errores_sintacticos)\n 
reportes.generar_reporte_tablaSimbolos(ts_global.simbolos)\n reportes.generar_reporte_semanticos(exceptions)\n reportes.generar_reporte_optimizacion(lista_optimizaciones_C3D)\n graphstack(varGramatical, varSemantico)\n return code3d\n\n\n","sub_path":"parser/fase2/team19/Analisis_Ascendente/ascendente.py","file_name":"ascendente.py","file_ext":"py","file_size_in_byte":115489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144647094","text":"\"\"\"Example wordpair palingrams are nurses run and stir grits.\r\n\r\nOur program will examine the core word - we can make the following inferences about the core word:\r\n 1. It can have either an odd or even number of letters.\r\n 2. One contiguous part of the word spells a real word when read backward.\r\n 3. This contiguous part can occupy part or all of the core word.\r\n 4. The other contiguous part contains a palindromic sequence of letters.\r\n 5. The palindromic sequence can occupy part or all of the core word.\r\n 6. The palindromic sequence does not have to be a real word (unless it occupies the whole word).\r\n 7. The two parts cannot overlap or share letters.\r\n 8. The sequence is reversible.\r\n\r\n\"\"\"\r\nimport load_dictionary\r\n\r\n\r\ndef find_palingrams_opt():\r\n file = \"2of4brif.txt\"\r\n word_list = load_dictionary.load(file)\r\n words = set(word_list)\r\n pali_list = []\r\n\r\n for word in words:\r\n end = len(word)\r\n rev_word = word[::-1]\r\n if end > 1:\r\n for i in range(end):\r\n if word[i:] == rev_word[: end - i] and rev_word[end - i :] in words:\r\n pali_list.append((word, rev_word[end - i :]))\r\n if word[:i] == rev_word[end - i :] and rev_word[: end - i] in words:\r\n pali_list.append((rev_word[: end - i], word))\r\n return pali_list\r\n\r\n\r\ndef main():\r\n palingrams = find_palingrams_opt()\r\n # sort on first word\r\n palingrams_sorted = sorted(palingrams)\r\n print(\"\\nNumber of palingrams = {}\\n\".format(len(palingrams_sorted)))\r\n for first, second in palingrams_sorted:\r\n print(\"{} {}\".format(first, second))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"impractical.python/02_1_palindromes/palingram_optimized.py","file_name":"palingram_optimized.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200979211","text":"import numpy as np\n\ndef find(channel1, channel2):\n l, r = -15, -15\n max_corr = 0\n \n for i in range(-15,16):\n for j in range(-15,16):\n tmp = np.roll(channel2, j, axis=0)\n tmp = np.roll(tmp, i, axis=1)\n corr = (tmp * channel1).sum()\n if corr > max_corr:\n r = i\n l = j\n max_corr = corr\n \n return l, r\n\ndef cut_img(img, k):\n x, y = img.shape\n t_x, t_y = int(x * k), int(y * k)\n return img[t_x:x - t_x, t_y:y - t_y]\n\n\ndef align(img, g_coord):\n row_g, col_g = g_coord\n img = img_as_float(img)\n\n rows = img.shape[0] \n n = rows // 3\n\n b_row = 0, n\n g_row = n, 2 * n\n r_row = 2 * n, 3 * n\n \n #Обрезаем изображения (10%)\n b = cut_img(img[b_row[0]:b_row[1]], 0.1)\n g = cut_img(img[g_row[0]:g_row[1]], 0.1)\n r = cut_img(img[r_row[0]:r_row[1]], 0.1)\n \n \n b_row_new, b_col_new = find(g, b) #запоминаем сдвиги вертикали с наибольщей похожестью\n r_row_new, r_col_new = find(g, r)\n \n \n row_b, col_b = row_g - n - b_row_new, col_g - b_col_new\n row_r, col_r = row_g + n - r_row_new, col_g - r_col_new\n return (row_b, col_b), (row_r, col_r)\n\n\n\n\n","sub_path":"image device(week 2)/Сопоставление 
фотографий Прокудина-Горского.py","file_name":"Сопоставление фотографий Прокудина-Горского.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91785595","text":"# -*- coding: utf-8 -*-\n__author__ = 'Konrad'\n\nimport threading\nfrom queue import Queue\nfrom random import randint\n\n\nTHREADS = 4 # number of threads\nTASKS = 10\n\n\nclass HistogramThreadPool(threading.Thread):\n def __init__(self, tid, queue, matrix):\n threading.Thread.__init__(self)\n self.tid = tid\n self.queue = queue\n self.matrix = matrix\n self.counter = 0\n\n def run(self):\n while True:\n req = self.queue.get()\n if req is None:\n self.queue.task_done()\n break\n\n symbol, results = req\n symbol = ord(symbol)\n for self.i in range(0, len(self.matrix)):\n for self.j in range(0, len(self.matrix[self.i])):\n if self.matrix[self.i][self.j] == symbol:\n self.counter += 1\n results.put((symbol, self.counter))\n self.counter = 0\n self.queue.task_done()\n\n\ndef main():\n matrix = []\n for i in range(0, 20):\n matrix.append([])\n for j in range(0, 40):\n matrix[i].append(randint(33, 64))\n tasks_queue = Queue()\n results_queue = Queue()\n for i in range(0, THREADS):\n HistogramThreadPool(i, tasks_queue, matrix).start()\n\n for _ in range(0, TASKS):\n tasks_queue.put((chr(randint(33, 64)), results_queue))\n for _ in range(0, THREADS):\n tasks_queue.put(None)\n tasks_queue.join()\n for _ in range(0, TASKS):\n symbol, counter = results_queue.get()\n print(chr(symbol) + \" - \", end=\"\")\n for __ in range(0, counter):\n print(\"#\", end=\"\")\n print(\" - \" + str(counter))\n\nmain()","sub_path":"histogram-threadpool.py","file_name":"histogram-threadpool.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369008672","text":"import random\nimport multiprocessing\n\ndef gera_vector(int_range, count, return_dict=None, cpu_id=None):\n vector = []\n if return_dict != None:\n return_dict[cpu_id] = [random.randint(int_range[0], int_range[1]) for x in range(count)]\n else:\n vector = [random.randint(int_range[0], int_range[1]) for x in range(count)]\n return vector\n\ndef gera_vector_rapido(int_range, count):\n manager = multiprocessing.Manager()\n return_dict = manager.dict()\n cpu_count = multiprocessing.cpu_count()\n jobs = []\n count_slice = count//cpu_count\n for i in range(cpu_count-1):\n p = multiprocessing.Process(target=gera_vector, args=(int_range, count_slice, return_dict, i))\n p.start()\n jobs.append(p)\n p = multiprocessing.Process(target=gera_vector, args=(int_range, count-((cpu_count-1)*count_slice), return_dict, cpu_count-1))\n p.start()\n jobs.append(p)\n\n for job in jobs:\n job.join()\n vector = []\n for i in range(cpu_count):\n for j in return_dict[i]:\n vector.append(j)\n\n return vector\n","sub_path":"gera_vector.py","file_name":"gera_vector.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483679141","text":"from commands import getstatusoutput\nfrom json import loads\n\n#Copied from das_client.py\ndef get_value(data, filters, base=10):\n \"\"\"Filter data from a row for given list of filters\"\"\"\n for ftr in filters:\n if ftr.find('>') != -1 or ftr.find('<') != -1 or ftr.find('=') != -1:\n continue\n row = dict(data)\n values = []\n keys = ftr.split('.')\n for key in keys:\n val = [v for v in extract_value(row, 
key, base)]\n            if key == keys[-1]: # we collect all values at last key\n                values += [json.dumps(i) for i in val]\n            else:\n                row = val\n        if len(values) == 1:\n            yield values[0]\n        else:\n            yield values\n\ndef get_data(query, limit=None, threshold=None, idx=None, host=None):\n    cmd_opts = \"--format=json\"\n    if threshold is not None: cmd_opts += \" --threshold=%s\" % threshold\n    if limit is not None: cmd_opts += \" --limit=%s\" % limit\n    if idx is not None: cmd_opts += \" --idx=%s\" % idx\n    if host is not None: cmd_opts += \" --host=%s\" % host\n    err, out = getstatusoutput(\"das_client %s --query '%s'\" % (cmd_opts, query))\n    if not err: return loads(out)\n    return {'status' : 'error', 'reason' : out}\n","sub_path":"Utilities/General/python/cmssw_das_client.py","file_name":"cmssw_das_client.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"365027662","text":"data = [2,4,5,7,8,9,12,14,17,19,22,25,27,28,33,37]\ntarget = 37\n\n# Linear Search\ndef linear_search(data, target):\n    for i in range(len(data)):\n        if data[i] == target:\n            return True\n    return False\n\n# Iterative Binary Search\ndef binary_search_iterative(data, target):\n    low = 0\n    high = len(data) - 1\n\n    while low <= high:\n        mid = (low + high) // 2\n        if target == data[mid]:\n            return True\n        elif target < data[mid]:\n            high = mid - 1\n        else:\n            low = mid + 1\n    return False","sub_path":"Algorithm/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"620327749","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 20 15:20:19 2020\n\n@author: HOAREAU.LyseMay\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nQuestion 1\n\"\"\"\n\nstate = [0,1] \n# 1 free place\n# 0 occupied space, we continue\n\ndef parking_space(proba,Nb_position): # as a parameter the probability of having a free space and the number of spaces in the car park\n    return (np.random.choice(state, Nb_position, p = [1-proba,proba]),proba)\n\n# We generate several maps with different parking-space distributions\n\nPark1 = parking_space(0.1,10)\nPark2 = parking_space(0.2,15)\nPark3 = parking_space(0.3,20)\nPark4 = parking_space(0.8,20)\n\nprint(\"Park1 with p = 0.1 and 10 parking spaces\",Park1)\nprint(\"\\nPark2 with p = 0.2 and 15 parking spaces\",Park2)\nprint(\"\\nPark3 with p = 0.3 and 20 parking spaces\",Park3)\nprint(\"\\nPark4 with p = 0.8 and 20 parking spaces\",Park4)\n\n\"\"\"\nQuestion 2\n\"\"\"\n\ndef parking_strategy(park,D): # with D the cost of passing your destination without parking\n    Nb_position = len(park[0])\n    p = park[1]\n    q = 1-p\n    park = park[0]\n    park_car = park.copy()\n    for s in range(Nb_position,1,-1):\n        stop_condition = (D*p + 1) * q**s \n        if stop_condition >= 1 and park[s-1] == 1:\n            #print(s,stop_condition)\n            park_car[s-1] = 2\n            return park_car,s-1\n    \n\nStrategy1 = parking_strategy(Park1,100)\nStrategy2 = parking_strategy(Park2,100)\nStrategy3 = parking_strategy(Park3,1000)\nStrategy4 = parking_strategy(Park4,1000)\n\n# We implement the strategy for the different parkings\nif Strategy1 != None:\n    print(\"\\nStrategy n°1 with Park1\",Strategy1[0])\nelse:\n    print(\"The strategy didn't work for Park1\")\nif Strategy2 != None:\n    print(\"Strategy n°2 with Park2\",Strategy2[0])\nelse:\n    print(\"The strategy didn't work for Park2\")\nif Strategy3 != None:\n    print(\"Strategy n°3 with 
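The iterative binary search above also exists in the stdlib; a sketch of the bisect equivalent (the contains helper is illustrative, not part of the original file):

import bisect

def contains(sorted_data, target):
    # bisect_left finds the insertion point; a hit sits exactly there
    i = bisect.bisect_left(sorted_data, target)
    return i < len(sorted_data) and sorted_data[i] == target

data = [2, 4, 5, 7, 8, 9, 12, 14, 17, 19, 22, 25, 27, 28, 33, 37]
assert contains(data, 37) and not contains(data, 6)
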
Park3\",Strategy3[0])\nelse:\n print(\"The strategy didn't work for Park3\")\nif Strategy4 != None:\n print(\"Strategy n°4 with Park4\",Strategy4[0])\nelse:\n print(\"The strategy didn't work for Park4\")\n \n\"\"\"\nQuestion 3\n\"\"\"\n# We define three functions to vary the cost D, the probability p and the number of position\n\ndef variation_D(p,Nb_position,D_max):\n park = parking_space(p,Nb_position)\n count = np.zeros((D_max,1))\n D = np.linspace(0,D_max-1,D_max)\n #print(park)\n for d in range(0,D_max):\n Strat = parking_strategy(park,d)\n #print(Strat)\n if Strat != None:\n ind_car = Strat[1]\n Strat = Strat[0]\n for i in range(ind_car,Nb_position):\n if Strat[i] == 1:\n count[d] += 1\n plt.plot(D,count)\n plt.xlabel(\"D\")\n plt.ylabel(\"Count\")\n plt.title(\"Variation of D\")\n plt.show()\n return count # \"count\" represents the number of free spaces remaining, i.e. when the car has parked (stop condition).\n\n \nvariation_D(0.1,50,500)\n\ndef variation_p(Nb_position,Nb_proba,D):\n proba = np.linspace(0,1,Nb_proba)\n count = np.zeros((Nb_proba,1))\n for p in range(0,Nb_proba):\n park = parking_space(proba[p],Nb_position)\n Strat = parking_strategy(park,D)\n #print(Strat)\n if Strat != None:\n ind_car = Strat[1]\n Strat = Strat[0]\n for i in range(ind_car,Nb_position):\n if Strat[i] == 1:\n count[p] += 1\n plt.plot(proba,count)\n plt.xlabel(\"Probability p\")\n plt.ylabel(\"Count\")\n plt.title(\"Variation of p\")\n plt.show()\n return count\n\nvariation_p(50,500,100)\n\ndef variation_Nb_position(Nb_position_max,p,D):\n count = np.zeros((Nb_position_max,1))\n position = np.linspace(0,Nb_position_max-1,Nb_position_max)\n for pos in range(0,Nb_position_max):\n park = parking_space(p,pos)\n Strat = parking_strategy(park,D)\n #print(Strat)\n if Strat != None:\n ind_car = Strat[1]\n Strat = Strat[0]\n for i in range(ind_car,pos):\n if Strat[i] == 1:\n count[pos] += 1\n plt.plot(position,count)\n plt.xlabel(\"Number of position\")\n plt.ylabel(\"Count\")\n plt.title(\"Variation of the number of position\")\n plt.show()\n return count\n\nvariation_Nb_position(500,0.5,100)","sub_path":"TP4/TP4_AFKIR_CANNET_HOAREAU.py","file_name":"TP4_AFKIR_CANNET_HOAREAU.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"164331919","text":"import itertools\n\n\nclass CorrespondenceGenerator(object):\n '''A generator of ways to match items between two lists.'''\n\n def __init__(self, list1, list2):\n self.list1 = list1\n self.list2 = list2\n\n def __iter__(self):\n for reorderedList in itertools.permutations(self.list1,\n len(self.list2)):\n yield zip(reorderedList, self.list2)\n\n\nclass CorrespondenceGeneratorWithAddRemove(object):\n '''A generator of ways to match items between two lists.'''\n\n def __init__(self, list1, list2):\n self.list1 = list1\n self.list2 = list2\n\n def __iter__(self):\n if len(self.list1) > len(self.list2):\n longList = self.list1\n shortList = self.list2\n reverse = False\n else:\n longList = self.list2\n shortList = self.list1\n reverse = True\n for reorderedList in itertools.permutations(longList,\n len(longList)):\n if reverse:\n yield [i for i in itertools.izip_longest(shortList, reorderedList)]\n else:\n yield [i for i in itertools.izip_longest(reorderedList, 
shortList)]\n","sub_path":"projects/02-2x2-rpms/CorrespondenceGenerator.py","file_name":"CorrespondenceGenerator.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188590762","text":"# defining paths and paramters\n\nRAWDATA_PATH = './data/data_train.csv'\nSAMPLECSV_PATH = './data/sampleSubmission.csv'\nOUTPUTCSV_PATH = './out.csv'\nMATRAW_PATH = './cache/matraw.npy'\nMASK_PATH = './cache/mask.npy'\nMATMEAN_PATH = './cache/matimpute.npy'\nMATCSR_PATH = './cache/matcsr.npy'\n\nMASK_TRAIN_PATH = './cache/train_mask.npy'\nMASK_VAL_PATH = './cache/val_mask.npy'\nNCOLS = 1000\nNROWS = 10000\n","sub_path":"rbm/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"524363040","text":"#coding:utf-8\n\n\"\"\"\n\\u6570\\u636E\\u5E93\\u8FDE\\u63A5\\u7BA1\\u7406\n\"\"\"\n\n__author__ = \"liangxiaokai@21cn.com\"\n__version__ = \"1.0\"\n__date__ = \"2011/04/14\"\n__copyright__ = \"Copyright (c) 2011\"\n__license__ = \"Python\"\n\nfrom connect import *\n\nfrom sqlalchemy import Table,Column,func\nfrom sqlalchemy.types import *\nfrom sqlalchemy.orm import Mapper\n\ntab_gift = Table(\"gift\", metadata,\n Column(\"id\",Integer, primary_key=True),\n Column(\"icon\",String(100)),\n Column(\"name\",String(20)),\n Column(\"description\",String(140)),\n Column(\"gold\",BigInteger),\n Column(\"create_time\", DateTime),\n )\n\n \nclass TGift(TableObject):\n def __init__(self):\n TableObject.__init__(self)\n\n # def __repr__(self):\n # return \"id=%id,icon=%s,name=%s,description=%s,gold=%d\" % \\\n # (self.id,self.icon,self.name,self.description,self.gold)\n \nmapper_gift = Mapper(TGift,tab_gift)\n\nif __name__==\"__main__\":\n pass","sub_path":"code/db/gift.py","file_name":"gift.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"182063129","text":"alphabets = \"abcdefghijklmnopqrstuvwxyz\"\ndef spellMistakes(word) :\n \"\"\"Function returns possible typos for a word\n Typos taken into account:\n Deletion of a single letter in a work\n Exchange of adjacent letters\n \"\"\"\n split = [ (word[:i],word[i:]) for i in range(len(word)+1) ]\n delete = [ a + b[1:] for a,b in split if b ]\n transpose = [ a + b[1] + b[0] + b[2:] for a,b in split if len(b) > 1 ]\n return set(delete+transpose)\n \n\n","sub_path":"state/spellMistakes.py","file_name":"spellMistakes.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53418254","text":"# Copyright 2017 Mario Graff Guerrero\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom b4msa.command_line import load_json\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import LabelEncoder\nfrom EvoDAG.model import EvoDAGE\nimport 
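Tracing the split/delete/transpose construction from spellMistakes above on a short word makes the candidate set concrete:

word = "the"
split = [(word[:i], word[i:]) for i in range(len(word) + 1)]
delete = [a + b[1:] for a, b in split if b]                             # drop one letter
transpose = [a + b[1] + b[0] + b[2:] for a, b in split if len(b) > 1]   # swap adjacent letters
print(set(delete + transpose))  # {'he', 'te', 'th', 'hte', 'teh'}
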
importlib\nfrom sklearn.linear_model import LogisticRegression\nfrom .calibration import CalibrationLR\nfrom .model import Identity, BaseTextModel, BaseClassifier\nimport numpy as np\nimport logging\nfrom multiprocessing import Pool\ntry:\n from tqdm import tqdm\nexcept ImportError:\n def tqdm(x, **kwargs):\n return x\n\n\ndef kfold_decision_function(args):\n cl, X, y, tr, ts, seed = args\n c = cl(random_state=seed)\n c.fit([X[x] for x in tr], [y[x] for x in tr])\n _ = c.decision_function([X[x] for x in ts])\n return ts, _\n\n\ndef transform(args):\n k, m, t, X = args\n x = [t[_] for _ in X]\n df = m.decision_function(x)\n d = [EvoMSA.tolist(_) for _ in df]\n return (k, d)\n\n\ndef vector_space(args):\n k, t, X = args\n return k, [t[_] for _ in X]\n\n\nclass EvoMSA(object):\n def __init__(self, b4msa_params=None, evodag_args=dict(fitness_function='macro-F1'),\n b4msa_args=dict(), n_jobs=1, n_splits=5, seed=0, logistic_regression=False,\n models=[['EvoMSA.model.B4MSATextModel', 'EvoMSA.model.B4MSAClassifier']],\n evodag_class=\"EvoDAG.model.EvoDAGE\", logistic_regression_args=None, probability_calibration=False):\n if b4msa_params is None:\n b4msa_params = os.path.join(os.path.dirname(__file__),\n 'conf', 'default_parameters.json')\n b4msa_params = self.read_json(b4msa_params)\n b4msa_params.update(b4msa_args)\n self._b4msa_args = b4msa_params\n self._evodag_args = evodag_args\n self._n_jobs = n_jobs\n self._n_splits = n_splits\n self._seed = seed\n self._svc_models = None\n self._evodag_model = None\n self._logger = logging.getLogger('EvoMSA')\n self._le = None\n self._logistic_regression = None\n if logistic_regression:\n p = dict(random_state=self._seed, class_weight='balanced')\n if logistic_regression_args is not None:\n p.update(logistic_regression_args)\n self._logistic_regression = LogisticRegression(**p)\n self._exogenous = None\n self._exogenous_model = None\n self._probability_calibration = probability_calibration\n self.models = models\n self._evodag_class = self.get_class(evodag_class)\n\n def get_class(self, m):\n if isinstance(m, str):\n a = m.split('.')\n p = importlib.import_module('.'.join(a[:-1]))\n return getattr(p, a[-1])\n return m\n\n @property\n def models(self):\n return self._models\n\n @models.setter\n def models(self, models):\n if models is None:\n return\n if not isinstance(models, list):\n models = [models]\n self._models = []\n for m in models:\n if isinstance(m, list):\n textmodel, classifier = m\n tm = self.get_class(textmodel)\n cl = self.get_class(classifier)\n else:\n tm = Identity\n cl = self.get_class(m)\n assert issubclass(tm, BaseTextModel)\n assert issubclass(cl, BaseClassifier)\n self._models.append([tm, cl])\n\n @property\n def n_jobs(self):\n return self._n_jobs\n\n @n_jobs.setter\n def n_jobs(self, v):\n self._n_jobs = v\n\n def predict(self, X):\n pr = self.predict_proba(X)\n return self._le.inverse_transform(pr.argmax(axis=1))\n\n def predict_proba(self, X):\n X = self.transform(X)\n if self._logistic_regression is not None:\n X = self._evodag_model.raw_decision_function(X)\n return self._logistic_regression.predict_proba(X)\n return self._evodag_model.predict_proba(X)\n\n def raw_decision_function(self, X):\n X = self.transform(X)\n return self._evodag_model.raw_decision_function(X)\n\n def decision_function(self, X):\n X = self.transform(X)\n return self._evodag_model.decision_function(X)\n\n @property\n def exogenous_model(self):\n return self._exogenous_model\n\n @exogenous_model.setter\n def exogenous_model(self, v):\n if isinstance(v, 
list):\n for x in v:\n x.n_jobs = self.n_jobs\n else:\n v.n_jobs = self.n_jobs\n self._exogenous_model = v\n\n @property\n def exogenous(self):\n return self._exogenous\n\n @exogenous.setter\n def exogenous(self, a):\n self._exogenous = a\n\n def append_exogenous(self, d):\n e = self.exogenous\n if e is not None:\n return np.concatenate((d, e), axis=1)\n return d\n\n def append_exogenous_model(self, D, X):\n if self.exogenous_model is None:\n return D\n ex = self.exogenous_model\n if not isinstance(ex, list):\n ex = [ex]\n L = [D]\n for x in ex:\n _ = x.predict_proba(X)\n df = _ * 2 - 1\n df[df > 1] = 1\n df[df < -1] = -1\n L.append(df)\n return np.concatenate(L, axis=1)\n\n def model(self, X):\n if not isinstance(X[0], list):\n X = [X]\n m = []\n kwargs = self._b4msa_args\n self._logger.info(\"Starting TextModel\")\n self._logger.info(str(kwargs))\n for x in X:\n for tm, cl in self.models:\n m.append(tm(x, **kwargs))\n self._textModel = m\n\n def vector_space(self, X):\n if not isinstance(X[0], list):\n X = [X]\n args = []\n i = 0\n k = 0\n nmodels = len(self.models)\n for x in X:\n for _ in range(nmodels):\n t = self._textModel[k]\n k += 1\n args.append((i, t, x))\n i += 1\n if self.n_jobs > 1:\n p = Pool(self.n_jobs, maxtasksperchild=1)\n res = [x for x in tqdm(p.imap_unordered(vector_space, args), total=len(args))]\n res.sort(key=lambda x: x[0])\n p.close()\n else:\n res = [vector_space(x) for x in tqdm(args)]\n return [x[1] for x in res]\n\n def kfold_decision_function(self, cl, X, y):\n hy = [None for x in y]\n args = []\n for tr, ts in KFold(n_splits=self._n_splits,\n shuffle=True, random_state=self._seed).split(X):\n args.append([cl, X, y, tr, ts, self._seed])\n if self.n_jobs == 1:\n res = [kfold_decision_function(x) for x in tqdm(args, total=len(args))]\n else:\n p = Pool(self.n_jobs, maxtasksperchild=1)\n res = [x for x in tqdm(p.imap_unordered(kfold_decision_function, args),\n total=len(args))]\n p.close()\n for ts, df in res:\n [hy.__setitem__(k, self.tolist(v)) for k, v in zip(ts, df)]\n return hy\n \n def _transform(self, X, models, textModel):\n if len(models) == 0:\n return []\n args = [[i_m[0], i_m[1], t, X] for i_m, t in zip(enumerate(models), textModel) if i_m[1] is not None]\n if self.n_jobs > 1:\n p = Pool(self.n_jobs, maxtasksperchild=1)\n res = [x for x in tqdm(p.imap_unordered(transform, args), total=len(args))]\n res.sort(key=lambda x: x[0])\n p.close()\n else:\n res = [transform(x) for x in tqdm(args)]\n res = [x[1] for x in res]\n D = res[0]\n [[v.__iadd__(w) for v, w in zip(D, d)] for d in res[1:]]\n return D\n\n def transform(self, X, y=None):\n if y is None or self._svc_models[0] is None:\n D = self._transform(X, self._svc_models, self._textModel)\n else:\n cnt = len(self.models)\n D = self._transform(X, self._svc_models[cnt:], self._textModel[cnt:])\n Di = None\n for t_cl, t in zip(self.models, self._textModel):\n cl = t_cl[1]\n x = [t[_] for _ in X]\n d = self.kfold_decision_function(cl, x, y)\n if Di is None:\n Di = d\n else:\n [v.__iadd__(w) for v, w in zip(Di, d)]\n [v.__iadd__(w) for v, w in zip(Di, D)]\n D = Di\n _ = np.array(D)\n return self.append_exogenous_model(self.append_exogenous(_), X)\n\n def fit_svm(self, X, y):\n self.model(X)\n Xvs = self.vector_space(X)\n if not isinstance(y[0], list):\n y = [y]\n svc_models = []\n k = 0\n nmodels = len(self.models)\n for y0 in y:\n for j in range(nmodels):\n x = Xvs[k]\n cl = self.models[j][1]\n k += 1\n c = cl(random_state=self._seed)\n c.fit(x, y0)\n svc_models.append(c)\n self._svc_models = 
svc_models\n\n def fit(self, X, y, test_set=None):\n if isinstance(y[0], list):\n le = []\n Y = []\n for y0 in y:\n _ = LabelEncoder().fit(y0)\n le.append(_)\n Y.append(_.transform(y0).tolist())\n self._le = le[0]\n y = Y\n else:\n self._le = LabelEncoder().fit(y)\n y = self._le.transform(y).tolist()\n self.fit_svm(X, y)\n if isinstance(y[0], list):\n y = y[0]\n if isinstance(X[0], list):\n X = X[0]\n D = self.transform(X, y)\n if test_set is not None:\n if isinstance(test_set, list):\n test_set = self.transform(test_set)\n if self._probability_calibration:\n probability_calibration = CalibrationLR\n else:\n probability_calibration = None\n _ = dict(n_jobs=self.n_jobs, seed=self._seed,\n probability_calibration=probability_calibration)\n self._evodag_args.update(_)\n y = np.array(y)\n try:\n _ = self._evodag_class(**self._evodag_args).fit(D, y, test_set=test_set)\n self._evodag_model = _\n except TypeError:\n self._evodag_model = self._evodag_class().fit(D, y)\n if self._logistic_regression is not None:\n self._logistic_regression.fit(self._evodag_model.raw_decision_function(D), y)\n return self\n\n @staticmethod\n def tolist(x):\n if isinstance(x, list):\n return x\n elif isinstance(x, np.ndarray):\n return x.tolist()\n else:\n return [x]\n\n @staticmethod\n def read_json(fname):\n kw = load_json(fname)\n if isinstance(kw, list):\n kw = kw[0]\n return kw\n","sub_path":"EvoMSA/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":11326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138926745","text":"# -*- coding: utf-8 -*-\n'''\n Created by hushiwei on 2019/6/12\n Desc : \n Note : \n'''\n\nimport os\nfrom surprise import Dataset, Reader\nfrom surprise import KNNBasic, KNNWithMeans, KNNBaseline\n\n# data = Dataset.load_builtin(name='ml-100k')\nfile_path='/mnt/d/PycharmProjects/Python-AI/Recommend/datas/u.data'\nreader=Reader(line_format='user item rating timestamp',sep='\\t')\ndata=Dataset.load_from_file(file_path=file_path,reader=reader)\ntrainset = data.build_full_trainset()\n\nsim_options = {\n 'name': 'pearson',\n 'user_based': True\n}\n\nbsl_options={\n 'method':'sgd',\n 'n_epochs':50,\n 'reg':0.02,\n 'learning_rate':0.01\n}\n\nalgo=KNNBaseline(k=2,min_k=1,sim_options=sim_options,bsl_options=bsl_options)\n\n# 模型训练\nalgo.fit(trainset)\n\n# 模型预测\nuid='196'\niid='242'\npred=algo.predict(uid,iid,4)\nprint(\"评分:{}\".format(pred))\nprint(\"评分:{}\".format(pred.est))\n\n# same as below\n# algo.estimate(algo.trainset.to_inner_uid(uid),algo.trainset.to_inner_iid(iid))","sub_path":"Recommend/usercf_demo.py","file_name":"usercf_demo.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602958495","text":"# ! 
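kfold_decision_function above exists so that every training row is scored by a fold model that never saw it; a minimal sketch of that out-of-fold idea using scikit-learn directly (dataset and model here are placeholders):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = make_classification(n_samples=200, random_state=0)
oof = np.zeros(len(y))
for tr, ts in KFold(n_splits=5, shuffle=True, random_state=0).split(X):
    clf = LogisticRegression(max_iter=1000).fit(X[tr], y[tr])
    oof[ts] = clf.decision_function(X[ts])   # scored by a model that never saw these rows
print(oof[:5])
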
/usr/bin/env python\n# - * - coding:utf-8 - * -\n# __author__ : KingWolf\n# createtime : 2019/1/31 16:18\n\nfrom selenium import webdriver\nimport os\nimport time\n\n#启动浏览器\ndef browser():\n options = webdriver.ChromeOptions()\n options.add_argument('user-data-dir=F:\\profile')\n #找到driver的路径\n file_path = os.path.dirname(os.path.dirname(__file__))\n file_path = str(file_path)\n file_path = file_path.replace('\\\\','/')\n base = file_path.split('/test_case')[0]\n driver_path = base + \"/driver/chromedriver.exe\"\n driver = webdriver.Chrome(\n executable_path=driver_path, \\\n chrome_options=options)\n driver.maximize_window()\n return driver\n\nif __name__ == '__main__':\n dr = browser()\n url = 'https://email.163.com/'\n dr.get(url)\n time.sleep(5)\n dr.quit()","sub_path":"exercise_learn/selenium_project/selenium_study_aotumation/unittest_learn/Test_163email/test_case/util/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"502003846","text":"from setuptools import setup\nimport os\nwith open(os.path.join(os.path.dirname(__file__), 'bin', 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nwith open('requirements.txt') as requires_file:\n requires = requires_file.read().split('\\n')\n\nsetup(\n name='CAVA',\n version=version,\n description='CAVA (Clinical Annotation of VAriants)',\n url='https://github.com/Steven-N-Hart/CAVA',\n author='Steven-N-Hart',\n author_email='hart.steven@mayo.edu',\n license='MIT',\n packages=['cava_', 'ensembldb'],\n scripts=[\n 'bin/CAVA.py',\n 'bin/EnsemblDB.py',\n 'bin/dbSNPDB.py',\n ],\n install_requires=requires,\n zip_safe=False,\n include_package_data=True\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"547827703","text":"from logger import logger\nimport settings\nimport requests\nimport json\nimport math\n\nTAKE_DUMP = True\nEARTH_RADIUS = 6371e3\n\ndef distance_in_miles(lat1, lon1, lat2, lon2):\n phi1 = lat1*math.pi/180 # phi, lambda in radians\n phi2 = lat2*math.pi/180\n del_phi = (lat2 - lat1)*math.pi/180\n del_lambda = (lon2 - lon1)*math.pi/180\n a = math.sin(del_phi/2)*math.sin(del_phi/2) + \\\n math.cos(phi1)*math.cos(phi2)*math.sin(del_lambda/2)*math.sin(del_lambda/2)\n\n c = 2*math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = EARTH_RADIUS*c; # in meters\n return d/1600 # convert to miles\n\nclass Point:\n def __init__(self, x, y):\n self.x = float(x)\n self.y = float(y)\n\nclass Scanner:\n '''\n Returns a list of opensky_api StateVector's\n containing data on nearby planes\n '''\n def nearby():\n raise NotImplementedError\n\n def distance_from_window(self, flight):\n window_lat, window_lng = settings.coords['window']\n return distance_in_miles(window_lat, window_lng, flight['latitude'], flight['longitude'])\n\n def closest(self):\n flights = self.nearby()\n return min(flights, key=self.distance_from_window) if flights else None\n\nclass RtlScanner(Scanner):\n ENDPOINT = settings.data_endpoint\n class RtlException(Exception):\n def __init__(self):\n message = \"\"\"\n Failed to establish connection to dump1090 json endpoint.\n Check that ./dump1090 --net is running, or that SSH tunnel\n is set up if dump1090 running on another computer.\n \"\"\"\n super(RtlScanner.RtlException, self).__init__(message)\n\n def __init__(self, assert_conn=True):\n if 
assert_conn:\n try:\n data = requests.get(self.ENDPOINT)\n except requests.exceptions.ConnectionError:\n raise RtlScanner.RtlException()\n\n def _as_state_vector(self, data_pt):\n vector = {}\n vector['icao24'] = data_pt['hex'] # icao24\n vector['callsign'] = data_pt['flight'] # callsign\n vector['longitude'] = data_pt['lon'] # longitude\n vector['latitude'] = data_pt['lat'] # latitude\n alt = 0\n for alt_key in ['altitude', 'alt_geom', 'alt_baro']:\n if alt_key in data_pt:\n alt = data_pt[alt_key]\n break\n vector['altitude'] = alt #altitude \n if vector['altitude'] == 'ground': vector['altitude'] = 0\n vector['on_ground'] = False # on ground\n return vector\n\n def _valid_data(self, data_pt):\n reqs_keys = ['hex', 'flight', 'lon', 'lat']\n for k in reqs_keys:\n if k not in data_pt:\n return False\n if data_pt.get('seen', 1000) > 35: # 'seen' == seconds since last msg\n return False # stale messages == out of sight (ideally)\n if len(data_pt.get('hex', '').strip()) == 0:\n return False # invalid hex code\n return True\n\n def nearby(self):\n res = requests.get(self.ENDPOINT) # localhost:8080/data.json\n data = json.loads(res.text)['aircraft']\n data = filter(self._valid_data, data)\n logger.info(data)\n return [self._as_state_vector(v) for v in data]\n\ndef get_scanner():\n return RtlScanner()\n\ndef nearby(scanner):\n logger.info('scanning')\n flight = scanner.closest()\n dist = scanner.distance_from_window(flight)\n logger.info('chose flight {}'.format(flight))\n return (flight, dist) if flight else (None, None)\n\nif __name__ == '__main__':\n scanner = get_scanner()\n nearby(scanner)\n","sub_path":"nearby.py","file_name":"nearby.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"190418648","text":"from django.conf.urls import url, include\nfrom . 
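A quick sanity check of the haversine distance_in_miles above; the London and Paris coordinates are assumptions, and with the 1600 m-per-mile shortcut the result should land near 215:

import math

EARTH_RADIUS = 6371e3

def distance_in_miles(lat1, lon1, lat2, lon2):
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    return EARTH_RADIUS * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) / 1600

print(round(distance_in_miles(51.5074, -0.1278, 48.8566, 2.3522)))  # ~215
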
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^program', views.program, name='program'),\n url(r'^lists', views.lists, name='lists'),\n url(r'^alumni', views.alumni, name='alumni'),\n url(r'^faculty', views.faculty, name='faculty'),\n url(r'^research', views.research, name='research'),\n url(r'^Research/(?P\\d+)$', views.research_item, name='research_item'),\n url(r'^contactus', views.contactus, name='contactus'),\n url(r'^outreach', views.outreach, name='outreach'),\n url(r'^Outreaches/(?P\\d+)$', views.item_outreach, name='outreach_item'),\n url(r'^news', views.news, name='news'),\n url(r'^upcomingevents', views.upcomingevents, name='upcomingevents'),\n url(r'^News/(?P\\d+)$', views.news_item, name='news_item'),\n url(r'^education', views.education, name='education'),\n url(r'^business', views.business, name='business'),\n url(r'^administration', views.administration, name='administration'),\n url(r'^management', views.management, name='management'),\n url(r'^computer_studies', views.computer_studies, name='computer_studies'),\n url(r'^criminal_justice', views.criminal_justice, name='criminal_justice'),\n url(r'^psychology', views.psychology, name='psychology'),\n url(r'^nursing', views.nursing, name='nursing'),\n url(r'^Programs/(?P\\d+)$', views.program_item, name='program_item'),\n url(r'^login', views.login, name='login'),\n url(r'^dashboard', views.dashboard, name='dashboard'),\n url(r'^postlist/(?P[\\w\\-]+)/$', views.postlist, name='postlist'),\n url(r'^search', views.search, name='search'),\n url(r'^admission', views.admission, name='admission'),\n\n]","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27624529","text":"BAG_COLLECTION = 'bags'\nBAG_LISTING_PER_PAGE = 25\nBAG_SEAL_PATTERN = \"^BAG[\\w]{7,15}$\"\nBAG_STATUSES = [u'WIP', u'In Transit', u'Discarded', u'Complete', u'Pending']\n# bag statuses for which the bag destination change can be allowed\nBAG_STATUSES_ALLOW_EDIT = [u'WIP', u'In Transit', u'Pending']\nBAG_STATUS = {\n u'UD': [u'WIP', u'In Transit', u'Dispatched', u'Pending', u'Discarded',\n u'Complete'], # undelivered\n u'DL': [u'Delivered'], # delivered: to the destination\n}\n\nPERMISSIONS = (\n ('can_pop_package', 'Can pull a package out of a sealed bag'),\n ('can_add_package', 'Can add a package to a bag'),\n ('can_view_bag_contents', 'Can view the contents of a bag'),\n ('can_edit_bag_dest', 'Can edit bag destination'),\n ('can_close_bag', 'Can close bag'),\n ('can_add_edit_bag_lbh', 'Can add/edit bag volumetric dimensions'),\n ('can_view_bag_packages', 'Can view bag content'),\n ('can_view_auto_reconcile','Can View Auto Reconcile'),\n ('can_custody_scan', 'Can Custody Scan'),\n ('can_upload_bag_matrix', 'Can Upload Bag Matrix'),\n ('can_edit_bag_matrix', 'Can Edit Bag Matrix'),\n ('can_add_bag_type', 'Can add bag type'),\n ('can_view_bag_dashboard', 'Can view bag dashboard'),\n)\n# ################################ NEW IN LINEHAUL ############################\n# STATUS MAP linking linehaul readable status to STATUSES\nSTATUS_MAP = {\n # On Creation\n 'create': 'WIP',\n # On dispatch/seal\n 'freeze': 'In Transit',\n # On receiving\n 'open': 'Pending',\n # On cancel\n 'cancel': 'Discarded',\n # On complete\n 'close': 'Complete',\n}\n\n# STATUS ACTION MAP linking user action to allowed Statuses\nSTATUS_ACTION_MAP = {\n 'edit': ['WIP'],\n 'outscan': ['In Transit'],\n 'freeze': ['In 
Transit'],\n 'unfreeze': ['In Transit'],\n 'inscan': ['In Transit'],\n 'pull': ['In Transit', 'Pending'],\n}\n\nSUBITEM_ACTIONS = {\n 'pull': 'Pull selected items to their last scan location'\n}\n# ################################ NEW IN LINEHAUL ############################\n\nPARENT_KEY = {\n 'ist': 'ist'\n}\n\nBAG_STATUS = {\n 'default': {\n u'UD': [\n u'WIP', u'In Transit', u'Dispatched', u'Pending'], # undelivered\n u'DL': [u'Discarded', u'Complete'], # bag lifecycle end\n u'RT': [u'Returned', ], # returned: with Delhivery\n u'RTO': [u'RTO'], # rto: with client\n u'LT': [u'LOST'],\n },\n 'COD': {\n u'UD': [u'WIP', u'In Transit', u'Dispatched', u'Pending', ],\n # undelivered\n u'DL': [u'Delivered'], # delivered: with Customer\n u'RT': [u'Returned'], # returned: with Delhivery\n u'RTO': [u'RTO'], # rto: with client\n u'LT': [u'LOST'],\n u'EOD': [u'Delivered', u'Returned', u'Pending', u'Dispatched'],\n },\n 'Pre-paid': {\n u'UD': [u'WIP', u'In Transit', u'Dispatched', u'Pending', ],\n # undelivered\n u'DL': [u'Delivered'], # delivered: with Customer\n u'RT': [u'Returned', ], # returned: with Delhivery\n u'RTO': [u'RTO'], # rto: with client\n u'LT': [u'LOST'],\n u'EOD': [u'Delivered', u'Returned', u'Pending', u'Dispatched'],\n },\n 'Pickup': {\n u'PP': [u'Open', u'Scheduled', u'Dispatched'],\n u'PU': [u'Picked Up'],\n u'CN': [u'Canceled', u'Closed'],\n u'DTO': [u'DTO'],\n u'LT': [u'LOST'],\n u'EOD': [u'Picked Up', u'Canceled', u'Scheduled', u'Dispatched'],\n },\n}\n\n# valid statuses for actions\nVALID_STATUS = {\n 'Dispatch': [],\n 'IST': ['In Transit', 'Pending'],\n 'RTO': [],\n 'Bag': ['In Transit', 'Pending'],\n}\n\nINCOMING = {\n 'rmk': 'Bag received',\n 's': 'Pending',\n}\n\n\nfrom django.conf import settings\n\nEMAIL_FROM = getattr(\n settings, 'INCOMING_EMAIL_SENDER', 'noreply@delhivery.com')\n\nOTP_RECEPIENTS = getattr(\n settings, 'OTP_RECEPIENTS', (\n 'Tracking Team', 'tracking@delhivery.com'))\n\nALLOW_ADDING_BAGS_TO_BAG = False\n\nCSV_DUMP = {\n 'key': [\n 'bs', 't', 'oc', 'cn', 'rgn', 'cd', 'ed', 'cs.sl', 'cs.ss', 'cs.sd',\n 'pid', 'dd.t', 'dd.id', 'cs.dwbn', 'cs.pid', 'wbns', 'len(wbns)',\n 'inc.wbn.u', 'len(inc.wbn.u)', 'vl', 'vb', 'vh', 'gm', 'fpd',\n 'function.bag_received()', 'heavy', 'mixed', 'bi', 'bpd', 'bpc', 'ar'\n ],\n 'label': [\n 'Bag Seal#', 'Bag Type', 'Origin Center', 'Destination Center',\n 'Region', 'Creation Time', 'Seal Time', 'Last Scan Location',\n 'Current Status', 'Last Scan Date', 'Parent IST No.', 'Dispatch Type',\n 'Dispatch Id', 'Prealert No.', 'LH IST No.', 'Package wbns',\n 'Package count', 'Unexpected Package wbns', 'Unexpected Package count',\n 'Length', 'Breadth', 'Height', 'Gross Wt.', 'First Pending',\n 'Bag Received', 'Heavy', 'Mixed', 'Bag Identifier', 'Bag Priority Date',\n 'Bag Priority Count', 'Bag Air Restricted Flag'\n ]\n}\n\n# config to specify which center (return/dispatch center) info\n# about the package to be shown to the user while incoming/creation of a bag\nINCOMING_SHOW_RCN_STATUSES = [u'In Transit', u'Pending', u'Picked Up',\n u'Rejected', u'DTO', u'RTO', u'Dispatched']\nINCOMING_SHOW_RCN_AND_CN_STATUSES = [u'Pending']\nBAG_ADD_SHOW_RCN_STATUSES = [u'Returned', u'Picked Up']\n\n\nPACKAGE_ALLOWED_STATUS = ['In Transit', 'Returned', 'Pending', 'Picked Up']\n\n\nBAG_REPORT_DATE_FORMAT = '%Y-%m-%d %H:%M'\n\nBAG_STATUS_CHECK = [\"Complete\", \"Discarded\", 
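A sketch of how the status/action maps above are presumably consumed; the action_allowed helper is an assumption for illustration and not part of the bag module:

STATUS_ACTION_MAP = {
    'edit': ['WIP'],
    'outscan': ['In Transit'],
    'inscan': ['In Transit'],
    'pull': ['In Transit', 'Pending'],
}

def action_allowed(action, bag_status):
    # an action is legal only while the bag sits in one of its allowed statuses
    return bag_status in STATUS_ACTION_MAP.get(action, [])

print(action_allowed('edit', 'WIP'))       # True
print(action_allowed('pull', 'Complete'))  # False
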
\"Pending\"]\n","sub_path":"bag/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566035521","text":" \r\nimport numpy as np\r\nfrom flask import Flask, request, jsonify, render_template\r\nimport joblib\r\nimport pandas as pd\r\n\r\napp = Flask(__name__)\r\nMW_Model = joblib.load('MW_model_initial.pk1')\r\n\r\n##constants for scaling data\r\nmeans = np.array([7.43397211e-01, 3.76955848e+02, 4.80482260e+01, 4.69922617e-01,\r\n 1.34120357e+00, 2.84791054e+00, 1.18276824e+00, 3.98294118e+00,\r\n 2.81976599e+00, 4.11944025e+00, 7.58886332e+00, 7.56022984e+01,\r\n 4.06212388e-02, 4.26465459e-03, 1.20023040e+02])\r\nvars = np.array([2.81408733e-04, 2.66845279e+04, 1.49823072e+03, 2.58834946e-01,\r\n 9.33931029e-01, 2.00304368e+00, 1.42230878e-01, 1.52871978e+00,\r\n 2.78359637e-01, 5.08976571e-01, 1.96979357e+00, 2.34725629e+01,\r\n 1.65692805e-03, 3.28087242e-05, 1.11131253e+02])\r\nnames = ['SG', 'Pressure', 'Temp', 'C1', 'C2', 'C3', 'iC4', 'C4', 'iC5', 'C5', 'C6', 'C7', 'CO2', 'N2', 'MW']\r\n\r\n#equations for scaling + unscaling data\r\ndef scale_data(array,means=means,stds=vars**0.5):\r\n return (array-means)/stds\r\n\r\ndef unscale_data(Trans_data, means=means, stds=vars**0.5):\r\n return Trans_data*stds+means\r\n\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict():\r\n '''\r\n For rendering results on HTML GUI\r\n '''\r\n\r\n file = request.files['inputFile']\r\n Input = pd.read_csv(file)\r\n\r\n Input_value = Input.drop(['Sample Name', 'Sample Number'], axis=1)\r\n Scaled_Input = scale_data(Input_value.values)\r\n Input_df2 = pd.DataFrame(Scaled_Input,columns = names)\r\n Input_df3 = Input_df2.drop(['MW','N2','Pressure','Temp','CO2','iC5'],axis=1)\r\n Prediction = MW_Model.predict(Input_df3.values)\r\n\r\n rows = len(Input_df3)\r\n #dataframe of zeros (x_rows, x_columns)\r\n Zeros = pd.DataFrame(np.zeros((rows, 15)),columns = names)\r\n Zeros2 = Zeros.drop(['MW'],axis=1)\r\n Zeros2.insert(14,\"MW\",Prediction)\r\n MW_Prediction = unscale_data(Zeros2.values)\r\n MW_DF = pd.DataFrame(MW_Prediction,columns = names)\r\n\r\n\r\n MW_DF = MW_DF.pop('MW')\r\n Sample_name = Input.pop('Sample Name')\r\n Sample_number = Input.pop('Sample Number')\r\n output = pd.concat([Sample_name, Sample_number, MW_DF,], axis=1, sort=False)\r\n\r\n return render_template('index.html', table1 = output.to_html(header = 'true'))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"574031661","text":"from collections import deque\n\ndef BFS(y, x):\n global N\n q = deque()\n q.append([y, x])\n dx = [1, 0, -1, 0]\n dy = [0, 1, 0, -1]\n result = -1\n\n while q:\n result += 1\n for i in range(len(q)):\n vy, vx = q.popleft()\n \n if not visited[vy][vx]:\n visited[vy][vx] = 1\n\n for i in range(4):\n ny = vy + dy[i]\n nx = vx + dx[i]\n if 0 <= nx < N and 0 <= ny < N:\n if not visited[ny][nx]:\n if field[ny][nx] == 3:\n return result\n elif field[ny][nx] == 0:\n visited[ny][nx] = 1\n q.append([ny, nx])\n return 0\n\nT = int(input())\nfor t in range(1, T+1):\n N = int(input())\n field = [[i for i in list(map(int, input()))] for j in range(N)]\n visited = [[0 for i in range(N)] for j in 
range(N)]\n\n for i in range(N):\n for j in range(N):\n if field[i][j] == 2:\n x, y = j, i\n\n print('#{0} {1}'.format(t, BFS(y,x)))\n\n # for _ in range(len(field)):\n # print(field[_])","sub_path":"python/swea/intermediate/Queue_2.py","file_name":"Queue_2.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489325943","text":"#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom collections import defaultdict\nfrom string import Template\n\nimport yaml\n\nfrom scripts.repro_matrix.defs_beir import beir_keys, trec_eval_metric_definitions\n\n\ndef format_run_command(raw):\n return raw.replace('--topics', '\\\\\\n --topics')\\\n .replace('--index', '\\\\\\n --index')\\\n .replace('--encoder-class', '\\\\\\n --encoder-class')\\\n .replace('--output ', '\\\\\\n --output ')\\\n .replace('--output-format trec', '\\\\\\n --output-format trec \\\\\\n ') \\\n .replace('--hits ', '\\\\\\n --hits ')\n\n\ndef format_eval_command(raw):\n return raw.replace('-c ', '\\\\\\n -c ')\\\n .replace('run.', '\\\\\\n run.')\n\n\ndef read_file(f):\n fin = open(f, 'r')\n text = fin.read()\n fin.close()\n\n return text\n\n\nif __name__ == '__main__':\n table = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))\n commands = defaultdict(lambda: defaultdict(lambda: ''))\n eval_commands = defaultdict(lambda: defaultdict(lambda: ''))\n\n html_template = read_file('scripts/repro_matrix/beir_html.template')\n row_template = read_file('scripts/repro_matrix/beir_html_row.template')\n\n with open('pyserini/resources/beir.yaml') as f:\n yaml_data = yaml.safe_load(f)\n for condition in yaml_data['conditions']:\n name = condition['name']\n cmd_template = condition['command']\n\n for datasets in condition['datasets']:\n dataset = datasets['dataset']\n\n runfile = f'run.beir-{name}.{dataset}.txt'\n cmd = Template(cmd_template).substitute(dataset=dataset, output=runfile)\n commands[dataset][name] = format_run_command(cmd)\n\n for expected in datasets['scores']:\n for metric in expected:\n eval_cmd = f'python -m pyserini.eval.trec_eval ' + \\\n f'{trec_eval_metric_definitions[metric]} beir-v1.0.0-{dataset}-test {runfile}'\n eval_commands[dataset][name] += format_eval_command(eval_cmd) + '\\n\\n'\n\n table[dataset][name][metric] = expected[metric]\n\n row_cnt = 1\n html_rows = []\n for dataset in beir_keys:\n s = Template(row_template)\n s = s.substitute(row_cnt=row_cnt,\n dataset=dataset,\n s1=f'{table[dataset][\"bm25-flat\"][\"nDCG@10\"]:8.4f}',\n s2=f'{table[dataset][\"bm25-flat\"][\"R@100\"]:8.4f}',\n s3=f'{table[dataset][\"bm25-multifield\"][\"nDCG@10\"]:8.4f}',\n s4=f'{table[dataset][\"bm25-multifield\"][\"R@100\"]:8.4f}',\n s5=f'{table[dataset][\"splade-distil-cocodenser-medium\"][\"nDCG@10\"]:8.4f}',\n s6=f'{table[dataset][\"splade-distil-cocodenser-medium\"][\"R@100\"]:8.4f}',\n 
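The level-by-level BFS above, rerun on a tiny grid (2 = start, 3 = goal, 1 = wall, 0 = open); as in the original, the count covers the open cells strictly between start and goal:

from collections import deque

field = [[2, 0, 1],
         [1, 0, 1],
         [1, 0, 3]]

start = next((y, x) for y in range(3) for x in range(3) if field[y][x] == 2)
q, seen, steps, found = deque([start]), {start}, -1, False
while q and not found:
    steps += 1                      # one level of the search per pass
    for _ in range(len(q)):
        y, x = q.popleft()
        for dy, dx in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            ny, nx = y + dy, x + dx
            if 0 <= ny < 3 and 0 <= nx < 3 and (ny, nx) not in seen:
                if field[ny][nx] == 3:
                    found = True
                elif field[ny][nx] == 0:
                    seen.add((ny, nx))
                    q.append((ny, nx))
print(steps if found else 0)        # 3 open cells separate start and goal
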
s7=f'{table[dataset][\"contriever\"][\"nDCG@10\"]:8.4f}',\n s8=f'{table[dataset][\"contriever\"][\"R@100\"]:8.4f}',\n s9=f'{table[dataset][\"contriever-msmarco\"][\"nDCG@10\"]:8.4f}',\n s10=f'{table[dataset][\"contriever-msmarco\"][\"R@100\"]:8.4f}',\n cmd1=commands[dataset][\"bm25-flat\"],\n cmd2=commands[dataset][\"bm25-multifield\"],\n cmd3=commands[dataset][\"splade-distil-cocodenser-medium\"],\n cmd4=commands[dataset][\"contriever\"],\n cmd5=commands[dataset][\"contriever-msmarco\"],\n eval_cmd1=eval_commands[dataset][\"bm25-flat\"].rstrip(),\n eval_cmd2=eval_commands[dataset][\"bm25-multifield\"].rstrip(),\n eval_cmd3=eval_commands[dataset][\"splade-distil-cocodenser-medium\"].rstrip(),\n eval_cmd4=eval_commands[dataset][\"contriever\"].rstrip(),\n eval_cmd5=eval_commands[dataset][\"contriever-msmarco\"].rstrip(),\n )\n\n html_rows.append(s)\n row_cnt += 1\n\n all_rows = '\\n'.join(html_rows)\n print(Template(html_template).substitute(title='BEIR', rows=all_rows))\n","sub_path":"scripts/repro_matrix/generate_html_beir.py","file_name":"generate_html_beir.py","file_ext":"py","file_size_in_byte":4876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429255332","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport sys\n\ndef alist2numpy(atoms_list):\n try:\n atoms_list[0].get_potential_energy()\n except:\n e_info = False\n else:\n e_info = True\n try:\n atoms_list[0].get_forces()\n except:\n f_info = False\n else:\n f_info = True\n try:\n atoms_list[0].get_stress()\n except:\n s_info = False\n else:\n s_info = True\n box = []\n coord = []\n energy = []\n force = []\n stress = []\n if e_info and f_info and s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n energy.append(atoms.get_potential_energy())\n force.append(np.array(atoms.get_forces(), dtype='float64'))\n stress.append(np.array(atoms.get_stress(), dtype='float64'))\n elif e_info and f_info and not s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n energy.append(atoms.get_potential_energy())\n force.append(np.array(atoms.get_forces(), dtype='float64'))\n elif e_info and not f_info and s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n energy.append(atoms.get_potential_energy())\n stress.append(np.array(atoms.get_stress(), dtype='float64'))\n elif e_info and not f_info and not s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n energy.append(atoms.get_potential_energy())\n elif not e_info and f_info and s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n force.append(np.array(atoms.get_forces(), dtype='float64'))\n stress.append(np.array(atoms.get_stress(), dtype='float64'))\n elif not e_info and f_info and not s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n force.append(np.array(atoms.get_forces(), dtype='float64'))\n elif not e_info and not f_info and s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), 
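The report generator above leans on string.Template for its HTML rows; here are the $-placeholder mechanics in isolation (this row string is a made-up fragment, not the real template file):

from string import Template

row = Template("<tr><td>$dataset</td><td>$s1</td></tr>")
print(row.substitute(dataset="scifact", s1=f"{0.6789:8.4f}"))
# -> <tr><td>scifact</td><td>  0.6789</td></tr>
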
dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n stress.append(np.array(atoms.get_stress(), dtype='float64'))\n elif not e_info and not f_info and not s_info:\n for atoms in atoms_list:\n box.append(np.array(atoms.get_cell(), dtype='float64'))\n coord.append(np.array(atoms.get_positions(), dtype='float64'))\n box = np.array(box, dtype='float64')\n coord = np.array(coord, dtype='float64')\n if e_info:\n energy = np.array(energy, dtype='float64')\n if f_info:\n force = np.array(force, dtype='float64')\n if s_info:\n stress = np.array(stress, dtype='float64')\n return box, coord, energy, force, stress\n\nif __name__ == '__main__':\n print(\"\\n\\n\")\n print(\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\".center(100))\n print(\" ___________________________ \".center(100))\n print(\" __________| C o d e b y Y.J. Choi |_________ \".center(100))\n print(\"|______________ ssrokyz@gmail.com _______________|\".center(100))\n print(\"\")\n print(\"******* This code will generate npy files from trajectory file *******\".center(100))\n print(\"useage ==> ./traj2npy.py 'file'\".center(100))\n print(\"EXAMPLE) ./traj2npy.py GST_ran.traj\".center(100))\n print(\"\")\n print(\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\".center(100))\n print(\"\")\n if len(sys.argv) is 2:\n print((\"The Number of arguments(= %d) is correct.\" %(len(sys.argv)-1)).center(100))\n print(\"\\n\")\n else:\n print(\"*****ERROR***** The number of arguments is not correct *****ERROR*****\".center(100))\n print(\"\\n\")\n sys.exit(1)\n\n traj_file = sys.argv[1]\n # t2v_ratio = float(sys.argv[2])\n # if sys.argv[3] == 'o':\n # shuffle = True\n # elif sys.argv[3] == 'x':\n shuffle = False\n # else:\n # raise ValueError('Shuffle argument you gave is somehow wrong. It should be o or x. Please check.')\n\n print(\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\".center(100))\n print('')\n print(('file name: '+traj_file).center(100))\n # print(('training/validation set ratio: '+str(t2v_ratio)).center(100))\n # print(('shuffle: '+str(shuffle)).center(100))\n print('')\n print(\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\".center(100))\n print('')\n\n from time import time\n time_i = time()\n\n import subprocess as sp\n sp.call(['rm -rf old_npys'], shell=True)\n sp.call(['mv npys old_npys'], shell=True)\n sp.call(['mkdir -p npys'], shell=True)\n\n from ase.io import read\n atoms_list = read(traj_file, index=':', format='traj')\n image_num = len(atoms_list)\n # valid_num = int(image_num/(t2v_ratio+1))\n # train_num = image_num - valid_num\n\n log_f = open('npys/log.txt', 'w')\n log_f.write('Made by file, ../'+traj_file+'\\n')\n log_f.write('Number of total images: '+str(image_num)+'\\n')\n # log_f.write('Ratio of training/validation sets: ('+str(t2v_ratio)+' : 1)\\n')\n # log_f.write('Shuffle: '+str(shuffle)+'\\n')\n # log_f.write('Number of training sets: '+str(train_num)+'\\n')\n # log_f.write('Number of validation sets: '+str(valid_num)+'\\n')\n # if shuffle:\n # from random import shuffle as sffl\n # sffl(atoms_list)\n # else:\n # log_f.write('################################## Caution ####################################\\n')\n # log_f.write(\" You didn't have order shuffled. Please be aware of what you're doing! 
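alist2numpy above spells out eight near-identical branches; the same probe-then-collect idea in a compact sketch, where plain dicts and lambdas stand in for ASE Atoms objects whose getters raise when a quantity is missing:

def collect(frames, getters):
    available = {}
    for name, fn in getters.items():
        try:
            fn(frames[0])            # probe once on the first frame
            available[name] = fn
        except Exception:
            pass                     # quantity absent: skip it everywhere
    return {name: [fn(f) for f in frames] for name, fn in available.items()}

frames = [{"energy": -1.0}, {"energy": -2.0}]
getters = {"energy": lambda f: f["energy"], "forces": lambda f: f["forces"]}
print(collect(frames, getters))      # {'energy': [-1.0, -2.0]}
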
\\n\")\n # log_f.write('################################## Caution ####################################\\n\\n')\n\n box, coord, energy, force, stress = alist2numpy(atoms_list)\n\n np.save('npys/box.npy', box[:])\n np.save('npys/coord.npy', coord[:])\n if len(energy) != 0:\n np.save('npys/energy.npy', energy[:])\n else:\n log_f.write(' *** energy information not exist *** \\n')\n print(' *** energy information not exist *** '.center(100))\n if len(force) != 0:\n np.save('npys/force.npy', force[:])\n else:\n log_f.write(' *** forces information not exist *** \\n')\n print(' *** forces information not exist *** '.center(100))\n if len(stress) != 0:\n np.save('npys/stress.npy', stress[:])\n else:\n log_f.write(' *** stress information not exist *** \\n')\n print(' *** stress information not exist *** '.center(100))\n print('')\n log_f.close()\n\n type_txt = open('npys/type.txt', 'w')\n from ss_util import list2numlist as l2nl\n symbols = atoms_list[0].get_chemical_symbols()\n if atoms_list[-1].get_chemical_symbols() != symbols:\n raise ValueError(\"Chemical symbols seem to be not consistent btw images. Please check\")\n symbols_num = l2nl(list(symbols))\n for nums in symbols_num:\n type_txt.write(str(nums)+\" \")\n type_txt.write('\\n')\n for symbol in symbols:\n type_txt.write(str(symbol)+\" \")\n type_txt.close()\n # sp.call(['cp npys/type.txt npys/training/'], shell=True)\n # sp.call(['cp npys/type.txt npys/validation/'], shell=True)\n\n # from ase.io.trajectory import Trajectory as Traj \n # train_traj = Traj('npys/training/training_set.traj', 'w')\n # valid_traj = Traj('npys/validation/validation_set.traj', 'w')\n # for i in range(train_num):\n # train_traj.write(atoms_list[i])\n # for i in range(train_num,train_num+valid_num):\n # valid_traj.write(atoms_list[i])\n\n time_f = time()\n time_d = time_f - time_i\n print(('Total time used: '+str(time_d)+' sec ').center(100))\n print('\\n\\n')\n log_f = open('npys/log.txt', 'a')\n log_f.write('Total time used: '+str(time_d)+' sec ')\n log_f.close()\n","sub_path":"t-SNE/traj2npy.py","file_name":"traj2npy.py","file_ext":"py","file_size_in_byte":8471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"129727762","text":"import os\nfrom setuptools import setup, find_packages\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name = \"MF - Matrix Factorization Techniques for Data Mining\",\n version = \"1.0\",\n author = \"Marinka Zitnik\",\n author_email = \"marinka@zitnik.si\", \n description = \"Python Matrix Factorization Techniques for Data Mining\",\n url = \"http://helikoid.si/mf/index.html\",\n packages = find_packages(),\n package_dir = { \"mf\": \"./mf\"},\n license = \"OSI Approved :: GNU General Public License (GPL)\",\n long_description = read(\"README.rst\"),\n classifiers = [\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Natural Language :: English\",\n \"Programming Language :: Python\",\n \"Topic :: Scientific/Engineering\"\n ]\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"120855894","text":"\"\"\"\n This spider is a WerkNL spider created on top of the ATSSpider\n scrapy crawl werk_nl -a mining_job_id=9999 -a iteration=1 -a extract=1 -a 
url=\"https://www.werk.nl/werk_nl/werknemer/vacatures?_piref36_5081279_36_5035292_5035292.__ora_navigState=execution%3De1s2&_piref36_5081279_36_5035292_5035292.__ora_navigValues=\"\n\n sample job url:\n https://www.werk.nl/werk_nl/werknemer/vacatures?_piref36_5081279_36_5035292_5035292.__ora_navigState=execution%3De2s4&_piref36_5081279_36_5035292_5035292.__ora_navigValues=\n\"\"\"\nimport logging\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, ConvertDateString,\\\n NormalizedJoin, HtmlFormatter, Strip\nfrom brightcorp.lib.utils import extract_first\n\n\nclass WerkNL(ATSSpider):\n\n name = \"werk_nl\"\n download_delay = 6\n page = 0\n current_pages = set([])\n Cur_Page_Num = compile(r\"De1s(\\d+)\")\n COOKIES_ENABLED = False\n\n def parse(self, response):\n selector = Selector(response)\n self.page += 1\n cur_pages = extract_first(selector.xpath('//span[@class=\"current\"]/text()'))\n if cur_pages:\n if cur_pages.strip() in self.current_pages:\n self.log(\"Search Page already visited\", level=logging.INFO)\n else:\n self.current_pages.add(cur_pages.strip())\n details_xpath = './/th[text()=\"%s\"]/following-sibling::td//text()'\n if not self.expected_job_count_set:\n job_count = selector.xpath('//div[@class=\"zoeken-resultaten\"]/h3/strong/text()').extract()\n if job_count:\n self.expected_job_count = job_count\n\n jobs = selector.xpath(\n '//div[@class=\"zoeken-resultaten\"]/div[@class=\"zoekresultaat\"]'\n )\n for job in jobs:\n url = job.xpath('./h3/a/@href').extract()\n if url:\n meta = {\n 'title': job.xpath('./h3/a/text()').extract(),\n 'education': job.xpath(details_xpath % \"Opleidingsniveau\").extract(),\n 'post_date': job.xpath(details_xpath % \"Wijzigingsdatum\").extract(),\n 'exp_date': job.xpath(details_xpath % \"Vervaldatum\").extract(),\n 'ref_num': job.xpath(details_xpath % \"Referentienummer\").extract(),\n 'company': job.xpath('.//p[@class=\"werklocatie\"]/span[@class=\"highLightResult\"]/text()').extract(),\n 'location': job.xpath('.//p[@class=\"werklocatie\"]/text()').extract(),\n 'job_url': url[0],\n }\n yield Request(\n callback=self.parse_job_callback(),\n meta=meta,\n url=url[0],\n )\n\n next_page_url = selector.xpath(\n '//div[@class=\"zoekresultaat\"]/following-sibling::div//div[@class=\"pagination_next\"]/a/@href'\n ).extract()\n if next_page_url:\n yield Request(\n callback=self.parse_jobs_list,\n priority=1,\n dont_filter=True,\n url=next_page_url[0]\n )\n else:\n # Gave this as start url so that it starts from begining\n yield Request(\n callback=self.parse,\n priority=1,\n dont_filter=True,\n url=self.start_urls[0]\n )\n\n def parse_job(self, response):\n selector = Selector(response)\n loader = BrightcorpItemLoader(selector=selector)\n details_xpath = '//td[text()=\"%s\"]/following-sibling::td/text()'\n\n postalcode_location = selector.xpath(details_xpath % \"Postcode en plaats\").extract()\n if postalcode_location and len(postalcode_location[0].split(' ')) == 2:\n postalcode_location = postalcode_location[0].split(' ')\n loader.add_value('zip_code', postalcode_location[0])\n loader.add_value('location', postalcode_location[-1])\n\n if not loader.get_output_value('location'):\n loader.add_value('location', response.meta.get('location'))\n\n loader.add_xpath(\n 'baseSalary',\n [\n details_xpath % \"Soort salaris\",\n details_xpath % \"Salarisindicatie\",\n ],\n 
NormalizedJoin(\", \")\n )\n loader.add_xpath(\n 'benefits',\n '//th[text()=\"Toelichting arbeidsvoorwaarden\"]/../following-sibling::tr',\n )\n loader.add_xpath(\n 'description',\n [\n '//div[@id=\"sw_top\"]/following-sibling::div[@class=\"overview\"][1]',\n '//table[@id=\"algemeen\"]',\n '//table[@id=\"opleidingen\"]'\n ],\n HtmlFormatter()\n )\n loader.add_xpath(\n 'experiencerequirements', '//table[@id=\"werkervaring\"]'\n )\n loader.add_xpath(\n 'requirements', '//table[@id=\"wensen\"]', HtmlFormatter()\n )\n loader.add_xpath('jobtype', details_xpath % \"Soort contract\")\n loader.add_xpath('workhours', details_xpath % \"Werktijd per week\")\n\n loader.add_value(\n 'date', response.meta.get('post_date'),\n ConvertDateString('%d-%m-%Y')\n )\n loader.add_value(\n 'expiration_date', response.meta.get('exp_date'),\n ConvertDateString('%d-%m-%Y')\n )\n loader.add_value(\n 'referencenumber', response.meta.get('ref_num'),\n Strip(), Prefix('%s-' % self.name)\n )\n loader.add_value(\n 'educationrequirements', response.meta.get('education')\n )\n loader.add_value('company', response.meta.get('company'))\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.meta.get('job_url'))\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/werk_nl.py","file_name":"werk_nl.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426887348","text":"## Fibonnaci\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nn = int(input(\"n = \"))\r\nt = np.zeros(n+1)\r\nt[0] = 0\r\nt[1] = 1\r\nfor i in range(2,n+1):\r\n t[i] = t[i-1] + t[i-2]\r\nplt.plot(t)\r\nplt.show()\r\n\"\"\"\r\nLa croissance a une allure exponentielle\r\n\"\"\"","sub_path":"TP 4/ex 7.py","file_name":"ex 7.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"435783176","text":"\"\"\" app.py\n\nCopyright (C) 2018 Steven Wong \n\nMIT License\n\n\"\"\"\n\nimport logging\nimport core.config as config\nimport core.database as database\n\nclass _App(object):\n\t\"\"\" Singleton class that initialises all the settings.\n\n\tAttributes:\n\t\tconfig (core.Config): System config dict.\n\n\t\"\"\"\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(_App, self).__init__(*args, **kwargs)\n\t\t\n\t\tself.config = config.Config('config.json')\n\n\t\tself.logger = logging.getLogger()\n\t\tself.logger.setLevel(logging.INFO)\n\n\t\tfh = logging.FileHandler(self.config.get_value('logging.log_file'))\n\t\t# fh.setLevel(logging.DEBUG)\n\n\t\tch = logging.StreamHandler()\n\t\t# ch.setLevel(logging.DEBUG)\n\n\t\tformatter = logging.Formatter('[%(levelname)s] [%(asctime)s] %(message)s')\n\t\tfh.setFormatter(formatter)\n\t\tch.setFormatter(formatter)\n\n\t\tself.logger.addHandler(fh)\n\t\tself.logger.addHandler(ch)\n\n\t\tself.cxns = {}\n\t\tself.default_db = 'local'\n\n\t\tself.dbs = self.config.get_value(\"database\")\n\ndef connect(db):\n\t\"\"\" Connect to database based on config.\n\n\t\"\"\"\n\tglobal _instance\n\n\tname = db['name']\n\tdb_str = db['connection_str']\n\n\t_instance.cxns[name] = database.PostgreDatabase(db_str)\n\ndef cxn(datasource=None):\n\t\"\"\" Get default database connection\"\n\n\t\"\"\"\n\tglobal _instance\n\n\tif datasource:\n\t\treturn _instance.cxns[datasource]\n\telse:\n\t\treturn _instance.cxns[_instance.default_db]\n\n_instance = _App()\nfor db in 
_instance.dbs:\n\tconnect(db)\n\t","sub_path":"core/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171622553","text":"from flask import Flask, flash, request, redirect, url_for, render_template, send_from_directory, make_response, session, jsonify, abort\nfrom werkzeug.utils import secure_filename\nimport os\nimport datetime\nimport urllib\nimport google\n#from google.cloud import firestore\n\nALLOWED_EXTENSIONS = set(['xlsx'])\nTAX = 1.095\nCOMISSION = 1\n\napp = Flask(__name__)\napp.secret_key = os.urandom(2**16)\n\nimport firebase_admin\nfrom firebase_admin import credentials, storage, auth, firestore\n\ncred=credentials.Certificate('/Users/nathanwong/Desktop/FoodCampus-5de64d4d1920.json')\nfirebase_admin.initialize_app(cred, {\n 'projectID': 'foodcampus',\n 'storageBucket': 'foodcampus.appspot.com'\n})\n\ndb = firestore.client()\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/')\ndef home():\n if not checkLoggedIn():\n return render_template(\"splash.html\")\n return redirect(url_for('homepage'))\n\n@app.route('/login')\ndef login():\n return render_template(\"index.html\", top_nav = top_nav, side_nav=side_nav, default_packages=default_packages)\n\n@app.route('/user')\ndef homepage():\n if not checkLoggedIn():\n return redirect(url_for('login'))\n try:\n bucket = storage.bucket()\n blob = bucket.blob('hello.txt')\n outfile='./hello.txt'\n blob.upload_from_filename(outfile)\n except:\n print(\"ok\")\n def displayAllData():\n user_refs = db.collection(\"available options\")\n docs = user_refs.stream()\n options = [doc.to_dict() for doc in docs]\n ret_dict = {}\n\n for option in options:\n product = option['item']\n price = option['cash_price']\n place = option[\"\\ufeffname\"]\n popularity = option[\"popularity\"]\n if place in ret_dict:\n ret_dict[place].append({product: [price, popularity]})\n else:\n ret_dict[place] = [{product: [price, popularity]}]\n return ret_dict\n display_dict = displayAllData()\n\n\n return render_template(\"buyer.html\", options=display_dict)\n\n\n@app.route('/login/notes')\ndef session_login():\n id_token = request.headers.get(\"Authorization\")\n expires_in = datetime.timedelta(days=5)\n try:\n # Create the session cookie. This will also verify the ID token in the process.\n # The session cookie will have the same claims as the ID token.\n session_cookie = auth.create_session_cookie(id_token, expires_in=expires_in)\n response = jsonify({'status': 'success'})\n # Set cookie policy for session cookie.\n expires = datetime.datetime.now() + expires_in\n response.set_cookie(\n 'session', session_cookie, expires=expires, secure=True)\n resp = make_response(render_template(\"analysis.html\"))\n resp.set_cookie('session', session_cookie)\n return resp\n except:\n return abort(401, 'Failed to create a session cookie')\n\ndef checkLoggedIn():\n session_cookie = request.cookies.get('session')\n if not session_cookie:\n # Session cookie is unavailable. Force user to login.\n return False\n\n # Verify the session cookie. In this case an additional check is added to detect\n # if the user's Firebase session was revoked, user deleted/disabled, etc.\n try:\n decoded_claims = auth.verify_session_cookie(session_cookie, check_revoked=True)\n return True\n except:\n # Session cookie is invalid, expired or revoked. 
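The Flask record above re-runs the Firebase session-cookie check inline in every protected route. Below is a reusable sketch of the same check packaged as a decorator; it is a minimal illustration built on the firebase_admin call the record already uses (auth.verify_session_cookie with check_revoked=True), not the project's actual code, and the 'login' endpoint name is an assumption.

```python
# Minimal sketch, not the project's code: the session-cookie check
# from checkLoggedIn() above, wrapped as a decorator. The 'login'
# endpoint name is hypothetical.
from functools import wraps

from flask import redirect, request, url_for
from firebase_admin import auth


def login_required(view):
    @wraps(view)
    def wrapper(*args, **kwargs):
        session_cookie = request.cookies.get('session')
        if not session_cookie:
            return redirect(url_for('login'))
        try:
            # Raises when the cookie is invalid, expired or revoked.
            auth.verify_session_cookie(session_cookie, check_revoked=True)
        except Exception:
            return redirect(url_for('login'))
        return view(*args, **kwargs)
    return wrapper
```

A route would then declare `@app.route('/user')` followed by `@login_required`, instead of calling the check by hand at the top of the function body.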
Force user to login.\n return False\n#For testing\n@app.route(\"/\")\ndef images(path):\n resp = make_response(open(path).read())\n resp.content_type = \"image/png\"\n return resp\n\n@app.route(\"/submit\", methods=['POST'])\ndef submit():\n if request.method == 'POST':\n user_refs = db.collection(\"available options\")\n docs = user_refs.stream()\n options = [doc.to_dict() for doc in docs]\n ret_dict = {}\n\n for option in options:\n product = option['item']\n price = option['cash_price']\n place = option[\"\\ufeffname\"]\n popularity = option[\"popularity\"]\n if place in ret_dict:\n ret_dict[product].append([place, price, popularity]) #NEED TO FIX THIS LOL\n else:\n ret_dict[product] = [place, price, popularity]\n\n raw_cost = 0.0\n reqs = []\n item_dict = {}\n for item in request.form:\n if len(request.form[item]) > 0 and item != \"example_length\":\n reqs += [item] * int(request.form[item])\n for item in reqs:\n if item in ret_dict:\n raw_cost += float(ret_dict[item][1])\n if item not in item_dict:\n item_dict[item] = 1\n else:\n item_dict[item] += 1\n post_tax = raw_cost * TAX\n with_commission = max(post_tax + 1, post_tax + 0.08 *raw_cost)\n raw, actual = round(post_tax, 2), round(with_commission, 2)\n comission = round(actual-raw, 2)\n return render_template('buyer.html', raw=raw, comission=comission, actual=actual, item_dict=item_dict)\n\n@app.route(\"/payment\")\ndef payment():\n return render_template(\"payment.html\")\n\n@app.route(\"/deliver\")\ndef deliver():\n return render_template(\"deliver.html\")\n@app.route(\"/mapbox\")\ndef mapbox():\n return render_template(\"mapbox.html\")\n\ntop_nav = ''''''\n\nside_nav = ''''''\n\ndefault_packages = ''' \n \n \n \n\n \n \n \n\n \n \n \n \n \n\n \n \n \n\n '''\n","sub_path":"webapp/lookup.py","file_name":"lookup.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"539080484","text":"import StringIO\n\ndef extraer_nombre(ruta):\n ''' Extra el nombre de un archivo de una ruta. 
'''\n    a = -1\n    for i in range(len(ruta)):\n        if ruta[i] == \"/\" or ruta[i] == \"\\\\\":\n            a = i\n    if a == -1:\n        return ruta\n    return ruta[a + 1:]","sub_path":"ejemplos/aventurapygame_completo/utilidades/archivo.py","file_name":"archivo.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"571956718","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n\t#url(r'^$', 'disuku.views.home', name='home'),\t\n    # url(r'^disuku/', include('disuku.foo.urls')),\n\n\turl(r'^$', 'catalogue.views.indice', name = 'index'),\n\turl(r'^login/', 'catalogue.views.do_login', name = 'user_login'),\n\turl(r'^logout/', 'catalogue.views.do_logout', name = 'user_logout'),\n\turl(r'^signin/', 'catalogue.views.loginProcess'),\n\turl(r'^main/', 'catalogue.views.main', name = 'user_area'),\n\turl(r'^last_upload/', 'catalogue.views.lastupload', name = 'last_csv_upload'),\n\n\n\turl(r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n        'document_root': '/home/pandi/disuku/media',\n    }),\n\turl(r'^static/(?P<path>.*)$', 'django.views.static.serve', {\n        'document_root': '/home/pandi/disuku/static',\n    }),\n\n\n\n\turl(r'^admin/', include(admin.site.urls)),\n\turl(r'^admin_tools/', include('admin_tools.urls')),\n\t\n\n\n)\n\n\n","sub_path":"disuku/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444124831","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated on 30 Apr 2021\n\n@author: Bruno Beloff (bruno.beloff@southcoastscience.com)\n\"\"\"\n\nimport json\n\nfrom scs_core.data.json import JSONify\nfrom scs_core.sys.modem import ModemConnection\n\n\n# --------------------------------------------------------------------------------------------------------------------\n# run...\n\nlines = [\n    'modem.generic.state : connected',\n    'modem.generic.state-failed-reason : --',\n    'modem.generic.signal-quality.value : 67',\n    'modem.generic.signal-quality.recent : yes'\n]\n\nconnection1 = ModemConnection.construct_from_mmcli(lines)\nprint(connection1)\n\njstr = JSONify.dumps(connection1)\nprint(jstr)\nprint(\"-\")\n\nconnection2 = ModemConnection.construct_from_jdict(json.loads(jstr))\nprint(connection2)\n\njstr = JSONify.dumps(connection2)\nprint(jstr)\nprint(\"-\")\n\nprint(connection1 == connection2)\n","sub_path":"tests/sys/modem_connection_test.py","file_name":"modem_connection_test.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611728188","text":"from flask import Flask, render_template, request\r\nimport csv\r\nimport json\r\n\r\napp = Flask(__name__)\r\nfilename = 'results.csv'\r\n\r\n@app.route('/')\r\ndef main_page():\r\n    return render_template('index.html')\r\n\r\n@app.route('/index', methods=['POST'])\r\ndef csv_saver():\r\n    if request.method == 'POST':\r\n        username = request.form['username']\r\n        gender = request.form['gender']\r\n        ling = request.form['ling']\r\n        one = request.form['one']\r\n        two = request.form['two']\r\n        three = request.form['three']\r\n        fieldnames = ['username','gender','ling','one','two','three']\r\n        with open(filename, 'a+', encoding='utf-8') as csvfile:\r\n            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\r\n            
writer.writerow({'username':username, 'gender':gender, 'ling':ling,\r\n                             'one':one, 'two':two, 'three':three})\r\n    return render_template('index.html')\r\n    \r\n\r\n    \r\nif __name__ == '__main__':\r\n    app.run(debug=False)\r\n    \r\n","sub_path":"site-anketa/firstdec.py","file_name":"firstdec.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"593956766","text":"import os\nfrom django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom livestack.apphome import views\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'livestack.views.home', name='home'),\n    # url(r'^blog/', include('blog.urls')),\n\n    url(r'^admin/', include(admin.site.urls)),\n\n    url(r'^images/(?P<path>.*)$',\n        'django.views.static.serve',\n        {\"document_root\": os.path.join(PROJECT_ROOT, \"apphome/templates/images\").replace(\"\\\\\", \"/\")}),\n\n    url(r'^css/(?P<path>.*)$',\n        'django.views.static.serve',\n        {\"document_root\": os.path.join(PROJECT_ROOT, \"apphome/templates/css\").replace(\"\\\\\", \"/\")}),\n\n    url(r'^js/(?P<path>.*)$',\n        'django.views.static.serve',\n        {\"document_root\": os.path.join(PROJECT_ROOT, \"apphome/templates/js\").replace(\"\\\\\", \"/\")}),\n\n    url(r'^lib/(?P<path>.*)$',\n        'django.views.static.serve',\n        {\"document_root\": os.path.join(PROJECT_ROOT, \"apphome/templates/lib\").replace(\"\\\\\", \"/\")}),\n\n    url(r'^plugin/(?P<path>.*)$',\n        'django.views.static.serve',\n        {\"document_root\": os.path.join(PROJECT_ROOT, \"apphome/templates/plugin\").replace(\"\\\\\", \"/\")}),\n)\n\n\nurlpatterns += patterns('',\n    url(\"^$\", views.Index.as_view(), name=\"index\"),\n    url(\"^download/$\", views.Download.as_view(), name=\"download\")\n)\n","sub_path":"livestack/livestack/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105817494","text":"\"\"\"\nA python file to hold all the endpoints exposed using paso.\n\"\"\"\nfrom flask import jsonify, request, Blueprint\nfrom paso import __version__\n\nfrom airflow.www.app import csrf\nfrom airflow.configuration import conf as airflow_config\n\nfrom paso.helper import upload_files_to_zip\n\nAPI_BLUEPRINT = Blueprint(\n    'paso_api',\n    __name__)\n\n\n@API_BLUEPRINT.route(\"/paso_api/info\", methods=['GET'])\ndef info():\n    \"\"\"\n    A temp endpoint.\n    :return:\n    \"\"\"\n    response = jsonify(github_link=\"https://github.com/sreenathkamath/Paso\",\n                       author=\"Sreenath Kamath\",\n                       version=__version__)\n    return response\n\n\n@csrf.exempt\n@API_BLUEPRINT.route(\"/paso_api/deploy\", methods=['POST'])\ndef deploy():\n    \"\"\"\n    A deploy REST endpoint.\n    :return:\n    \"\"\"\n    files = request.files\n    if len(files) == 0:\n        response = jsonify(\n            status=\"error\",\n            message=\"At least one file should be selected\")\n        return response, 400\n    if 'zip_file_name' not in request.form:\n        response = jsonify(\n            status=\"error\",\n            message=\"Zip File Name not passed in the request.\")\n        return response, 400\n    try:\n        input_files = request.files\n        zip_file_name = request.form['zip_file_name']\n        dags_directory = airflow_config.get(\"core\", \"dags_folder\")\n        target_file_name = \"{0}/{1}.zip\".format(dags_directory, zip_file_name)\n        upload_files_to_zip(input_files=input_files,\n                            zip_file_name=target_file_name)\n        response = jsonify(\n            status=\"SUCCESS\",\n            
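The survey-form record above appends each submission with csv.DictWriter but never writes a header row, so the resulting CSV has no column names. A minimal sketch of the append-with-header-once pattern follows; the field names mirror the record, the path handling is illustrative.

```python
# Sketch: append a row, emitting the header only when the file
# is new or empty. Field names are taken from the record above.
import csv
import os

FIELDNAMES = ['username', 'gender', 'ling', 'one', 'two', 'three']


def append_row(path, row):
    write_header = not os.path.exists(path) or os.path.getsize(path) == 0
    with open(path, 'a+', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=FIELDNAMES)
        if write_header:
            writer.writeheader()
        writer.writerow(row)
```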
files_uploaded=len(input_files))\n return response\n except Exception as e:\n response = jsonify(\n status=\"error\",\n message=e.message)\n return response, 400\n","sub_path":"paso/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491347320","text":"import plotly\r\n\r\nimport numpy as np\r\n\r\nplotly.tools.set_credentials_file(username='easwaran6192', api_key='hoNKkTAaHj3mUdBHEm1x')\r\n\r\nreorientAndBearing = np.loadtxt('stats_dump/reorientation_speed_and_bearing.txt', delimiter = \",\")\r\n\r\ntrace = plotly.graph_objs.Scatter(\r\n x = reorientAndBearing[:,0],\r\n y = reorientAndBearing[:,1],\r\n mode = 'markers'\r\n)\r\n\r\ndata = [trace]\r\n\r\n# Plot and embed in ipython notebook!\r\nplotly.plotly.iplot(data, filename='reorient_and_bearing_new_arena')\r\n\r\n\r\nrun_lengths = np.loadtxt('stats_dump/run_lengths.txt')\r\n\r\ndata = [\r\n plotly.graph_objs.Histogram(\r\n x=run_lengths,\r\n histnorm='probability'\r\n )\r\n]\r\n\r\nplotly.plotly.iplot(data, filename='run_lengths_new_arena')\r\n\r\n\r\nbearing_freqs_left_turn = np.loadtxt('stats_dump/bearing_freqs_left_turn.txt')\r\n\r\ndata = [\r\n plotly.graph_objs.Histogram(\r\n x=bearing_freqs_left_turn,\r\n histnorm='probability'\r\n )\r\n]\r\n\r\nplotly.plotly.iplot(data, filename='bearing_freqs_left_turn_new_arena')\r\n\r\n\r\nabsolute_bearing_freqs_turn = np.loadtxt('stats_dump/absolute_bearing_freqs_turn.txt')\r\n\r\ndata = [\r\n plotly.graph_objs.Histogram(\r\n x=absolute_bearing_freqs_turn,\r\n histnorm='probability'\r\n )\r\n]\r\n\r\nplotly.plotly.iplot(data, filename='absolute_bearing_freqs_turn_new_arena')\r\n\r\nabsolute_bearing_freqs_turn = np.loadtxt('stats_dump/absolute_bearing_freqs_turn.txt')\r\n","sub_path":"plot_data.py","file_name":"plot_data.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"167184503","text":"import matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\n\r\nt = np.linspace(0,2*np.pi) # linearly space t between 0 and 2*pi for a full circle.\r\nx = np.cos(t)\r\ny = np.sin(t)\r\nplt.plot(x,y)\r\nplt.gca().set_aspect('equal') # make the aspect ratio equal so it appears 'true'\r\nplt.show()\r\n\r\n# 2. Plot Fermat's Spiral, as in https://en.wikipedia.org/wiki/Fermat%27s_spiral\r\nlength = 40 # change the length to change the number of times the curve spirals\r\nt = np.linspace(0,length,10000)\r\nx0 = (t**0.5)*np.cos(t)\r\ny0 = (t**0.5)*np.sin(t)\r\nx1 = (-t**0.5)*np.cos(t)\r\ny1 = (-t**0.5)*np.sin(t)\r\nx = np.concatenate((x0[::-1],x1)) # stick the two vectors together.\r\ny = np.concatenate((y0[::-1],y1))\r\nplt.plot(x,y)\r\nplt.gca().set_aspect('equal')\r\nplt.show()\r\n# 3. 
Plot the Butterfly Curve, as in\r\nt = np.linspace(0,12*np.pi,10000) # the curve is specified from 0 to 12*pi.\r\nx = np.sin(t)*( np.exp(np.cos(t)) - 2*np.cos(4*t) - (np.sin(t/12))**5 )\r\ny = np.cos(t)*( np.exp(np.cos(t)) - 2*np.cos(4*t) - (np.sin(t/12))**5 )\r\nplt.plot(x,y)\r\nplt.gca().set_aspect('equal')\r\nplt.show()\r\n# Extension: using colormaps on the butterfly curve (and, LineCollection for faster plotting)\r\nfrom matplotlib import cm\r\nfrom matplotlib.collections import LineCollection\r\nr = np.sqrt(x**2 + y**2) # find the radius, then normalise.\r\nr = r/max(r)\r\nfig,ax = plt.subplots()\r\nsegments = []\r\nfor i in range(len(x)-1):\r\n segments.append([(x[i],y[i]),(x[i+1],y[i+1])])\r\ncoll = LineCollection(segments, cmap=plt.cm.rainbow,linewidth=0.8)\r\ncoll.set_array(r)\r\nax.add_collection(coll)\r\nax.set_aspect('equal')\r\nax.autoscale_view()\r\nax.axis('off')\r\nplt.show()\r\n","sub_path":"Python Challenge 1 - Can a computer create art.py","file_name":"Python Challenge 1 - Can a computer create art.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398707102","text":"#!/usr/bin/env python3\n# Bearbeitungszeit: 4.0h\nimport math\nimport sys\n\ntestArray = [2,3,3,3,3]\n\ndef quality_function(intArray):\n mean = sum(intArray)/len(intArray)\n quality = 0.\n for n in intArray:\n quality += (n - mean)**2\n quality = math.sqrt(quality)\n return quality\n\n#print(quality_function(testArray))\n\n\n\n\ndef split_number_rec(terms_of_sum, best_split, remain, terms_idx, l):\n '''\n terms_of_sum : list of positiv int numbers\n best_split : list with 2 values\n [0] = additive split with min quality value (default: None)\n [1] = quality value of [0] (default: None)\n remain : number for which the additive split from terms_of_sum[terms_idx:]\n suppose to be calculated\n l : is a list containing sums from terms_of_sum\n\n The result is saved in best_split\n '''\n\n print(remain,terms_of_sum[terms_idx])\n if remain == 0:\n l.append(terms_of_sum[terms_idx])\n print(l)\n if best_split[1] is None or quality_function(l) < best_split[1]:\n best_split[0] = l\n best_split[1] = quality_function(l)\n\n elif remain > terms_of_sum[terms_idx]:\n remain -= terms_of_sum[terms_idx]\n l.append(terms_of_sum[terms_idx])\n split_number_rec(terms_of_sum, best_split, remain, terms_idx, l)\n\n else:\n print(l)\n\n\ndef split_number(number, terms_of_sum):\n best_split = [None, None]\n l = []\n #split_number_rec(terms_of_sum, best_split, remain, terms_idx, l)\n split_number_rec(terms_of_sum, best_split, number, 0, []) # [2,2,2,2,3]\n split_number_rec(terms_of_sum, best_split, 5, 1, [2,2,2]) # [2,2,2,5]\n split_number_rec(terms_of_sum, best_split, 7, 1, [2,2]) # []\n split_number_rec(terms_of_sum, best_split, 9, 1, [2]) # [2,3,3,3]\n\n return best_split\n\n\nn = 11\nl = []\nS = [2, 3, 5]\nfor i in range(len(S)):\n split_number_rec(S, [None,None], n, i, l) # [2,2,2,2,3]\n l_ = l[:]\nsys.exit()\n\nn = 32\nl = []\nS = [7, 11, 13]\nsplit_number_rec(S, [None, None], n, 0, l) # [7,7,7,11]]\n\nn = 38\nl = []\nS = [7, 11, 13]\nsplit_number_rec(S, [None, None], n, 0, l) # [7,7,11,13]\n\nn = 45\nl = []\nS = [8, 9]\nsplit_numbers_test(S, [None, None], n, 0, l) # [9,9,9,9,9]]\nn = 47\nl = []\nS = [11, 12, 13, 14]\nsplit_numbers_test(S, [None, None], n, 0, l) # [11,12,12,12]]\nn = 47\nl = []\nS = [13, 14]\nsplit_numbers_test(S, [None, None], n, 0, l) # None\n\n\n\n\n\n\n'''\nS = [s1, s2, ..., si, ..., sk]\nsort(S)\nn = n1*s1 + 
n2*s2 + n3*s3 + ... + nk*sk\n\n\nSCAN PARAMETER SPACE ALGORITHM:\n 0 <= n1, n2, ..., ni, ..., nk <= N # parameter space for ni\n N = max( ni | ni*min(S) = ni*s1 <= n )\n go through all ni in parameter space to find n\n\n\nINT DIVIDE ALGORITHM:\n nk = n//sk\n rk = n%sk\n n(k-1) = rk//s(k-1)\n r(k-1) = rk%s(k-1)\n ...\n n2 = r3//s2\n r2 = r3%s2\n n1 = r2//s1\n r1 = r2%s1 = 0\n\n save quality\n\n n(k-1) = rk//s(k-1)\n r(k-1) = rk%s(k-1)\n ...\n n1 = r2//s1\n r1 = r2%s1 = 0\n\n compare quality\n replace with parameters with better quality\n\n n(k-2) = rk//s(k-2)\n r(k-2) = rk%s(k-2)\n ...\n\n'''\n","sub_path":"Uebungen/Blatt08.Harkov.Lehmann.Music/Aufgabe1/Splitnumber/splitnumber.py","file_name":"splitnumber.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53480748","text":"from pepper.framework.abstract import AbstractBackend\nfrom pepper import logger\n\n\nclass ComponentDependencyError(Exception): pass\n\n\nclass AbstractComponent(object):\n def __init__(self, backend):\n \"\"\"\n Construct Component\n\n Parameters\n ----------\n backend: AbstractBackend\n \"\"\"\n super(AbstractComponent, self).__init__()\n\n self._backend = backend\n self._log = logger.getChild(self.__class__.__name__)\n\n @property\n def log(self):\n return self._log\n\n @property\n def backend(self):\n \"\"\"\n Returns\n -------\n backend: AbstractBackend\n \"\"\"\n return self._backend\n\n def require_dependency(self, cls, dependency):\n \"\"\"\n Require Component\n\n Parameters\n ----------\n cls: type\n dependency: type\n\n Returns\n -------\n dependency: AbstractComponent\n \"\"\"\n if not isinstance(self, dependency):\n raise ComponentDependencyError(\"{} depends on {}, which is not a superclass of {}\".format(\n cls.__name__, dependency.__name__, self.__class__.__name__))\n\n if self.__class__.mro().index(cls) > self.__class__.mro().index(dependency):\n raise ComponentDependencyError(\"{0} depends on {1}, but {1} is not initialized before {0} in {2}\".format(\n cls.__name__, dependency.__name__, self.__class__.__name__))\n\n return self\n","sub_path":"pepper/framework/abstract/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652965142","text":"# Copyright (c) 2020 BlenderNPR and contributors. MIT license. \n\nimport ctypes\nimport cProfile, pstats, io\n\nimport bpy\n\nfrom mathutils import Vector,Matrix,Quaternion\n\nfrom Malt import Scene, Pipeline\nfrom Malt.GL import GL\nfrom Malt.GL.Texture import Texture\n\nfrom BlenderMalt import MaltPipeline, MaltMeshes, MaltMaterial, CBlenderMalt\n\nPROFILE = False\n\nclass MaltRenderEngine(bpy.types.RenderEngine):\n # These three members are used by blender to set up the\n # RenderEngine; define its internal name, visible name and capabilities.\n bl_idname = \"MALT\"\n bl_label = \"Malt\"\n bl_use_preview = False\n bl_use_postprocess = True\n bl_use_shading_nodes_custom = False\n\n # Init is called whenever a new render engine instance is created. 
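The closing docstring of the splitnumber record above outlines two strategies in pseudocode. The greedy "INT DIVIDE" idea is the easier one to misread, so a runnable rendering of it is sketched below, under the docstring's assumptions only (positive integer terms, remainder must reach exactly zero); it is not the exercise's reference solution.

```python
# Sketch of the greedy "INT DIVIDE" strategy from the docstring above:
# divide the remainder by each term, largest first.
def int_divide_split(n, terms):
    counts = {}
    remain = n
    for s in sorted(terms, reverse=True):
        counts[s], remain = divmod(remain, s)
    if remain != 0:
        return None  # greedy dead end; a full solution needs backtracking
    return counts


print(int_divide_split(45, [8, 9]))       # {9: 5, 8: 0}, i.e. 9*5 == 45
print(int_divide_split(32, [7, 11, 13]))  # None, although 7+7+7+11 == 32
```

The second call shows why the record falls back to recursive search: the pure greedy pass misses valid splits, which is exactly what the docstring's "compare quality / replace with parameters" steps are meant to recover.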
Multiple\n # instances may exist at the same time, for example for a viewport and final\n # render.\n def __init__(self):\n self.display_draw = None\n self.scene = Scene.Scene()\n self.view_matrix = None\n self.request_new_frame = True\n self.request_scene_update = True\n self.profiling_data = io.StringIO()\n self.bridge = MaltPipeline.get_bridge()\n self.bridge_id = self.bridge.get_viewport_id() if self.bridge else None\n\n def __del__(self):\n try:\n self.bridge.free_viewport_id(self.bridge_id)\n self.bridge = None\n except:\n # Sometimes Blender seems to call the destructor on unitialiazed instances (???)\n pass\n \n def get_scene(self, context, depsgraph, request_scene_update, overrides):\n def flatten_matrix(matrix):\n return [e for v in matrix.transposed() for e in v]\n \n materials = {}\n meshes = {}\n resources = {\n 'materials': materials,\n }\n \n if request_scene_update == True:\n scene = Scene.Scene()\n self.scene = scene\n scene = self.scene\n scene.parameters = depsgraph.scene_eval.malt_parameters.get_parameters(overrides, resources)\n scene.world_parameters = depsgraph.scene_eval.world.malt_parameters.get_parameters(overrides, resources)\n\n scene.frame = depsgraph.scene_eval.frame_current\n r = depsgraph.scene_eval.render\n fps = r.fps / r.fps_base\n remap = r.frame_map_new / r.frame_map_old\n scene.time = (scene.frame / fps) * remap\n \n #Camera\n if depsgraph.mode == 'VIEWPORT':\n view_3d = context.region_data \n camera_matrix = flatten_matrix(view_3d.view_matrix)\n projection_matrix = flatten_matrix(view_3d.window_matrix)\n if view_3d.perspective_matrix != self.view_matrix:\n self.view_matrix = view_3d.perspective_matrix.copy()\n self.request_new_frame = True\n scene.camera = Scene.Camera(camera_matrix, projection_matrix)\n else:\n camera = depsgraph.scene_eval.camera\n camera_matrix = flatten_matrix(camera.matrix_world.inverted())\n projection_matrix = flatten_matrix(\n camera.calc_matrix_camera( depsgraph, \n x=depsgraph.scene_eval.render.resolution_x, \n y=depsgraph.scene_eval.render.resolution_y\n ))\n scene.camera = Scene.Camera(camera_matrix, projection_matrix)\n \n if request_scene_update == False:\n return scene\n\n #Objects\n def add_object(obj, matrix):\n if obj.display_type in ['TEXTURED','SOLID'] and obj.type in ('MESH','CURVE','SURFACE','FONT'):\n name = MaltMeshes.get_mesh_name(obj)\n if depsgraph.mode == 'RENDER':\n name = '___F12___' + name\n \n if name not in meshes:\n # (Uses obj.original) Malt Parameters are not present in the evaluated mesh\n parameters = obj.original.data.malt_parameters.get_parameters(overrides, resources)\n malt_mesh = None\n \n if depsgraph.mode == 'VIEWPORT':\n malt_mesh = MaltMeshes.get_mesh(obj)\n else: #always load the mesh for final renders\n malt_mesh = MaltMeshes.load_mesh(obj, name)\n \n if malt_mesh:\n meshes[name] = [Scene.Mesh(submesh, parameters) for submesh in malt_mesh]\n else:\n meshes[name] = None\n\n mesh = meshes[name]\n if mesh is None:\n return\n \n scale = matrix.to_scale()\n mirror_scale = scale[0]*scale[1]*scale[2] < 0.0\n matrix = flatten_matrix(matrix)\n\n obj_parameters = obj.malt_parameters.get_parameters(overrides, resources)\n \n if len(obj.material_slots) > 0:\n for i, slot in enumerate(obj.material_slots):\n material = None\n if slot.material:\n material_name = slot.material.name_full\n if material_name not in materials.keys():\n shader = {\n 'path': bpy.path.abspath(slot.material.malt.shader_source, library=slot.material.library),\n 'parameters': 
slot.material.malt.parameters.get_parameters(overrides, resources)\n }\n parameters = slot.material.malt_parameters.get_parameters(overrides, resources)\n materials[material_name] = Scene.Material(shader, parameters)\n material = materials[material_name]\n result = Scene.Object(matrix, mesh[i], material, obj_parameters, mirror_scale)\n scene.objects.append(result)\n else:\n result = Scene.Object(matrix, mesh[0], None, obj_parameters, mirror_scale)\n scene.objects.append(result)\n \n elif obj.type == 'LIGHT':\n if obj.data.type == 'AREA':\n return #Not supported\n\n malt_light = obj.data.malt\n\n light = Scene.Light()\n light.color = tuple(malt_light.color)\n light.position = tuple(obj.matrix_world.translation)\n light.direction = tuple(obj.matrix_world.to_quaternion() @ Vector((0.0,0.0,-1.0)))\n light.radius = malt_light.radius\n light.spot_angle = malt_light.spot_angle\n light.spot_blend = malt_light.spot_blend_angle\n light.parameters = obj.data.malt_parameters.get_parameters(overrides, resources)\n\n types = {\n 'SUN' : 1,\n 'POINT' : 2,\n 'SPOT' : 3,\n }\n light.type = types[obj.data.type]\n\n if light.type == types['SUN']:\n light.matrix = flatten_matrix(matrix.to_quaternion().to_matrix().to_4x4().inverted())\n else:\n #Scaling too ????\n light.matrix = flatten_matrix(matrix.inverted())\n \n scene.lights.append(light)\n\n is_f12 = depsgraph.mode == 'RENDER'\n\n for obj in depsgraph.objects:\n if is_f12 or obj.visible_in_viewport_get(context.space_data):\n add_object(obj, obj.matrix_world)\n\n for instance in depsgraph.object_instances:\n if instance.instance_object:\n if is_f12 or instance.parent.visible_in_viewport_get(context.space_data):\n add_object(instance.instance_object, instance.matrix_world)\n \n #TODO: \n for i, obj in enumerate(scene.objects):\n obj.parameters['ID'] = i+1\n \n scene.meshes = list(meshes.values())\n scene.materials = list(materials.values())\n \n return scene\n \n def update_render_passes(self, scene=None, renderlayer=None):\n self.register_pass(scene, renderlayer, \"Combined\", 4, \"RGBA\", 'COLOR')\n self.register_pass(scene, renderlayer, \"Depth\", 1, \"R\", 'VALUE')\n\n def render(self, depsgraph):\n scene = depsgraph.scene_eval\n scale = scene.render.resolution_percentage / 100.0\n\n self.size_x = int(scene.render.resolution_x * scale)\n self.size_y = int(scene.render.resolution_y * scale)\n resolution = (self.size_x, self.size_y)\n\n overrides = ['Final Render']\n\n if self.bridge is not MaltPipeline.get_bridge(depsgraph.scene.world):\n self.bridge = MaltPipeline.get_bridge()\n self.bridge_id = self.bridge.get_viewport_id()\n\n scene = self.get_scene(None, depsgraph, True, overrides)\n MaltPipeline.get_bridge().render(0, resolution, scene, True)\n\n buffers = None\n finished = False\n\n import time\n while not finished:\n buffers, finished, read_resolution = MaltPipeline.get_bridge().render_result(0)\n time.sleep(0.1)\n if finished: break\n \n size = self.size_x * self.size_y\n\n result = self.begin_result(0, 0, self.size_x, self.size_y, layer=depsgraph.view_layer.name)\n passes = result.layers[0].passes\n\n if 'Combined' in passes:\n combined_pass = passes['Combined']\n rect_ptr = CBlenderMalt.get_rect_ptr(combined_pass.as_pointer())\n ctypes.memmove(rect_ptr, buffers['COLOR'], size*4*4)\n\n if 'Depth' in passes:\n depth_pass = passes['Depth']\n rect_ptr = CBlenderMalt.get_rect_ptr(depth_pass.as_pointer())\n ctypes.memmove(rect_ptr, buffers['DEPTH'], size*4)\n \n self.end_result(result)\n # Delete the scene. 
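The render() chunk above copies the pipeline's float buffers straight into Blender's render passes with ctypes.memmove; the byte counts are size*4*4 for RGBA (4 channels of 4-byte floats) and size*4 for the single-channel depth pass. Below is a standalone sketch of that copy, with plain ctypes arrays standing in for the Blender-side pointers.

```python
# Sketch: raw buffer copy with ctypes.memmove, mirroring the
# byte-count arithmetic above (4-byte floats, 4 channels for RGBA).
import ctypes

width, height = 4, 2
size = width * height

src = (ctypes.c_float * (size * 4))(*range(size * 4))  # RGBA source
dst = (ctypes.c_float * (size * 4))()                  # destination "pass"

# size pixels * 4 channels * 4 bytes per float, as in size*4*4 above.
ctypes.memmove(dst, src, size * 4 * 4)

assert list(dst) == list(src)
```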
Otherwise we get memory leaks.\n # Blender never deletes RenderEngine instances ???\n del self.scene\n\n # For viewport renders, this method gets called once at the start and\n # whenever the scene or 3D viewport changes. This method is where data\n # should be read from Blender in the same thread. Typically a render\n # thread will be started to do the work while keeping Blender responsive.\n def view_update(self, context, depsgraph):\n self.request_new_frame = True\n self.request_scene_update = True\n\n # Test which datablocks changed\n for update in depsgraph.updates:\n if update.is_updated_geometry:\n if 'Object' in str(update.id.__class__):\n MaltMeshes.unload_mesh(update.id)\n\n # For viewport renders, this method is called whenever Blender redraws\n # the 3D viewport. The renderer is expected to quickly draw the render\n # with OpenGL, and not perform other expensive work.\n # Blender will draw overlays for selection and editing on top of the\n # rendered image automatically.\n def view_draw(self, context, depsgraph):\n profiler = cProfile.Profile()\n global PROFILE\n if PROFILE:\n profiler.enable()\n if self.request_new_frame:\n self.profiling_data = io.StringIO()\n \n if self.bridge is not MaltPipeline.get_bridge():\n #The Bridge has been reset\n self.bridge = MaltPipeline.get_bridge()\n self.bridge_id = self.bridge.get_viewport_id()\n self.request_new_frame = True\n self.request_scene_update = True\n \n overrides = []\n if context.space_data.shading.type == 'MATERIAL':\n overrides.append('Preview')\n\n scene = self.get_scene(context, depsgraph, self.request_scene_update, overrides)\n resolution = context.region.width, context.region.height\n\n if self.request_new_frame:\n self.bridge.render(self.bridge_id, resolution, scene, self.request_scene_update)\n self.request_new_frame = False\n self.request_scene_update = False\n\n buffers, finished, read_resolution = self.bridge.render_result(self.bridge_id)\n pixels = buffers['COLOR']\n\n if not finished:\n self.tag_redraw()\n if pixels is None or resolution != read_resolution:\n # Only render if resolution is the same as read_resolution.\n # This avoids visual glitches when the viewport is resizing.\n # The alternative would be locking when writing/reading the pixel buffer.\n return\n \n for region in context.area.regions:\n if region.type == 'UI':\n region.tag_redraw()\n\n fbo = GL.gl_buffer(GL.GL_INT, 1)\n GL.glGetIntegerv(GL.GL_FRAMEBUFFER_BINDING, fbo)\n \n render_texture = Texture(resolution, GL.GL_RGBA32F, GL.GL_FLOAT, pixels)\n \n self.bind_display_space_shader(depsgraph.scene_eval)\n if self.display_draw is None or self.display_draw.resolution != resolution:\n if self.display_draw:\n self.display_draw.gl_delete()\n self.display_draw = DisplayDraw(resolution)\n self.display_draw.draw(fbo, render_texture)\n self.unbind_display_space_shader()\n\n if PROFILE:\n profiler.disable()\n stats = pstats.Stats(profiler, stream=self.profiling_data)\n stats.strip_dirs()\n stats.sort_stats(pstats.SortKey.CUMULATIVE)\n stats.print_stats()\n print('PROFILE BEGIN--------------------------------------')\n print(self.profiling_data.getvalue())\n print('PROFILE END--------------------------------------')\n\n#Boilerplate code to draw an OpenGL texture to the viewport using Blender color management\nclass DisplayDraw(object):\n def __init__(self, resolution):\n self.resolution = resolution\n width, height = resolution\n\n shader_program = GL.gl_buffer(GL.GL_INT, 1)\n GL.glGetIntegerv(GL.GL_CURRENT_PROGRAM, shader_program)\n\n self.vertex_array = 
GL.gl_buffer(GL.GL_INT, 1)\n GL.glGenVertexArrays(1, self.vertex_array)\n GL.glBindVertexArray(self.vertex_array[0])\n\n texturecoord_location = GL.glGetAttribLocation(shader_program[0], \"texCoord\")\n position_location = GL.glGetAttribLocation(shader_program[0], \"pos\")\n\n GL.glEnableVertexAttribArray(texturecoord_location)\n GL.glEnableVertexAttribArray(position_location)\n\n position = [0.0, 0.0, width, 0.0, width, height, 0.0, height]\n position = GL.gl_buffer(GL.GL_FLOAT, len(position), position)\n texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]\n texcoord = GL.gl_buffer(GL.GL_FLOAT, len(texcoord), texcoord)\n\n self.vertex_buffer = GL.gl_buffer(GL.GL_INT, 2)\n\n GL.glGenBuffers(2, self.vertex_buffer)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vertex_buffer[0])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, 32, position, GL.GL_STATIC_DRAW)\n GL.glVertexAttribPointer(position_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.vertex_buffer[1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, 32, texcoord, GL.GL_STATIC_DRAW)\n GL.glVertexAttribPointer(texturecoord_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n GL.glBindVertexArray(0)\n\n def __del__(self):\n # We can't guarantee that the descructor runs on the correct OpenGL context.\n # This can cause driver crashes.\n # So it's better to just return early and let the memory leak. :(\n return\n self.gl_delete()\n \n def gl_delete(self):\n GL.glDeleteBuffers(2, self.vertex_buffer)\n GL.glDeleteVertexArrays(1, self.vertex_array)\n GL.glBindTexture(GL.GL_TEXTURE_2D, 0)\n\n def draw(self, fbo, texture):\n GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, fbo[0])\n GL.glActiveTexture(GL.GL_TEXTURE0)\n texture.bind()\n GL.glBindVertexArray(self.vertex_array[0])\n GL.glDrawArrays(GL.GL_TRIANGLE_FAN, 0, 4)\n GL.glBindVertexArray(0)\n GL.glBindTexture(GL.GL_TEXTURE_2D, 0)\n\n\nimport bpy_extras\n\nclass OT_MaltProfileFrameReport(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):\n bl_idname = \"wm.malt_profile_frame_report\"\n bl_label = \"Malt Profile Frame Report\"\n\n filename_ext = \".txt\" # ExportHelper mixin class uses this\n\n filter_glob : bpy.props.StringProperty(\n default=\"*.txt\",\n options={'HIDDEN'},\n maxlen=255, # Max internal buffer length, longer would be clamped.\n )\n\n def execute(self, context):\n global REPORT_PATH\n REPORT_PATH = self.filepath\n global PROFILE\n PROFILE = True\n context.space_data.shading.type = 'SOLID'\n context.space_data.shading.type = 'RENDERED'\n return{'FINISHED'}\n\nclasses = [\n MaltRenderEngine,\n OT_MaltProfileFrameReport,\n]\n\n# RenderEngines also need to tell UI Panels that they are compatible with.\n# We recommend to enable all panels marked as BLENDER_RENDER, and then\n# exclude any panels that are replaced by Malt panels registered by the\n# render engine, or that are not supported.\ndef get_panels():\n exclude_panels = {\n 'VIEWLAYER_PT_filter',\n 'VIEWLAYER_PT_layer_passes',\n 'DATA_PT_area',\n }\n\n panels = []\n for panel in bpy.types.Panel.__subclasses__():\n if hasattr(panel, 'COMPAT_ENGINES') and 'BLENDER_RENDER' in panel.COMPAT_ENGINES:\n if panel.__name__ not in exclude_panels:\n panels.append(panel)\n\n return panels\n\ndef register():\n for cls in classes:\n bpy.utils.register_class(cls)\n\n for panel in get_panels():\n panel.COMPAT_ENGINES.add('MALT')\n\ndef unregister():\n for cls in classes:\n bpy.utils.unregister_class(cls)\n\n for panel in get_panels():\n if 'MALT' in panel.COMPAT_ENGINES:\n 
panel.COMPAT_ENGINES.remove('MALT')\n\nif __name__ == \"__main__\":\n register()\n","sub_path":"BlenderMalt/MaltRenderEngine.py","file_name":"MaltRenderEngine.py","file_ext":"py","file_size_in_byte":18021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351743104","text":"CHUNK_SIZE = 40960\n\ndef download_into(session, url, file, process_func=None):\n r = session.get(url, stream=True)\n length = int(r.headers.get('Content-Length') or 0)\n received = 0\n for chunk in r.iter_content(CHUNK_SIZE):\n received += len(chunk)\n file.write(chunk)\n if process_func:\n process_func(received, length)\n if not length and process_func:\n process_func(received, received)\n\ndef download_into_with_progressbar(url, dest):\n import time\n from functools import partial\n import requests\n from termutils import download_process, get_terminal_size\n\n w = get_terminal_size()[1]\n with open(dest, 'wb') as f:\n download_into(requests, url, f, partial(\n download_process, dest, time.time(), width=w))\n\nif __name__ == '__main__':\n from sys import argv, exit\n\n if len(argv) != 3:\n exit('URL and output file not given.')\n\n try:\n download_into_with_progressbar(argv[1], argv[2])\n except KeyboardInterrupt:\n exit(2)\n","sub_path":"pylib/requestsutils.py","file_name":"requestsutils.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74056177","text":"\"\"\"Day 2\"\"\"\n\nfrom collections import Counter\n\nfilename = 'input'\n\n#Part 1\n\ntwo_times = sum([1 for line in open(filename)\\\n if 2 in Counter(line).values()])\nthree_times = sum([1 for line in open(filename)\\\n if 3 in Counter(line).values()])\n\nprint(two_times * three_times)\n\n#Part 2\n\nlines = [line.strip() for line in open(filename)]\nsearch = False\nfor i in range(0, len(lines)):\n a = lines[i]\n for j in range(i + 1, len(lines)):\n b = lines[j]\n diff = 0\n k = 0\n letters = ''\n while diff <= 1 and k < len(a):\n if a[k] != b[k]:\n diff += 1\n else:\n letters += a[k]\n k += 1\n if diff == 1:\n print(letters)\n search = True\n break\n","sub_path":"aoc2018/day2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653695647","text":"# Locust load test\n\nfrom locust import HttpUser, task\n\n\nclass LoadTest(HttpUser):\n @task\n def send_form(self):\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n payload = 'number=100'\n self.client.post(\"/\", headers=headers, data=payload)\n","sub_path":"loadtest/int_loadtest.py","file_name":"int_loadtest.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251035004","text":"import os\nimport pandas as pd\nfrom tkinter import Tcl\n\ndef create_clean_data(df_value,video_num,list_sec,list_val,list_index):\n for index, row in df_value.iterrows():\n temp = df_value[1][index]\n if \",\" in temp:\n vid,value,sec = temp.split(\",\")\n if \"[\" in vid:\n unnec, vid = vid.split(\"[\")\n if \"[\" in value:\n unnec, value = value.split(\"[\")\n if \"]\" in sec:\n sec,unnec1,unnec2 = sec.split(\"]\")\n video_num.append(vid)\n list_sec.append(sec)\n list_val.append(value)\n list_index.append(index)\n\ndef convert_to_int (video_num):\n int_video_num =[]\n for i in range (len(video_num)):\n temp = video_num[i]\n 
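download_into in the requestsutils record above streams a response in 40960-byte chunks and reports progress against Content-Length, issuing one final callback when the header is absent. A trimmed, self-contained sketch of the same pattern follows (print-based progress, illustrative only, not the repository's helper).

```python
# Sketch of the chunked-download pattern above: stream in fixed-size
# chunks and report progress against Content-Length when present.
import requests

CHUNK_SIZE = 40960


def download(url, dest):
    with requests.get(url, stream=True) as r, open(dest, 'wb') as f:
        total = int(r.headers.get('Content-Length') or 0)
        received = 0
        for chunk in r.iter_content(CHUNK_SIZE):
            f.write(chunk)
            received += len(chunk)
            if total:
                print('\r%d/%d bytes' % (received, total), end='')
    print()
```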
int_video_num.append(int(temp))\n    return int_video_num\n\ndef sort_all_parameters_by_video_number(video_num, list_sec, list_val,list_index, sorted_video_num, sorted_list_sec,\n                                        sorted_list_val,sorted_index):\n    int_video_num = convert_to_int (video_num)\n    i = 0\n    while i in range(len(int_video_num)):\n        min_value = min(int_video_num)\n        min_index = int_video_num.index(min_value)\n        sorted_video_num.append(min_value)\n        sorted_list_sec.append(list_sec[min_index])\n        sorted_list_val.append(list_val[min_index])\n        sorted_index.append(list_index[min_index])\n        del int_video_num[min_index]\n        del list_sec[min_index]\n        del list_val[min_index]\n        del list_index[min_index]\n\n\nif __name__ == '__main__':\n    path = 'C:/Users/yael/Documents/GitHub/highway/TOP 30 HIGHLIGHT'\n    files = os.listdir(path)\n    df_value = pd.read_csv('results_file_highlights.csv', header =None)\n    video_num = []\n    list_sec = []\n    list_val = []\n    list_index = []\n    create_clean_data(df_value,video_num,list_sec,list_val,list_index)\n    sorted_video_num = []\n    sorted_list_sec = []\n    sorted_list_val = []\n    sorted_index = []\n    sort_all_parameters_by_video_number(video_num, list_sec, list_val,list_index, sorted_video_num, sorted_list_sec,\n                                        sorted_list_val,sorted_index)\n    for index,file in enumerate(files):\n        original_name = file\n        os.rename(os.path.join(path, file), os.path.join(path, ''.join([str(sorted_index[index]),'_',str(sorted_video_num[index]),'_',str(sorted_list_sec[index]),'.mp4'])))","sub_path":"rename_video_highlights.py","file_name":"rename_video_highlights.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"354806693","text":"# -*- coding: utf-8 -*-\n##----------------------------------------------------------------------\n## EdgeCore.ES.get_chassis_id\n##----------------------------------------------------------------------\n## Copyright (C) 2007-2012 The NOC Project\n## See LICENSE for details\n##----------------------------------------------------------------------\n\"\"\"\n\"\"\"\n## Python modules\nimport re\n## NOC modules\nfrom noc.sa.script import Script as NOCScript\nfrom noc.sa.interfaces import IGetChassisID\n\n\nclass Script(NOCScript):\n    name = \"EdgeCore.ES.get_chassis_id\"\n    cache = True\n    implements = [IGetChassisID]\n    rx_mac_4626 = re.compile(r\"\\d+\\s+(?P<id>\\S+).*?System\\s+CPU\",\n                             re.IGNORECASE | re.MULTILINE)\n    rx_mac = re.compile(r\"MAC Address[^:]*?:\\s*(?P<id>\\S+)\",\n                        re.IGNORECASE | re.MULTILINE)\n\n    ##\n    ## ES4626\n    ##\n    @NOCScript.match(platform__contains=\"4626\")\n    def execute_4626(self):\n        v = self.cli(\"show mac-address-table static\")\n        match = self.re_search(self.rx_mac_4626, v)\n        mac = match.group(\"id\")\n        return {\n            \"first_chassis_mac\": mac,\n            \"last_chassis_mac\": mac\n        }\n\n    ##\n    ## Other\n    ##\n    @NOCScript.match()\n    def execute_other(self):\n        v = self.cli(\"show system\")\n        match = self.re_search(self.rx_mac, v)\n        first_mac = match.group(\"id\")\n        last_mac = None\n        v = self.cli(\"show int statu\")\n        for l in v.splitlines():\n            match = self.rx_mac.search(l)\n            if match:\n                if match.group(\"id\") != first_mac:\n                    last_mac = match.group(\"id\")\n        if not last_mac:\n            last_mac = first_mac\n        return {\n            \"first_chassis_mac\": first_mac,\n            \"last_chassis_mac\": last_mac\n        }\n","sub_path":"sa/profiles/EdgeCore/ES/get_chassis_id.py","file_name":"get_chassis_id.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104401439","text":"# 
coding: utf-8\n\nfrom the_tale.linguistics.lexicon.relations import VARIABLE as V\nfrom the_tale.linguistics.lexicon.groups.relations import LEXICON_GROUP\n\nKEYS = [(u'QUEST_HELP_FRIEND_ACTION_AFTER_HELP', 440000, u'Активность: после выполнения задания на помощь', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Краткое суммарное описание действий героя после выполнения взятого задания.',\n [V.RECEIVER_POSITION, V.HERO, V.RECEIVER], None),\n\n (u'QUEST_HELP_FRIEND_ACTION_BEFORE_HELP', 440001, u'Активность: перед взятием задания на помощь', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Краткое суммарное описание действий героя перед взятием задания на помощь.',\n [V.RECEIVER_POSITION, V.HERO, V.RECEIVER], None),\n\n (u'QUEST_HELP_FRIEND_ACTION_INTRO', 440002, u'Активность: интро', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Краткое суммарное описание действий героя в момент получения задания.',\n [V.RECEIVER_POSITION, V.HERO, V.RECEIVER], None),\n\n (u'QUEST_HELP_FRIEND_ACTOR_RECEIVER', 440003, u'Актёр: Соратник', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Название роли, соратника.',\n [V.RECEIVER_POSITION, V.HERO, V.RECEIVER], None),\n\n (u'QUEST_HELP_FRIEND_DIARY_FINISH_MEETING_ARTIFACT', 440004, u'Дневник: награда за удавшуюся помощь (артефакт)', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Герой получает награду за удавшуюся попытку помочь (артефакт).',\n [V.RECEIVER_POSITION, V.HERO, V.ARTIFACT, V.RECEIVER], None),\n\n (u'QUEST_HELP_FRIEND_DIARY_FINISH_MEETING_MONEY', 440005, u'Дневник: награда за удавшуюся помощь (деньги)', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Герой получает награду за удавшуюся попытку помочь (деньги).',\n [V.RECEIVER_POSITION, V.COINS, V.HERO, V.RECEIVER], u'hero#N +coins#G'),\n\n (u'QUEST_HELP_FRIEND_DIARY_INTRO', 440006, u'Дневник: начало задания', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Герой получил задание.',\n [V.RECEIVER_POSITION, V.HERO, V.RECEIVER], None),\n\n (u'QUEST_HELP_FRIEND_NAME', 440007, u'Название', LEXICON_GROUP.QUEST_HELP_FRIEND,\n u'Краткое название задания.',\n [V.RECEIVER_POSITION, V.HERO, V.RECEIVER], None),\n\n ]\n","sub_path":"the_tale/linguistics/lexicon/groups/quest_help_friend.py","file_name":"quest_help_friend.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"394598946","text":"import tensorflow as tf\nfrom tensorflow.python.keras.losses import binary_crossentropy, sparse_categorical_crossentropy\nfrom tensorflow.python.keras.losses import LossFunctionWrapper\n\n\nclass YoloLoss(LossFunctionWrapper):\n def __init__(self,\n anchors,\n num_classes=80,\n ignore_thresh=0.5,\n name='yolo_loss'):\n super(YoloLoss, self).__init__(\n yolo_loss,\n name=name,\n anchors=anchors,\n num_classes=num_classes,\n ignore_thresh=ignore_thresh)\n\n\ndef yolo_loss(y_true, y_pred, anchors, num_classes=80, ignore_thresh=0.5):\n \"\"\"\n\n :param y_true: labels inputs\n :param y_pred: predict outputs\n :param anchors: layer anchors size:(3, 2)\n :param num_classes: number of classes in dataset\n :param ignore_thresh: if (IoU < threshold) and ignore\n :return: total loss (xy_loss + wh_loss + confidence_loss + class_loss)\n \"\"\"\n # 1. transform all pred outputs\n # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))\n pred_box, pred_obj, pred_class, pred_xywh = tf.split(y_pred, (4, 1, num_classes, 4), axis=-1)\n\n # pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(y_pred, anchors, classes)\n pred_xy = pred_xywh[..., 0:2]\n pred_wh = pred_xywh[..., 2:4]\n\n # 2. 
transform all true outputs\n # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))\n # true_box: (batch_size, grid, grid, anchors, (x1, y1, x2, y2)) # 0~1\n true_box, true_obj, true_class_idx = tf.split(y_true, (4, 1, 1), axis=-1)\n true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2\n true_wh = true_box[..., 2:4] - true_box[..., 0:2]\n\n # give higher weights to small boxes\n box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]\n\n # 3. inverting the pred box equations\n grid_h, grid_w = tf.shape(y_true)[1], tf.shape(y_true)[2]\n grid = tf.meshgrid(tf.range(grid_w), tf.range(grid_h))\n grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)\n # ex: (0.5, 0.5) * (13, 13) - (6, 6)\n true_xy = true_xy * (grid_h, grid_w) - tf.cast(grid, true_xy.dtype)\n true_wh = tf.math.log(true_wh / anchors)\n true_wh = tf.where(tf.math.is_inf(true_wh), tf.zeros_like(true_wh), true_wh)\n\n # 4. calculate all masks\n obj_mask = tf.squeeze(true_obj, -1) # (batch_size, grid, grid, anchors)\n # ignore false positive when iou is over threshold\n # true_box_flat (N, (x1, y1, x2, y2))\n true_box_flat = tf.boolean_mask(true_box, tf.cast(obj_mask, tf.bool))\n best_iou = tf.reduce_max(broadcast_iou(pred_box, true_box_flat), axis=-1)\n ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)\n\n # 5. calculate all losses\n xy_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)\n wh_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)\n obj_loss = binary_crossentropy(true_obj, pred_obj)\n confidence_loss = obj_mask * obj_loss + (1 - obj_mask) * ignore_mask * obj_loss\n class_loss = obj_mask * sparse_categorical_crossentropy(true_class_idx, pred_class)\n\n # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)\n xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))\n wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))\n confidence_loss = tf.reduce_sum(confidence_loss, axis=(1, 2, 3))\n class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))\n\n return xy_loss + wh_loss + confidence_loss + class_loss\n\n\ndef broadcast_iou(pred_box, true_box):\n \"\"\"\n\n :param pred_box: size(b, gx, gy, 3, 4)\n :param true_box: size(n, 4)\n :return: Intersection over Union(IoU)\n \"\"\"\n # broadcast boxes\n pred_box = tf.expand_dims(pred_box, -2) # (b, gx, gy, 3, 1, 4)\n true_box = tf.expand_dims(true_box, 0) # (1, n, 4)\n # new_shape: (b, gx, gy, 3, n, 4)\n new_shape = tf.broadcast_dynamic_shape(tf.shape(pred_box), tf.shape(true_box))\n pred_box = tf.broadcast_to(pred_box, new_shape)\n true_box = tf.broadcast_to(true_box, new_shape)\n\n # Overlap: (b, gx, gy, 3, n)\n int_w = tf.maximum(tf.minimum(pred_box[..., 2], true_box[..., 2]) -\n tf.maximum(pred_box[..., 0], true_box[..., 0]), 0)\n int_h = tf.maximum(tf.minimum(pred_box[..., 3], true_box[..., 3]) -\n tf.maximum(pred_box[..., 1], true_box[..., 1]), 0)\n int_area = int_w * int_h\n\n # box size: w * h\n box_1_area = (pred_box[..., 2] - pred_box[..., 0]) * \\\n (pred_box[..., 3] - pred_box[..., 1])\n box_2_area = (true_box[..., 2] - true_box[..., 0]) * \\\n (true_box[..., 3] - true_box[..., 1])\n return int_area / (box_1_area + box_2_area - int_area)","sub_path":"Lab12/losses/yolo_loss.py","file_name":"yolo_loss.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477842698","text":"from werkzeug.exceptions import abort\n\n\ndef put_request_check(request):\n if 'value' not in 
request.json.keys():\n abort(400, 'value missing in request')\n\n # Check if users have entered id and if so, that it is of type Int\n try:\n user_id = request.json['id']\n try:\n int(user_id)\n except ValueError:\n abort(400, 'id should be of type Int')\n except KeyError:\n user_id = None\n\n value = request.json['value']\n expire_in = request.args.get('expire_in')\n\n # Check if users have entered expire_in, if so, that it is of type Int.\n if expire_in is not None:\n try:\n int(expire_in)\n except ValueError:\n abort(400, 'expire_in should be of type Int')\n\n return user_id, value, expire_in","sub_path":"application/controller/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"18901083","text":"\"\"\"\na mock up of plane game\n\"\"\"\n\nimport pygame\nimport random\nfrom pygame.locals import(\n K_UP,\n K_DOWN,\n K_LEFT,\n K_RIGHT,\n K_ESCAPE,\n KEYDOWN,\n QUIT\n)\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n super(Player, self).__init__()\n self.surf = pygame.Surface((75,75))\n self.surf.fill((255,255,255))\n\n self.center = ((WIDTH-self.surf.get_width())/2,(HEIGHT-self.surf.get_height())/2)\n self.rect = self.surf.get_rect()\n self.rect.left = self.center[0]\n self.rect.top = self.center[1]\n\n self.step = 1\n\n def update(self, k):\n #print(k)\n if k[K_UP] and self.rect.top>0:\n self.rect.move_ip(0,-self.step)\n if k[K_DOWN] and self.rect.bottom < HEIGHT:\n self.rect.move_ip(0,self.step)\n if k[K_LEFT] and self.rect.left > 0:\n self.rect.move_ip(-self.step,0)\n if k[K_RIGHT] and self.rect.right < WIDTH:\n self.rect.move_ip(self.step,0)\n\nclass Enemy (pygame.sprite.Sprite):\n def __init__(self):\n super(Enemy, self).__init__()\n self.surf = pygame.Surface((20,10))\n self.surf.fill((0,255,0))\n self.rect = self.surf.get_rect(\n center = (\n random.randint(WIDTH+20,WIDTH+100),\n random.randint(0,HEIGHT)\n )\n )\n self.speed = random.randint(1,2)\n def update(self):\n self.rect.move_ip(-self.speed,0)\n if self.rect.right < 0:\n self.kill()\n\n\n\nWIDTH = 1000\nHEIGHT = 800\n\nADD_ENEMY = pygame.USEREVENT+1\npygame.time.set_timer(ADD_ENEMY, 250)\n\n_screen = pygame.display.set_mode([WIDTH,HEIGHT])\n\n_player = Player()\n_enemies = pygame.sprite.Group()\n_all_sprites = pygame.sprite.Group()\n_all_sprites.add(_player)\n_running=True\n\npygame.init()\n\nwhile _running:\n\n for e in pygame.event.get():\n if e.type == KEYDOWN:\n if e.key == K_ESCAPE:\n _running = False\n elif e.type == QUIT:\n _running=False\n #...more events\n elif e.type == ADD_ENEMY:\n enemy = Enemy()\n _enemies.add(enemy)\n _all_sprites.add(enemy)\n \n pressed_keys = pygame.key.get_pressed()\n _player.update(pressed_keys)\n _enemies.update()\n\n _screen.fill((0,255,255))\n\n for obj in _all_sprites:\n _screen.blit(obj.surf, obj.rect)\n \n if pygame.sprite.spritecollideany(_player, _enemies):\n _player.kill()\n _running = False\n\n pygame.display.flip()\n\npygame.quit()\n\n\n\n\n\n","sub_path":"01/game3.py","file_name":"game3.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336968762","text":"\n\"\"\" \nLearning collections: high-performing container data \n1.1 Counter objects \ncount=counter()\nfor word in ['hello','goodbye']\n count[word]+=1 \nprint(count)\n\ndef(wordcounter): \n words=re.findall(r'\\w+',open('hello.txt').read().lower())\n return words \n\n\"\"\"\n\n\n\nfrom 
statistics import mean,median,mode\nN = int(input().strip())\n\n# this gets rid of the trailing whitespace before and after the numeric input \n\narr = [int(i) for i in input().strip().split(' ')]\narr.sort()\n\n# split the array of numbers into unique numbers and insert into a list \n\n#print('{0:.1f}'.format(sum(arr)/N))\n\nprint(sum(arr)/N)\n\nif N % 2 == 1:\n print(arr[int((N-1)/2)])\nelse:\n print(0.5*(arr[int(N/2)-1]+arr[int(N/2)]))\n \ncounts=[]\nfor i in arr:\n counts.append(arr.count(i))\nif max(counts) > 1:\n print(arr[counts.index(max(counts))])\nelse:\n print(min(arr))\n\n######################################\n\n#Weighted mean excercise \n\nfrom statistics import mean,median,mode\nN = int(input().strip())\narr = [int(i) for i in input().strip().split(' ')]\nweighhts = [int(i) for i in input().strip().split(\" \")]\n\ncount=0\ni=0 \nweighted=int(arr[i])*int(weights[i])\ncount=count+weighted\ni+=1\nprint(count)\n \nweightedmean=sum()\n\n","sub_path":"Leetcode and Hackerank/hackerank/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328493657","text":"#!/usr/bin/env python\r\n#coding:utf-8\r\nimport os,sys,json,socket,hashlib,shutil,socketserver\r\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\nsys.path.append(BASE_DIR)\r\nfrom cfg import config\r\n\r\nUSER_HOME_DIR = config.USER_HOME_DIR\r\nUSER_ACCOUNT_DIR = config.USER_ACCOUNT_DIR\r\n\r\nclass MyServer(socketserver.BaseRequestHandler):\r\n def handle(self):\r\n print(\"go connection from\",self.client_address)\r\n conn = self.request\r\n ret = {\"code\": \"\", \"msg\": \"\"}\r\n flag = True\r\n while flag:\r\n print(\"123\")\r\n client_user_info = json.loads(conn.recv(1024).decode())\r\n client_username = client_user_info[\"username\"]\r\n client_password = client_user_info[\"password\"]\r\n user_account_file = os.path.join(USER_ACCOUNT_DIR, client_username)\r\n user_home_dir = os.path.join(USER_HOME_DIR, client_username)\r\n if not os.path.isdir(user_home_dir): os.makedirs(user_home_dir)\r\n if os.path.isfile(user_account_file):\r\n server_user_info = json.load(open(user_account_file))\r\n if client_password == server_user_info[\"password\"]:\r\n ret[\"code\"] = \"200\"\r\n ret[\"msg\"] = \"login success\"\r\n conn.send(json.dumps(ret).encode())\r\n current_user_dir = user_home_dir\r\n while flag:\r\n client_user_input = conn.recv(1024).decode()\r\n if not client_user_input:break\r\n client_user_command = client_user_input.split()[0]\r\n if client_user_command.startswith(\"dir\"):\r\n cmd_res = os.popen(\"ls -lh %s\" % current_user_dir).read()\r\n if len(cmd_res) == 0:\r\n cmd_res = \"has not output\"\r\n conn.send(str(len(cmd_res)).encode())\r\n conn.recv(1024)\r\n conn.send(cmd_res.encode())\r\n elif client_user_command.startswith(\"get\"):\r\n client_filename = client_user_input.split()[1]\r\n file_path = os.path.join(user_home_dir,client_filename)\r\n if os.path.isfile(file_path):\r\n ret[\"code\"] = \"300\"\r\n ret[\"msg\"] = \"File exist\"\r\n conn.send(json.dumps(ret).encode())\r\n conn.recv(1024)\r\n file_size = os.stat(file_path).st_size\r\n conn.send(str(file_size).encode())\r\n conn.recv(1024)\r\n f = open(file_path,\"rb\")\r\n m = hashlib.md5()\r\n for line in f:\r\n conn.send(line)\r\n m.update(line)\r\n f.close()\r\n conn.send(m.hexdigest().encode())\r\n else:\r\n ret[\"code\"] = \"301\"\r\n ret[\"msg\"] = \"File not exist\"\r\n 
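In the FTP record above, the 'get' branch streams the file while feeding the same bytes into hashlib.md5, so the checksum costs no extra pass over the file. Iterating line-by-line is fragile for binary payloads, where a "line" can be arbitrarily long; a sketch of the fixed-size-chunk variant follows, with the chunk size an arbitrary choice.

```python
# Sketch: stream a file over a socket in fixed-size chunks while
# updating an MD5 digest on the same bytes, then send the digest.
import hashlib


def send_file(conn, path, chunk_size=4096):
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            conn.sendall(chunk)
            digest.update(chunk)
    conn.sendall(digest.hexdigest().encode())
```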
conn.send(json.dumps(ret).encode())\r\n conn.recv(1024)\r\n elif client_user_command.startswith(\"put\"):\r\n client_filename = client_user_input.split()[1]\r\n client_file_path = os.path.join(current_user_dir,client_filename)\r\n conn.send(b\"ready to received data\")\r\n client_file_size = int(conn.recv(1024).decode())\r\n f = open(client_file_path,\"wb\")\r\n m = hashlib.md5()\r\n received_size = 0\r\n while received_size != client_file_size:\r\n if client_file_size - received_size > 1024:\r\n size = 1024\r\n else:\r\n size = client_file_size - received_size\r\n data = conn.recv(size)\r\n f.write(data)\r\n m.update(data)\r\n received_size += len(data)\r\n else:\r\n print(\"put has done\")\r\n f.close()\r\n new_file_md5 = m.hexdigest()\r\n file_md5 = conn.recv(1024).decode()\r\n print(\"server file md5 is:\",new_file_md5)\r\n print(\"client file md5 is:\",file_md5)\r\n elif client_user_command.startswith(\"cd\"):\r\n client_des_dir = client_user_input.split()[1]\r\n if client_des_dir == \"..\":\r\n if current_user_dir == user_home_dir:\r\n client_des_path = current_user_dir\r\n else:\r\n client_des_path = os.path.dirname(current_user_dir)\r\n else:\r\n client_des_path = os.path.join(current_user_dir,client_des_dir)\r\n if os.path.isdir(client_des_path):\r\n conn.send(client_des_path.encode())\r\n current_user_dir = client_des_path\r\n elif client_user_command.startswith(\"pwd\"):\r\n conn.send(current_user_dir.encode())\r\n elif client_user_command.startswith(\"mkdir\"):\r\n client_mk_dir = client_user_input.split()[1]\r\n client_mk_path = os.path.join(current_user_dir,client_mk_dir)\r\n os.makedirs(client_mk_path)\r\n conn.send(str(\"%s dir has make\" % client_mk_dir).encode())\r\n elif client_user_command.startswith(\"rm\"):\r\n client_rm_dir = client_user_input.split()[1]\r\n client_rm_path = os.path.join(current_user_dir,client_rm_dir)\r\n if os.path.isdir(client_rm_path):\r\n shutil.rmtree(client_rm_path)\r\n temp = b\"Directory has removed\"\r\n elif os.path.isfile(client_rm_path):\r\n os.remove(client_rm_path)\r\n temp = b\"File has removed\"\r\n elif os.path.exists(client_rm_path):\r\n temp = b\"File or directory is not exist\"\r\n conn.send(temp)\r\n elif client_user_command.startswith(\"quit\"):\r\n flag = False\r\n else:\r\n ret[\"code\"] = \"207\"\r\n ret[\"msg\"] = \"password wrong\"\r\n conn.send(json.dumps(ret).encode())\r\n else:\r\n print(\"对不起你输入的账号【%s】不存在\" % client_username)\r\n ret[\"code\"] = \"205\"\r\n ret[\"msg\"] = \"user is not exist\"\r\n conn.send(json.dumps(ret).encode())\r\n\r\nif __name__ == \"__main__\":\r\n server = socketserver.ThreadingTCPServer((config.HOST,config.PORT),MyServer)\r\n server.serve_forever()","sub_path":"FTP实现/server/core/server_main.py","file_name":"server_main.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493059464","text":"#!/usr/bin/env python\n\nfrom google.protobuf.internal import encoder\nfrom sys import exit\nimport protocol_pb2\nimport socket\n\ndef seriallize(message):\n serrialized = message.SerializeToString()\n delimeter = encoder._VarintBytes(len(serrialized))\n return delimeter + serrialized\n\ndef log(line):\n message = protocol_pb2.Message()\n message.type = protocol_pb2.Message.LOG\n message.log.line = line\n return seriallize(message)\n\ndef frame(img1, img2):\n message = protocol_pb2.Message()\n message.type = protocol_pb2.Message.IMAGES\n message.images.left.data = open(img1, 'r').read()\n 
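# width/height are sent as 0 placeholders; the receiver presumably recovers the real size from the encoded bytes (and 'rb' would be a safer mode than 'r' for image files)\n    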
message.images.left.width = 0\n message.images.left.height = 0\n message.images.right.data = open(img2, 'r').read()\n message.images.right.width = 0\n message.images.right.height = 0\n return seriallize(message)\n\ndef send(server, port, message):\n s = socket.socket()\n s.connect((server, port))\n s.send(message)\n s.close()\n\ndef cube():\n message = protocol_pb2.Message()\n message.type = protocol_pb2.Message.MODEL\n for x in (-20.0, 20.0):\n for y in (-20.0, 20.0):\n for z in (-20.0, 20.0):\n vertex = message.model.verticies.add()\n vertex.x = x\n vertex.y = y\n vertex.z = z\n del message.model.faces[:]\n return seriallize(message)\n","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25162724","text":"import datetime\nfrom typing import Tuple, Union, List\n\nfrom src.database.models.event import db, Event\nfrom src.database.schema.event_schema import EventSchema\nfrom src.logging.mixin import LoggingMixin\n\n\nclass WebsiteEventRepository(LoggingMixin):\n def __init__(self, event_schema: EventSchema) -> None:\n self.event_schema = event_schema\n\n def add_event(self, event: dict) -> Tuple[dict, int]:\n deserialized_event, errors = self.event_schema.load(event)\n if errors:\n return errors, 400\n db.session.add(deserialized_event)\n db.session.commit()\n\n return {'message': 'Success'}, 201\n\n def get_user_events_last_7_days(self, queried_user_id: str) -> Tuple[Union[dict, List[dict]], int]:\n all_user_events = Event.query.filter_by(user_id=queried_user_id)\n\n last_week_datetime = datetime.datetime.utcnow() - datetime.timedelta(days=7)\n user_events_for_last_7_days = all_user_events.filter(Event.timestamp >= last_week_datetime)\n\n user_events = user_events_for_last_7_days.all()\n\n if not user_events:\n return {\n 'message': 'Requested user does not exist or did not connect to the website in the last 7 days: '\n '{}'.format(queried_user_id)\n }, 400\n\n serialized_user_events, errors = self.event_schema.dump(user_events, many=True)\n if errors:\n return errors, 400\n\n return serialized_user_events, 200\n\n def delete_user_events(self, queried_user_id: str) -> Tuple[dict, int]:\n try:\n count_deleted_records = db.session.query(Event).filter_by(user_id=queried_user_id).delete()\n db.session.commit()\n return {'deleted_records': count_deleted_records}, 200\n except Exception as deletion_error:\n db.session.rollback()\n return {\n 'message': 'Error during the records deletion for user {}.\\nDetails: {}'\n .format(queried_user_id, deletion_error)\n }, 400\n","sub_path":"src/repository/website_event_repository.py","file_name":"website_event_repository.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203042076","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom tornado.concurrent import return_future\n\n\nclass SearchProvider(object):\n def __init__(self, config, db, authnz_wrapper=None, io_loop=None):\n raise NotImplementedError()\n\n def index_review(self, review):\n raise NotImplementedError()\n\n @return_future\n def get_by_violation_key_name(self, key_id, current_page=1, page_size=10, domain=None, page_filter=None, callback=None):\n raise NotImplementedError()\n\n @return_future\n def get_domain_active_reviews(self, domain, current_page=1, page_size=10, page_filter=None, callback=None):\n raise NotImplementedError()\n\n @classmethod\n 
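# builds the command-line interface shared by concrete providers' setup scripts: index create/recreate/delete, key selection, batch size and verbosity\n    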
def argparser(cls):\n import argparse\n\n parser = argparse.ArgumentParser(description='Setup Holmes index on an ElasticSearch server')\n parser.add_argument(\n '-c', '--conf',\n nargs=1,\n metavar='conf_file',\n help='path to configuration file'\n )\n parser.add_argument(\n '-s', '--server',\n nargs=1,\n metavar='host:port',\n help='elastic search server host and port'\n )\n parser.add_argument(\n '-i', '--index',\n nargs=1,\n metavar='index_name',\n help='name of the index'\n )\n parser.add_argument(\n '--create',\n action='store_true',\n help='create the index'\n )\n parser.add_argument(\n '--recreate',\n action='store_true',\n help='recreate the index (use with caution)'\n )\n parser.add_argument(\n '--delete',\n action='store_true',\n help='delete the index (use with caution)'\n )\n parser.add_argument(\n '-k', '--keys',\n nargs='+',\n metavar='key',\n help='index reviews with violation of such keys'\n )\n parser.add_argument(\n '-a', '--all-keys',\n action='store_true',\n help='index all reviews with at least one violation of any key (might take long)'\n )\n parser.add_argument(\n '-b', '--batch-size',\n type=int,\n nargs=1,\n metavar='N',\n help='batch size (default is 200)'\n )\n parser.add_argument(\n '-r', '--replace',\n action='store_true',\n help='replace entire index (default is increment/resume)'\n )\n parser.add_argument(\n '-v', '--verbose',\n action='count',\n default=0,\n help='log level: v=warning, vv=info, vvv=debug'\n )\n\n return parser\n\n @classmethod\n def new_instance(cls, config):\n raise NotImplementedError()\n\n @classmethod\n def main(cls):\n raise NotImplementedError()\n\nif __name__ == '__main__':\n SearchProvider.main()\n","sub_path":"holmes/search_providers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488626357","text":"\"\"\"Signal receivers for the workspace app.\"\"\"\n\nimport datetime\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.dispatch import receiver\n\nfrom airavata.model.user.ttypes import Status, UserProfile\nfrom airavata.model.workspace.ttypes import Project\nfrom django_airavata.apps.auth.utils import get_authz_token\nfrom django_airavata.utils import get_airavata_client, get_user_profile_client\n\nlog = logging.getLogger(__name__)\n\n\n@receiver(user_logged_in)\ndef create_user_profile_for_new_user(sender, request, user, **kwargs):\n \"\"\"Create basic User Profile for new user.\"\"\"\n # auth middleware hasn't run yet so authz_token attribute is not available\n # on request, so need to create the authz_token manually\n authz_token = get_authz_token(request)\n with get_user_profile_client() as user_profile_client:\n user_profile_exists = user_profile_client.doesUserExist(\n authz_token, user.username, settings.GATEWAY_ID)\n if not user_profile_exists:\n log.debug(\"UserProfile doesn't exist for {username}, \"\n \"creating...\".format(username=user.username))\n new_user_profile = UserProfile()\n new_user_profile.airavataInternalUserId = (user.username + \"@\" +\n settings.GATEWAY_ID)\n new_user_profile.userId = user.username\n new_user_profile.gatewayId = settings.GATEWAY_ID\n new_user_profile.emails = [user.email]\n new_user_profile.firstName = user.first_name\n new_user_profile.lastName = user.last_name\n unix_utcnow_ms = int(datetime.datetime.utcnow().timestamp() * 1000)\n new_user_profile.creationTime = unix_utcnow_ms\n 
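# Thrift UserProfile timestamps are UNIX epoch milliseconds, hence the * 1000 above\n            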
new_user_profile.lastAccessTime = unix_utcnow_ms\n new_user_profile.validUntil = -1\n new_user_profile.State = Status.ACTIVE\n user_profile_client.addUserProfile(authz_token, new_user_profile)\n log.info(\"Created a new UserProfile for {username}\".format(\n username=user.username))\n\n\n@receiver(user_logged_in)\ndef create_default_project_if_not_exists(sender, request, user, **kwargs):\n \"\"\"Create 'Default Project' for new user.\"\"\"\n # auth middleware hasn't run yet so authz_token attribute is not available\n # on request, so need to create the authz_token manually\n authz_token = get_authz_token(request)\n with get_airavata_client() as airavata_client:\n # Just retrieve the first project\n projects = airavata_client.getUserProjects(\n authz_token, settings.GATEWAY_ID, request.user.username, 1, 0)\n if len(projects) == 0:\n log.info(\"Creating default project for user {}\".format(\n user.username))\n default_project = Project()\n default_project.owner = request.user.username\n default_project.name = \"Default Project\"\n default_project.gatewayId = settings.GATEWAY_ID\n default_project.description = (\"This is the default project for \"\n \"user {owner}\".format(\n owner=default_project.owner))\n airavata_client.createProject(authz_token, settings.GATEWAY_ID,\n default_project)\n","sub_path":"django_airavata/apps/workspace/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110700356","text":"'''\nAfter a day of looking up stuff:\n Here is how to establish a connection with a database &\n insert data as object & retreve data as JSON objects\nIn this api I am using:\n sqlalchemy as an ORM to communicate with the data base\n marshmallow to serialize (ORM->python primitives) or deserialize (python primitive -> ORM)\n flask to establish the api\n'''\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref\nfrom flask import *\nfrom marshmallow_sqlalchemy import *\nfrom flask_sqlalchemy import *\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'\ndb = SQLAlchemy(app)\n\nclass Author(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(255))\n\n def __repr__(self):\n return \"\".format(self=self)\nclass Book(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(255))\n author_id = db.Column(db.Integer, db.ForeignKey(\"author.id\"))\n author= relationship(\"Author\", backref = backref(\"books\"))\n\ndb.create_all()\n\nclass AuthorSchema(SQLAlchemyAutoSchema):\n class Meta:\n model = Author\n include_relationships = True\n load_instance = True\n\n\nclass BookSchema(SQLAlchemyAutoSchema):\n class Meta:\n model = Book\n include_fk = True\n load_instance = True\n\n'''\nauthor = Author(name=\"Chuck Paluhniuk\")\nauthor_schema = AuthorSchema()\nbook = Book(title=\"Fight Club\", author=author)\ndb.session.add(author)\ndb.session.add(book)\ndb.session.commit()\n'''\n@app.route(\"/pushAuthor\", methods=[\"PUT\", \"POST\"])\ndef pushAuthor():\n author = Author(name = request.form[\"name\"])\n db.session.add(author)\n db.session.commit()\n return \"pushed \"+ request.form[\"name\"], 200\n\n@app.route(\"/pushBook/\", methods=[\"PUT\", \"POST\"])\ndef pushBook(authName):\n auth = Author.query.filter_by(name = authName)\n if auth is None:\n abort(\"enter author first\")\n book = Book(title 
= request.form[\"title\"])\n    db.session.add(book)\n    db.session.commit()\n    return \"pushed \"+ request.form[\"title\"],200\n\n\n@app.route(\"/getall\")\ndef getAll():\n    auth = Author.query.all()\n    authSchema = AuthorSchema(many=True)\n    result = authSchema.dump(auth)\n    return jsonify(result), 200\n\n\n'''\ndump_data = author_schema.dump(author)\nprint(dump_data)\n# {'id': 1, 'name': 'Chuck Paluhniuk', 'books': [1]}\n\nload_data = author_schema.load(dump_data, session=db.session)\nprint(load_data)\n# \n'''\nif __name__== \"__main__\":\n    app.run(debug=True)","sub_path":"sixthApi/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"349369174","text":"\"\"\"\nTask description:\n    IMDB movie review binary classification\n\nData description:\n    In the IMDB dataset provided by Keras, every word is mapped to an integer greater than 0 giving its frequency rank (handy for filtering stop words and rare words)\n    Reviews differ in length, so each one must be padded or truncated, which Keras's pad_sequences function can do\n\nInput:\n    (None, max_len) # (25000, 400)\n\nOutput:\n    (None,) # (25000,)\n\nEnvironment:\n    Windows 10 Pro x64 1709\n    CPU i5-4590 @ 3.30 GHz\n    RAM 8GB\n\nRequirements:\n    Tensorflow 1.4\n    Keras 2.1.6\n\nResults:\n    Epoch 1/2:\n        25000/25000 [==============================] - 182s 7ms/step - loss: 0.4046 - acc: 0.7995 - val_loss: 0.3195 - val_acc: 0.8620\n    Epoch 2/2\n        25000/25000 [==============================] - 168s 7ms/step - loss: 0.2326 - acc: 0.9067 - val_loss: 0.3008 - val_acc: 0.8726\n\nReferences:\n    https://github.com/keras-team/keras/blob/master/examples/imdb_cnn.py\n\n\"\"\"\n\nimport keras.layers as layers\nfrom keras import losses\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.preprocessing import sequence\n\nfrom huaytools.dataset import load_imdb\n\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\nlog = tf.logging.info\n\n\nclass Config:\n\n    # train\n    batch_size = 32\n    epochs = 2\n\n    # embedding\n    vocab_size = 5000\n    embedding_dims = 50\n    max_len = 400\n\n    # cnn\n    filters = 250\n    kernel_size = 3\n\n    # fc\n    hidden_dims = 250\n\n\ndef build_model(config: Config):\n    model = Sequential()\n\n    # Embedding Layer\n    model.add(layers.Embedding(input_dim=config.vocab_size,\n                               output_dim=config.embedding_dims,\n                               input_length=config.max_len))\n    model.add(layers.Dropout(rate=0.2))\n\n    # Conv1D with Max pooling\n    model.add(layers.Conv1D(filters=config.filters,\n                            kernel_size=config.kernel_size,\n                            padding='valid',\n                            strides=1))\n    model.add(layers.GlobalMaxPool1D())\n\n    # Dense Layer with relu\n    model.add(layers.Dense(units=config.hidden_dims, activation='relu'))\n    model.add(layers.Dropout(rate=0.2))\n\n    # Dense layer with sigmoid\n    model.add(layers.Dense(units=1, activation='sigmoid'))\n\n    model.compile(optimizer='adam',  # optimizer=optimizers.Adam()\n                  loss='binary_crossentropy',  # losses.binary_crossentropy\n                  metrics=['accuracy'])\n\n    return model\n\n\nif __name__ == '__main__':\n    config = Config()\n\n    print('Loading data...')\n    (x_train, y_train), (x_test, y_test) = load_imdb(num_words=config.vocab_size)\n\n    print('Pad sequences...')\n    x_train = sequence.pad_sequences(x_train, maxlen=config.max_len)\n    x_test = sequence.pad_sequences(x_test, maxlen=config.max_len)\n    print('x_train shape:', x_train.shape)\n    print('y_train shape:', y_train.shape)\n    print('x_test shape:', x_test.shape)\n    print('y_test shape:', y_test.shape)\n\n    model = build_model(config)\n\n    model.fit(x=x_train, y=y_train,\n              batch_size=config.batch_size,\n              epochs=config.epochs,\n              validation_data=[x_test, 
y_test])\n","sub_path":"nlp/imdb_cnn.py","file_name":"imdb_cnn.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44320018","text":"\"\"\" Core functions for the project \"\"\"\n# pylint: disable-msg=C0103\nimport pickle\nimport numpy as np\nimport tensorflow as tf\ntf.enable_eager_execution()\n\ndef save_image(image):\n \"\"\" Save the image received to a temp folder on the server \"\"\"\n image.save(\"./image/temp.jpg\")\n\ndef preprocess(image_path):\n \"\"\" Preprocess the image before feeding it to Inception V3 model \"\"\"\n # Convert all the images to size 299x299 as expected by the inception v3 model\n img = tf.keras.preprocessing.image.load_img(image_path, target_size=(299, 299))\n # Convert image to numpy array of 3-dimensions\n x = tf.keras.preprocessing.image.img_to_array(img)\n # Add one more dimension\n x = np.expand_dims(x, axis=0)\n # preprocess the images using preprocess_input() from inception module\n x = tf.keras.applications.inception_v3.preprocess_input(x)\n\n return x\n\ndef load_inception():\n \"\"\" Load inceptionv3 model without the last layer \"\"\"\n # Load the inception v3 model\n model_inceptionv3 = tf.keras.applications.inception_v3.InceptionV3(weights=\"imagenet\")\n # Remove the last layer (output softmax layer) from the inception V3\n model_inceptionv3_new = tf.keras.models.Model(model_inceptionv3.input, model_inceptionv3.layers[-2].output)\n\n return model_inceptionv3_new\n\ndef encode(image):\n \"\"\" Function to encode a given image into a vector of size (2048, ) \"\"\"\n image = preprocess(image)\n model_inceptionv3_new = load_inception()\n fea_vec = model_inceptionv3_new.predict(image) # Get the encoding vector for the image\n fea_vec = np.reshape(fea_vec, fea_vec.shape[1]) # reshape from (1, 2048) to (2048, )\n\n return fea_vec\n\ndef process_image(image_path):\n \"\"\" Process image before feeding it to the model \"\"\"\n image = encode(image_path)\n image = image.reshape((1, 2048))\n\n return image\n\ndef greedy_search(image_path):\n \"\"\" Core function which does all the job \"\"\"\n # contants\n in_text = \"startseq\"\n max_length = 34\n wordtoix = pickle.load(open(\"./pickle/wordtoix.pkl\", \"rb\"))\n ixtoword = pickle.load(open(\"./pickle/ixtoword.pkl\", \"rb\"))\n\n model_final = tf.keras.models.load_model(\"./model/model_final.h5\")\n photo = process_image(image_path)\n for _ in range(max_length):\n sequence = [wordtoix[w] for w in in_text.split() if w in wordtoix]\n sequence = tf.keras.preprocessing.sequence.pad_sequences([sequence], maxlen=max_length)\n\n yhat = model_final.predict([photo, sequence])\n yhat = np.argmax(yhat)\n word = ixtoword[yhat]\n\n in_text += \" \" + word\n\n if word == \"endseq\":\n break\n\n final = in_text.split()\n final = final[1: -1]\n final = \" \".join(final)\n\n tf.keras.backend.clear_session()\n\n return {\"Caption\": final}\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"649270296","text":"'''Find new tweets and append them to the queue'''\nimport tweepy, re\nfrom queue import Queue\n\n#Tweet variables\ntwitters_to_rt = ['skinhub', 'SteamAnalyst', 'CSGO500',\n 'CSGOatsecom', 'Society_gg', 'hellcasecom',\n 'CSGOExclusive', 'earnggofficial', 'DrakeMoon',\n 'csgomassive', 'CSGODerby', 'skinupgg', 'flashyflashycom', 'RaffleTrade', 'SteamGems', 
'zevoCSGO']\nwords_to_rt = [\"giveaway\", \"contest\", \"enter\", \"rt\", \"luck\"]\nspecial_words = ['reply', 'tag', 'trade', 'affi', 'sub', 'follow', 'like']\nblocked_words = [\"thank\", \"winning\", \"congrat\",\n \"dm\", \"profile url\", \"vote\", \"won\"]\nre_pat = r'((?<=@)|(?<=@ ))([\\w]*)'\n\n#Normalize unicode\ndef uni_norm(text):\n return text.translate({0x2018:0x27, 0x2019:0x27, 0x201C:0x22, 0x201D:0x22,\n 0xa0:0x20})\n\n#Search user's timeline for tweets that we want\ndef getNewestTweets(user, API, DONE, Q):\n print(\"----Scraping %s\" % user, flush=True)\n #Try to get timeline\n try:\n tweets = API.user_timeline(screen_name=user,\n count=5, exclude_replies='true',\n include_rts='false', tweet_mode='extended')\n except tweepy.TweepError as e:\n print('*---%s' % e, flush=True)\n return DONE\n #Check for extra features\n for tweet in tweets:\n extras = {'user': \"\", 'tag': False,\n 'url': False, 'drake_aff': False, 'like': False}\n tweet_id = tweet.id_str\n if tweet_id in DONE.keys():\n continue\n tweet_text = uni_norm(tweet.full_text).lower()\n if any(x in tweet_text for x in words_to_rt):\n if any(y in tweet_text for y in blocked_words):\n continue\n if any(z in tweet_text for z in special_words):\n extras['user'] = user\n if 'tag' in tweet_text:\n extras['tag'] = True\n if 'trade' in tweet_text:\n extras['url'] = True\n if 'affi' in tweet_text and user == \"DrakeMoon\":\n extras['drake_aff'] = True\n if 'sub' in tweet_text:\n print(\"----%s wants to get a subscriber\" % user, flush=True)\n if 'follow' in tweet_text:\n follow_list = re.findall(re_pat, tweet_text)\n for u in follow_list:\n try:\n API.create_friendship(id=u)\n except tweepy.TweepError as e:\n print('*---%s' % e, flush=True)\n if e.api_code == 261:\n return DONE\n if 'like' in tweet_text:\n extras['like'] = True\n DONE[tweet_id] = (False, extras)\n Q.put((tweet_id, extras), True)\n return DONE\n\n#Go through all users and then search\ndef getUserTweets(API, done, Q):\n for user in twitters_to_rt:\n done = getNewestTweets(user, API, done, Q)\n return done","sub_path":"find_tweets.py","file_name":"find_tweets.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522984600","text":"from dataclasses import dataclass\nfrom typing import Optional, Any, TypeVar, Type, cast\n\n\nT = TypeVar(\"T\")\n\n\ndef from_int(x: Any) -> int:\n assert isinstance(x, int) and not isinstance(x, bool)\n return x\n\n\ndef from_none(x: Any) -> Any:\n assert x is None\n return x\n\n\ndef from_union(fs, x):\n for f in fs:\n try:\n return f(x)\n except:\n pass\n assert False\n\n\ndef from_bool(x: Any) -> bool:\n assert isinstance(x, bool)\n return x\n\n\ndef from_str(x: Any) -> str:\n assert isinstance(x, str)\n return x\n\n\ndef to_class(c: Type[T], x: Any) -> dict:\n assert isinstance(x, c)\n return cast(Any, x).to_dict()\n\n\n@dataclass\nclass ConfirmacaoReqDTO:\n idCampanha: Optional[int] = None\n idPedidoParceiro: Optional[int] = None\n confirmado: Optional[bool] = None\n idPedidoMktplc: Optional[str] = None\n cancelado: Optional[bool] = None\n motivoCancelamento: Optional[str] = None\n parceiro: Optional[str] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'ConfirmacaoReqDTO':\n assert isinstance(obj, dict)\n idCampanha = from_union([from_int, from_none], obj.get(\"idCampanha\"))\n idPedidoParceiro = from_union([from_int, from_none], obj.get(\"idPedidoParceiro\"))\n confirmado = from_union([from_bool, from_none], 
obj.get(\"confirmado\"))\n idPedidoMktplc = from_union([from_str, from_none], obj.get(\"idPedidoMktplc\"))\n cancelado = from_union([from_bool, from_none], obj.get(\"cancelado\"))\n motivoCancelamento = from_union([from_str, from_none], obj.get(\"motivoCancelamento\"))\n parceiro = from_union([from_str, from_none], obj.get(\"parceiro\"))\n return ConfirmacaoReqDTO(idCampanha, idPedidoParceiro, confirmado, idPedidoMktplc, cancelado, motivoCancelamento, parceiro)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"idCampanha\"] = from_union([from_int, from_none], self.idCampanha)\n result[\"idPedidoParceiro\"] = from_union([from_int, from_none], self.idPedidoParceiro)\n result[\"confirmado\"] = from_union([from_bool, from_none], self.confirmado)\n result[\"idPedidoMktplc\"] = from_union([from_str, from_none], self.idPedidoMktplc)\n result[\"cancelado\"] = from_union([from_bool, from_none], self.cancelado)\n result[\"motivoCancelamento\"] = from_union([from_str, from_none], self.motivoCancelamento)\n result[\"parceiro\"] = from_union([from_str, from_none], self.parceiro)\n return result\n\n\ndef ConfirmacaoReqDTOfromdict(s: Any) -> ConfirmacaoReqDTO:\n return ConfirmacaoReqDTO.from_dict(s)\n\n\ndef ConfirmacaoReqDTOtodict(x: ConfirmacaoReqDTO) -> Any:\n return to_class(ConfirmacaoReqDTO, x)\n","sub_path":"lib/models/request/ConfirmacaoReqDTO.py","file_name":"ConfirmacaoReqDTO.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147970776","text":"# __ \n# | |--.---.-.-----.---.-.-----.---.-. .--------.---.-.--.--.---.-.\n# | _ | _ | | _ | | _ |__| | _ | | | _ |\n# |_____|___._|__|__|___._|__|__|___._|__|__|__|__|___._|___ |___._|\n# |_____| \n# \n\n\"\"\"\n banana.maya\n ~~~~~~~~~~~\n \n Set of extensions for the Python API of Autodesk Maya.\n \n :copyright: Copyright 2014 by Christopher Crouzet.\n :license: MIT, see LICENSE for details.\n\"\"\"\n\n__version__ = '0.0.2'\n\n__all__ = [\n]\n\n\ndef patch():\n from gorilla.extensionsregistrar import ExtensionsRegistrar\n from banana.maya import extensions\n \n ExtensionsRegistrar.register_extensions(extensions, patch=True)\n","sub_path":"banana/maya/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486785528","text":"from flask import Flask, request\n\n\ndef create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_mapping(\n SECRET_KEY='dev',\n )\n\n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n\n\n @app.route('/hello')\n def hello():\n name = request.args.get('name', 'World') \n return f\"Hello {name}!\"\n \n\n @app.route('/number/')\n def number_route(n):\n return f'Number: {n}'\n\n @app.route('/')\n def index():\n return \"Welcome to Lazaro's App!!\"\n \n\n return app\n\n","sub_path":"flask_boilerplate/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"510895218","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport collections\nimport re\n\nimport sys\n\nfrom base_utils import print_under\n\n# Overview of Common Mapping Methods\nprint_under('Overview of Common Mapping Methods常见映射方法')\nprint_under('default dict')\n# 
pass in a default_factory, the function called when a key is missing, i.e. when __missing__() triggers\ndefault_dict = collections.defaultdict(list)\ndefault_dict['a'].append('a')\nprint(default_dict)\n\n\nprint_under('OrderedDict')\norder_dict = collections.OrderedDict()\norder_dict['a'] = 1\norder_dict['b'] = 2\norder_dict['c'] = 3\nprint(order_dict) # OrderedDict([('a', 1), ('b', 2), ('c', 3)])\norder_dict.popitem(last=True)\nprint(order_dict) # OrderedDict([('a', 1), ('b', 2)])\norder_dict.popitem(last=False)\nprint(order_dict) # OrderedDict([('b', 2)])\n\nprint_under('about dict update')\na_dict = {'a': 1, 'b': 2, 'c': 3}\na_dict.update({'a': 10})\nprint(a_dict) # {'a': 10, 'b': 2, 'c': 3}\na_dict.update([('a', 1), ('b', 20)])\nprint(a_dict)\n\nprint_under('setdefault example 3-2')\n\nWORD_RE = re.compile(r'\\w+')\nindex = {}\n\nwith open(sys.argv[1], encoding='utf-8') as fp:\n    for line_no, line in enumerate(fp, 1):\n        for match in WORD_RE.finditer(line):\n            word = match.group()\n            column_no = match.start()+1\n            location = (line_no, column_no)\n            # this is actually a rather poor implementation, written this way only to prove the point\n            # occurrences = index.get(word, []) first lookup\n            # occurrences.append(location)\n            # index[word] = occurrences second lookup\n            index.setdefault(word, []).append(location)\n\nfor word in sorted(index, key=str.upper):\n    print(word, index[word])\n\n","sub_path":"chap3 Dictionaries and Sets/3.3.py","file_name":"3.3.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"87563207","text":"from sys import stdin as ip\r\nfrom math import factorial as fff\r\nfrom fractions import gcd\r\nfrom itertools import combinations as c\r\nf=pow(2,3,3)\r\nxs=fff(f+2)\r\nauthor='biggy_bs'\r\n# Main code goes here !!\r\ndp=[0]*110\r\nval_dic={}\r\ndef factors(n): \r\n    l=[]\r\n    for i in range(2, 1000):\r\n        q,r = n/i, n%i \r\n        if r == 0:\r\n            l.append(i)    \r\n    return l\r\ndef divisor(x):\r\n    for i in range(2,n):\r\n        if x%i==0:\r\n            return i\r\ndef primes_upto(limit):\r\n    is_prime = [False] * 2 + [True] * (limit - 1)\r\n    for n in xrange(int(limit**0.5 + 1.5)): \r\n        if is_prime[n]:\r\n            for i in range(n * n, limit + 1, n): \r\n                is_prime[i] = False\r\n    for i in xrange(limit + 1):\r\n        if is_prime[i]: yield i\r\nxxa=list(primes_upto(10000))\r\nprimes={}\r\ndic={}\r\ndef gen(pos,n,cur,j):\r\n    if pos>n:\r\n        return\r\n    if len(dic)==j:\r\n        return\r\n    if len(cur)==n:\r\n        if not ok(cur):\r\n            return\r\n    gen(pos+1,n,cur+'0',j)\r\n    gen(pos+1,n,cur+'1',j)\r\n\r\ndef ok(s):\r\n    s='1'+s+'1'\r\n    dic[s]=[]\r\n    for i in xrange(2,11):\r\n        c=int(s,i)\r\n        fac=factors(c)\r\n        if fac==[]:\r\n            del dic[s]\r\n            return False\r\n        else:\r\n            dic[s].append(fac[0])\r\n    else:\r\n        return True\r\n\r\nf=open('op2.txt',\"w\")\r\nfor _ in xrange(int(ip.readline())):\r\n    n,j=map(int,ip.readline().split())\r\n    n=50\r\n    gen(0,48,'',j)\r\n    f.write(\"Case #%d:\\n\"%(_+1))\r\n    for i in dic:\r\n        f.write(str(i)+\" \")\r\n        for j in dic[i]:\r\n            f.write(str(j)+\" \")\r\n        f.write(\"\\n\")\r\nf.close()\r\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_biggydbs_codejam1.py","file_name":"16_0_3_biggydbs_codejam1.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"197083488","text":"#! 
/usr/bin/env python\n\n# Set log level to benefit from Scapy warnings\nimport logging\nlogging.getLogger(\"scapy\").setLevel(2)\n\nfrom scapy.all import *\nimport subprocess\nimport sys\nimport time\nfrom random import getrandbits, randint\nfrom ipaddress import IPv4Network, IPv4Address\n\n\nclass MPLS(Packet):\n name = \"MPLS\"\n fields_desc = [\n BitField(\"label\", 3, 20),\n BitField(\"experimental_bits\", 0, 3),\n BitField(\"bottom_of_label_stack\", 1, 1),\n ByteField(\"TTL\", 255)\n ]\n\nif len(sys.argv) != 7 or sys.argv[1] != '-dest' or\\\n\tsys.argv[3] != '-label' or sys.argv[5] != '-b':\n\tprint(\"usage: python sim_traffic.py -dest '' -label -b \")\n\texit(1)\n\n\nbind_layers(Ether, MPLS, type = 0x8847)\nbind_layers(MPLS, MPLS, bottom_of_label_stack = 0)\nbind_layers(MPLS, IP)\n\nwhile True:\n\tp = Ether() / MPLS(label = int(sys.argv[4]), experimental_bits = 0, bottom_of_label_stack=1) / IP(dst = sys.argv[2]) / ICMP()\n\t# p = Ether() / IP(dst = \"10.0.0.3\") / ICMP()\n\ttic = time.perf_counter()\n\tsendp(p, count=int(int(sys.argv[6])/len(p)))\n\ttoc = time.perf_counter()\n\n\twait_for = toc - tic\n\tif wait_for < 1:\n\t\ttime.sleep(1 - wait_for)\n\n","sub_path":"scapy/sim_traffic.py","file_name":"sim_traffic.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491287213","text":"# 2013-03-31 Runtime: 65 ms\n\nclass Solution:\n # @param num, a list of integer\n # @return an integer\n def rob(self, num):\n if not num: \n return 0\n dp = [0 for i in xrange(len(num) + 1)]\n dp[0], dp[1] = 0, num[0]\n for i in xrange(2, len(num) + 1):\n dp[i] = max(dp[i-1], dp[i-2] + num[i-1])\n return dp[len(num)]","sub_path":"198_House_Robber.py","file_name":"198_House_Robber.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388685556","text":"from selenium import webdriver\r\nfrom webdriver_manager.chrome import ChromeDriverManager\r\nimport time\r\n\r\n# In order for ChromeDriverManager to work you must pip install it in your own environment.\r\ndriver = webdriver.Chrome(ChromeDriverManager().install())\r\nURL = \"https://witty-hill-0acfceb03.azurestaticapps.net/mutant_teams.html\"\r\n\r\ndriver.get(URL)\r\ntime.sleep(2)\r\n# team filters\r\noriginal_btn = driver.find_element_by_xpath('/html/body/div/label[1]')\r\nforce_btn = driver.find_element_by_xpath('/html/body/div/label[2]')\r\nfactor_btn = driver.find_element_by_xpath('//html/body/div/label[3]')\r\nhellfire_btn = driver.find_element_by_xpath('//html/body/div/label[4]')\r\n\r\n# members\r\nall_members = driver.find_elements_by_xpath('//ul/li')\r\n# original = []\r\n# force = []\r\n# factor = []\r\n# hellfire = []\r\n\r\n# which member belongs to which groups\r\n# for i in range(16):\r\n# print(all_members[i].get_attribute(\"data-teams\"))\r\n\r\n# putting members to groups\r\n''''\r\n== nem jó mert van aki mind a 4 csoporthoz tartozik, olyan vizsgálat kellene, \r\nami azt nézi hogy pl. 
\"original\" és bármi egyéb található ott, nem tudom van-e ilyen\r\nmint pl SQLnél ha jól emlékszem $original$\r\n'''\r\n# def selector():\r\n# for i in range(16):\r\n# if all_members[i].get_attribute(\"data-teams\") == original:\r\n# original.append(all_members[i].text)\r\n# elif all_members[i].get_attribute(\"data-teams\") == force:\r\n# force.append(all_members[i].text)\r\n# elif all_members[i].get_attribute(\"data-teams\") == factor:\r\n# factor.append(all_members[i].text)\r\n# else all_members[i].get_attribute(\"data-teams\") == hellfire:\r\n# hellfire.append(all_members[i].text)\r\n\r\n\r\n# print(original)\r\n# print(force)\r\n# print(factor)\r\n# print(hellfire)\r\n\r\n\r\n# trying to find a difference between members and non members\r\n# angel = driver.find_element_by_id('angel')\r\n# print(angel.text)\r\n# emma_frost = driver.find_element_by_id('emma-frost')\r\n# print(emma_frost.text)\r\n# beast = driver.find_element_by_id('beast')\r\n# print(beast.text)\r\n# print(angel.value_of_css_property(\"opacity\"))\r\n# print(angel.value_of_css_property(\"transform\"))\r\n# print(angel.value_of_css_property(\"visibility\"))\r\n# print(emma_frost.value_of_css_property(\"opacity\"))\r\n# print(emma_frost.value_of_css_property(\"transform\"))\r\n# print(emma_frost.value_of_css_property(\"visibility\"))\r\n# hellfire.click()\r\n# time.sleep(2)\r\n# print(angel.text)\r\n# print(emma_frost.text)\r\n# print(beast.text)\r\n\r\n# assert the difference between the groups\r\n# hellfire_btn.click()\r\n# for j in hellfire:\r\n# assert (driver.find_element_by_id(j).value_of_css_property(\"size\")) == ('100x100')\r\n\r\ndriver.close()\r\n","sub_path":"testproject/mutants.py","file_name":"mutants.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644135597","text":"from dataclasses import dataclass\nfrom typing import Optional, Union\n\nimport torch\n\nfrom detectron2.structures import ImageList\nfrom transformers import PreTrainedTokenizerBase\nfrom transformers.file_utils import PaddingStrategy\n\n\n@dataclass\nclass DataCollatorForKeyValueExtraction:\n \"\"\"\n Data collator that will dynamically pad the inputs received, as well as the labels.\n\n Args:\n tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n The tokenizer used for encoding the data.\n padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n maximum acceptable input length for the model if that argument is not provided.\n * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n different lengths).\n max_length (:obj:`int`, `optional`):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (:obj:`int`, `optional`):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n label_pad_token_id (:obj:`int`, `optional`, defaults to -100):\n 
The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n label_pad_token_id: int = -100\n\n def __call__(self, features):\n label_name = \"label\" if \"label\" in features[0].keys() else \"labels\"\n labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None\n\n has_image_input = \"image\" in features[0]\n has_bbox_input = \"bbox\" in features[0]\n if has_image_input:\n image = ImageList.from_tensors([torch.tensor(feature[\"image\"]) for feature in features], 32)\n for feature in features:\n del feature[\"image\"]\n batch = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n # Conversion to tensors will fail if we have labels as they are not of the same length yet.\n return_tensors=\"pt\" if labels is None else None,\n )\n\n if labels is None:\n return batch\n\n sequence_length = torch.tensor(batch[\"input_ids\"]).shape[1]\n padding_side = self.tokenizer.padding_side\n if padding_side == \"right\":\n batch[\"labels\"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]\n if has_bbox_input:\n batch[\"bbox\"] = [bbox + [[0, 0, 0, 0]] * (sequence_length - len(bbox)) for bbox in batch[\"bbox\"]]\n else:\n batch[\"labels\"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]\n if has_bbox_input:\n batch[\"bbox\"] = [[[0, 0, 0, 0]] * (sequence_length - len(bbox)) + bbox for bbox in batch[\"bbox\"]]\n\n batch = {k: torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) else v for k, v in batch.items()}\n if has_image_input:\n batch[\"image\"] = image\n return batch\n","sub_path":"xdoc/fine_tuning/funsd/layoutlmft/data/data_collator.py","file_name":"data_collator.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"43491526","text":"#!/usr/bin/env python\nimport rospy\n\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import Int32\nfrom sensor_msgs.msg import Imu\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nimport math\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Pose,Quaternion\nfrom sensor_msgs.msg import NavSatFix\nfrom custom_msg.msg import g2g_x_z\n\nmsg = g2g_x_z()\nlacation_in_x_y=g2g_x_z() #assume z as y here\nclass gps_point:\n lat=0.0\n lon=0.0\n theta=0.0\n name=\"map\"\np=gps_point()\nroll = pitch = yaw = 0.0\nflag = 0\n[x,y,angle_diff,distance]=[0,0,0,0]\n\ndef current_location(msg):\n \n p.lat=msg.latitude\n p.lon=msg.longitude\n print(\"current latitude :\",p.lat,\"current longitude:\",p.lon)\n\ndef get_imu_angle (msg):\n global roll, pitch, yaw\n orientation_q = msg.orientation\n orientation_list = [orientation_q.x, orientation_q.y, orientation_q.z, orientation_q.w]\n (roll, pitch, yaw) = euler_from_quaternion (orientation_list)\n print(\"yaw in radian:\",yaw)\n yaw=yaw*180/math.pi\n if yaw<0:\n\t yaw=yaw+360\n print(\"yaw in degree:\",yaw)\n \n\ndef fnc1(gps_pose,args):\n global x,y,gps_angle,yaw,distance,angle_diff\n lacation_in_x_y.x=-gps_pose.position.x\n lacation_in_x_y.z=-gps_pose.position.y\n loaction_in_x_y_pub.publish(lacation_in_x_y)\n x=gps_pose.position.x\n x=-x\n print(\"x:\",x)\n 
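# the incoming /distance pose is negated (here and for y below), presumably to orient the vector from rover to goal for the bearing math that follows\n    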
y=gps_pose.position.y\n y=-y\n print(\"y:\",y)\n gps_angle= (math.atan(abs(y)/abs(x)))*180/math.pi\n\n if x>0 and y>0:\n\t gps_angle =gps_angle-90\n elif x>0 and y<0:\n gps_angle = -(90+gps_angle)\n elif x<0 and y>0:\n\t gps_angle=90-gps_angle\n else:\n\t gps_angle =90+gps_angle\n \n print(\"gps_angle:\",gps_angle)\n\n angle_diff= gps_angle-yaw\n if angle_diff >180:\n\t angle_diff -= 360\n elif angle_diff <-180:\n\t angle_diff +=360\n print(\"angle_diff:\",angle_diff)\n \n distance=math.sqrt(math.pow(x,2)+math.pow(y,2))\n print(\"distance:\",distance)\n\n atnms(args[0],args[1])\ndef forward():\n\tmsg.x = 0.7\n\tmsg.z = 0\n\ndef right_turn(): \n\tmsg.x = 0\n\tmsg.z = -0.2\n\t\ndef left_turn():\n\tmsg.x = 0\n\tmsg.z = 0.2\n\ndef stop():\n\tmsg.x = 0\n\tmsg.z = 0\n\ndef atnms(g2g_x_z_pub,flag_pub):\n\tglobal angle_diff,flag\n\tif distance < 1 :\n\t\tstop()\n\t\tflag=flag+1\n\t\tprint(\"stop\")\n\telif angle_diff>0 and abs(angle_diff)>5:\n\t\tleft_turn()\n\t\tprint(\"left turn\")\t\t\t\n\telif angle_diff<0 and abs(angle_diff)>5:\n\t\tright_turn()\n\t\tprint(\"right turn\")\n\telif abs(angle_diff)<5:\n\t\tforward()\n\t\tprint(\"going forward\")\n\tg2g_x_z_pub.publish(msg)\n\tflag_pub.publish(flag)\n\t\nrospy.init_node('g2g_x_z', anonymous=True)\ng2g_x_z_pub = rospy.Publisher('/g2g_x_z_topic', g2g_x_z, queue_size=9)\nflag_pub = rospy.Publisher('/flag_topic_for_mutilple_goal', Int32,latch=True,queue_size=10)\nsub=rospy.Subscriber('/gps_fix',NavSatFix,current_location)\t\nimu_data = rospy.Subscriber('/imu/data',Imu,get_imu_angle)\ngps_data = rospy.Subscriber('/distance',Pose,fnc1,(g2g_x_z_pub,flag_pub))\nloaction_in_x_y_pub = rospy.Publisher('/location_in_x_y', g2g_x_z, queue_size=10)\nrospy.spin()\n","sub_path":"envision/src/bot/src/g2g_for_obavoid.py","file_name":"g2g_for_obavoid.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134343759","text":"from flask import Flask, request, render_template, redirect, url_for, session\nfrom models import db, Link, User, Follow\nfrom forms import LinkForm, SignupForm, LoginForm, InviteForm\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n if 'email' not in session:\n return redirect(url_for('login'))\n\n link_form = LinkForm()\n invite_form = InviteForm()\n\n email = session['email']\n \n if request.method == 'POST':\n link = link_form.link.data\n title = link\n \n new_link = Link(user_email=email, url=link, title=title)\n db.session.add(new_link)\n db.session.commit()\n\n # notify followers that a new link has been posted\n\n return redirect(url_for('index'))\n \n else:\n # get and display all pending friend requests\n requests = Follow.query.filter_by(to_user_email=email, mutual=False).all()\n\n # gather all my links\n links = Link.query.filter_by(user_email=email).all()\n \n # gather links of people i follow where relationship is mutual, and add to links\n following = Follow.query.filter_by(from_user_email=email, mutual=True).all()\n for f in following:\n following_links = Link.query.filter_by(user_email=f.to_user_email).all()\n links.extend(following_links)\n \n # gather links of people following me where relationship is mutual, and add to links\n followers = Follow.query.filter_by(to_user_email=email, mutual=True).all()\n for f in followers:\n follower_links = Link.query.filter_by(user_email=f.from_user_email).all()\n links.extend(follower_links)\n\n\n return render_template('index.html', email=email, links=links, 
link_form=link_form, invite_form=invite_form, requests=requests)\n\n@app.route(\"/signup\", methods=['GET', 'POST'])\ndef signup():\n if 'email' in session:\n return redirect(url_for('index'))\n\n form = SignupForm()\n\n if request.method == 'POST':\n if form.validate() == False:\n return redirect(url_for('signup'))\n else:\n new_user = User(form.first_name.data, form.last_name.data, form.email.data, form.password.data)\n db.session.add(new_user)\n db.session.commit()\n\n session['email'] = new_user.email\n return redirect(url_for('index'))\n \n else:\n return render_template('signup.html', form=form)\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if 'email' in session:\n return redirect(url_for('index'))\n\n form = LoginForm()\n\n if request.method == 'POST':\n if form.validate() == False:\n return redirect(url_for('login'))\n else:\n email = form.email.data\n password = form.password.data\n\n user = User.query.filter_by(email=email).first()\n if user is not None and user.check_password(password):\n session['email'] = form.email.data\n return redirect(url_for('index'))\n else:\n return redirect(url_for('login'))\n \n else:\n return render_template('login.html', form=form)\n\n@app.route(\"/logout\")\ndef logout():\n session.pop('email', None)\n return redirect(url_for('login'))\n\n@app.route(\"/invite\", methods=['GET', 'POST'])\ndef invite():\n if 'email' not in session:\n return redirect(url_for('login'))\n\n if request.method == 'POST':\n form = InviteForm()\n if form.validate() == False:\n return redirect(url_for('index'))\n else:\n from_user_email = session['email']\n to_user_email = form.email.data\n\n new_follow = Follow(from_user_email=from_user_email, to_user_email=to_user_email, mutual=False)\n db.session.add(new_follow)\n db.session.commit()\n\n # send email to to_user_email that they've been invited to see the links i want to share with them\n\n return redirect(url_for('index'))\n \n else:\n return redirect(url_for('index'))\n\n@app.route(\"/accept/\")\ndef accept(from_user_email):\n\n if 'email' not in session:\n return redirect(url_for('login'))\n\n if from_user_email:\n follow = Follow.query.filter_by(from_user_email=from_user_email, to_user_email=session['email']).first()\n follow.mutual = True\n db.session.commit()\n\n return redirect(url_for('index'))\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"web/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141205310","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nimport time\nfrom sklearn.utils import shuffle\nimport pickle\nfrom datetime import datetime\nimport os\n\n\n\n\n\n\ndef W_variable(name,shape,type=\"fc\"):\n \"\"\"\"Returns convolutional layer weight. 
If the name already exists it just retrieves it\n type= \"conv\" or \"fc\"\n \"\"\"\n if type==\"conv\":\n w=tf.get_variable(name=name,shape=shape,dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32))\n elif type==\"fc\":\n w= tf.get_variable(name=name,shape=shape,dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer(dtype=tf.float32))\n return w\n\n\ndef b_variable(shape):\n \"\"\"\"Returns a bias variable\n \"\"\"\n return tf.Variable(tf.constant(0.0,shape=shape))\n\n\nclass Conv_pool_layer:\n\n def __init__(self,\n layer_name, #layer name\n size, #size (w,h,num_input_channel,num_output_channel)\n pooling_size, #max pooling layer size (w,h). \"None\" removes pooling layer\n activation, #options: \"sigmoid\", \"relu\", \"None\"\n ):\n\n self.layer_name=layer_name\n self.size=size\n self.pooling_size=pooling_size\n self.activation=activation\n\n\n #create weight\n b_shape=[size[-1]]\n self.w=tf.get_variable(name=layer_name,shape=size,dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32))\n self.b=tf.get_variable(name=layer_name+'_b',dtype=tf.float32,initializer=np.zeros(shape=b_shape,dtype=np.float32))\n\n\n def forward(self,\n input,\n dropout_keep_prob=None # dropout keep probablity (None: no dropout)\n ):\n #output operation\n output=tf.nn.conv2d(input=input,\n filter=self.w,\n strides=[1,1,1,1],\n padding=\"SAME\")\n\n #apply bias\n output+=self.b\n\n #activation\n if self.activation == \"relu\":\n output = tf.nn.relu(output)\n elif self.activation == \"sigmoid\":\n output = tf.nn.sigmoid(output)\n\n\n #pooling (down-sampling)\n if self.pooling_size is not None:\n output=tf.nn.max_pool(value=output,\n ksize=[1,self.pooling_size[0],self.pooling_size[1],1],\n strides=[1,self.pooling_size[0],self.pooling_size[1],1],\n padding=\"SAME\")\n if dropout_keep_prob is not None:\n output=tf.nn.dropout(output,keep_prob=dropout_keep_prob)\n\n return output\n\n\n\n\n\nclass Fully_conn_layer:\n\n def __init__(self,\n layer_name, #layer name\n size, #size (num_inputs,num_outputs)\n activation=\"relu\", #options: \"sigmoid\", \"relu\", \"None\"\n ):\n\n self.layer_name=layer_name\n self.size=size\n self.activation=activation\n\n\n\n #create weight\n b_shape=[size[-1]]\n self.w= tf.get_variable(name=layer_name,shape=size,dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer(dtype=tf.float32))\n self.b=tf.get_variable(name=layer_name+'_b',shape=b_shape,dtype=tf.float32,initializer=tf.constant(0.0,shape=b_shape))\n\n\n\n def forward(self,\n input,\n dropout_keep_prob = None # dropout keep probablity (None: no dropout)\n ):\n\n #output operation\n output=tf.matmul(input,self.w)\n\n output+=self.b\n\n\n #pooling (down-sampling)\n if self.activation==\"relu\":\n output=tf.nn.relu(output)\n elif self.activation==\"sigmoid\":\n output=tf.nn.sigmoid(output)\n\n\n if dropout_keep_prob is not None:\n output=tf.nn.dropout(output,keep_prob=dropout_keep_prob)\n\n return output\n\n\n\n\n\n\n\nclass CNN:\n\n def __init__(self,\n conv_layers_size,\n max_pooling_layers_size,\n conv_dropout_keep_prob,\n conv_activation,\n hidden_layers_size,\n hidden_activation,\n hidden_dropout_keep_prob):\n\n self.conv_layers_size=conv_layers_size\n self.max_pooling_layers_size=max_pooling_layers_size\n self.hidden_layers_size=hidden_layers_size\n self.conv_activation=conv_activation\n self.conv_dropout_keep_prob=conv_dropout_keep_prob\n self.hidden_activation=hidden_activation\n 
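# activations and dropout keep probabilities are parallel per-layer lists; a keep probability of None disables dropout for that layer\n        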
self.hidden_dropout_keep_prob=hidden_dropout_keep_prob\n\n\n\n\n    def _create_layers(self,\n                       input_width, #input image width (needed to size the first fc layer)\n                       input_height, #input image height\n                       num_input_channel, #input image number of channels\n                       num_classes #number of classification classes\n                       ):\n\n        # create conv layers\n        self.conv_pool_layers = []\n\n        for i, layer_sz in enumerate(self.conv_layers_size):\n            layer = Conv_pool_layer(layer_name='conv' + str(i),\n                                    size=[layer_sz[0], layer_sz[1], num_input_channel, layer_sz[-1]],\n                                    pooling_size=self.max_pooling_layers_size[i],\n                                    activation=self.conv_activation[i],\n                                    )\n\n            self.conv_pool_layers.append(layer)\n            num_input_channel = layer_sz[-1]\n\n        # create hidden layers\n        # the first fc layer consumes the flattened conv output: spatial dims shrink at each\n        # \"SAME\"-padded max-pooling layer, channels come from the last conv layer\n        out_w, out_h = input_width, input_height\n        for pool_sz in self.max_pooling_layers_size:\n            if pool_sz is not None:\n                out_w = int(np.ceil(float(out_w) / pool_sz[0]))\n                out_h = int(np.ceil(float(out_h) / pool_sz[1]))\n        num_inputs = out_w * out_h * self.conv_layers_size[-1][-1]\n        self.fc_layers = []\n        for i, layer_sz in enumerate([self.hidden_layers_size, num_classes]):\n            layer = Fully_conn_layer(layer_name='hd' + str(i),\n                                     size=[num_inputs, layer_sz],\n                                     activation=self.hidden_activation[i],\n                                     )\n\n            self.fc_layers.append(layer)\n            num_inputs = layer_sz\n\n    def _forward(self,X,conv_dropout_keep_prob,hidden_dropout_keep_prob):\n\n        output=X\n\n        #conv_layers (each layer applies its own dropout, so just pass the per-layer keep prob through)\n        for i,layer in enumerate(self.conv_pool_layers):\n            output=layer.forward(output,conv_dropout_keep_prob[i])\n\n        #flatten the output (the feature count is also returned but not needed here)\n        output, _ = self._flatten(output)\n\n        #fc layers\n        for i,layer in enumerate(self.fc_layers):\n            output=layer.forward(output,hidden_dropout_keep_prob[i])\n\n        return output\n\n\n\n\n\n\n    def _flatten(self,input):\n        \"\"\"This function flattens the convolutional layers output to feed to fully-connected layer\n        \"\"\"\n\n        # find the shape\n        shape = input.get_shape()\n\n        num_features = int(np.prod(shape[1:]))\n\n        # reshape\n        output = tf.reshape(input, [-1, num_features])\n\n        return output, num_features\n\n\n\n    def fit(self,X,Y_ind,X_val,Y_ind_val,num_iterations,batch_sz,print_period):\n\n        #get input and output dimensions\n        N,width,height,num_input_channel=X.shape\n        _,num_classes=Y_ind.shape\n\n        #create layers\n        self._create_layers(input_width=width,\n                            input_height=height,\n                            num_input_channel=num_input_channel,\n                            num_classes=num_classes\n                            )\n\n        # define placeholders\n        x = tf.placeholder(tf.float32, shape=(None, width, height, num_input_channel), name='x')\n        y_ind = tf.placeholder(tf.float32, shape=(None, num_classes), name='y')\n\n\n        # model output (training graph: dropout enabled)\n        logits = self._forward(X=x,conv_dropout_keep_prob=self.conv_dropout_keep_prob,hidden_dropout_keep_prob=self.hidden_dropout_keep_prob)\n\n        # training optimization operation with decaying learning rate\n        cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_ind))\n        global_step = tf.Variable(0)\n        learning_rate = tf.train.exponential_decay(0.05, global_step, 10000, 0.96, staircase=True)\n        train_op = tf.train.AdagradOptimizer(learning_rate).minimize(cost, global_step=global_step)\n\n        # accuracy calculation operation (prediction graph: dropout disabled by passing None keep probs)\n        logits_predict = self._forward(X=x, conv_dropout_keep_prob=len(self.conv_dropout_keep_prob)*[None],hidden_dropout_keep_prob=len(self.hidden_dropout_keep_prob)*[None])\n        correct_prediction = tf.equal(tf.argmax(logits_predict, dimension=1), tf.argmax(y_ind, dimension=1))\n\n        accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\n\n        session = tf.Session()\n        session.run(tf.initialize_all_variables())\n\n\n\n        for i in 
range(num_iterations):\n\n # set batch data\n offset = (i * batch_sz) % (X.shape[0] - batch_sz)\n X_batch = X[offset:(offset + batch_sz), :, :, :]\n Y_ind_batch = Y_ind[offset:(offset + batch_sz), :]\n\n # train\n session.run(train_op, feed_dict={x: X_batch, y_ind: Y_ind_batch})\n\n if i % print_period == 0:\n batch_acc = session.run(accuracy_op, feed_dict={x: X_batch, y_ind: Y_ind_batch})\n print('batch accuracy at step %d:%.4f' % (i, batch_acc))\n\n val_acc = session.run(accuracy_op, feed_dict={x: X_val, y_ind: Y_ind_val})\n print('validation accuracy at step %d:%.4f' % (i, val_acc))\n\n # save session\n tf.train.Saver().save(session, 'saved_sessions/', global_step=global_step)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef main():\n\n\n #load data\n Xtrain, Ytrain, Ytrain_ind = pickle.load(open(\"../facial_recog_data/train_data.p\", \"rb\"))\n Xtest, Ytest, Ytest_ind = pickle.load(open(\"../facial_recog_data/test_data.p\", \"rb\"))\n Xval, Yval, Yval_ind = pickle.load(open(\"../facial_recog_data/val_data.p\", \"rb\"))\n\n\n\n\n\n model=CNN(conv_layers_size=[(5,5,20),(5,5,40)],\n max_pooling_layers_size=[(2,2),None],\n conv_dropout_keep_prob=[0.5, 1.0],\n conv_activation=['relu','relu'],\n hidden_layers_size=[256,50],\n hidden_activation=['relu','relu'],\n hidden_dropout_keep_prob=[1.0,1.0])\n\n model.fit(Xtrain,Ytrain_ind,Xval,Yval_ind,5000,100,10)\n\n\n\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n main()\n\n\n\n\n\n","sub_path":"CNN_tensorflow_facial_recognition.py","file_name":"CNN_tensorflow_facial_recognition.py","file_ext":"py","file_size_in_byte":10572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"600704266","text":"import requests\nfrom datetime import datetime\nimport time\nfrom lxml import etree\nfrom lxml.html import html5parser, tostring\nimport json\nfrom constants import *\nimport pprint\n\n\nglobal apps\n\ndef get_raw_data(link):\n retry_count = 0\n connected = False\n while not connected:\n try:\n get = requests.get(link)\n connected = True\n except Exception as err:\n retry_count+=1\n wait_time = 2**(retry_count)\n print(\"connection problem->retry attempt {0}->wait time {1}secs\".format(retry_count,wait_time))\n time.sleep(wait_time)\n \n html = get.text\n html = tostring(html5parser.fromstring(html))\n #debugging\n #if (link[:len(FROM_THE_DEV_PREFIX)]!=FROM_THE_DEV_PREFIX):\n # with open(link[len(PLAY_STORE_PREFIX):]+\".html\",mode='w+') as html_file:\n # html_file.write(html)\n #data = etree.HTML(html)\n data = etree.HTML(html)\n return data\n\ndef get_app_links():\n app_links = []\n link = \"https://play.google.com/store/apps/dev?id=5700313618786177705\"\n data = get_raw_data(link)\n scroll_info = data.xpath(SCROLL_APPS_INFO)\n pagTok = scroll_info[0].attrib['data-load-more-first-continuation-token']\n sp = scroll_info[0].attrib['data-load-more-suggest-params']\n retry_count = 0\n connected = False\n\n link = \"https://play.google.com/store/xhr/searchcontent?authuser=0\"\n #getting advancement tokens\n headers = {\"content-Type\":\"application/x-www-form-urlencoded;charset=UTF-8\",\"Origin\":\"https://play.google.com\", \"Referer\":\"https://play.google.com/store/apps/dev?id=5700313618786177705\",\n\"User-Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36\"}\n \n request_data = \"pageNum={0}&sp={1}&pagTok={2}&xhr=1\"\n\n\n for i in range(1,11):\n connected = False\n while not connected:\n req_data = 
request_data.format(str(i),sp,pagTok)\n try:\n get = requests.post(link,data=req_data,headers=headers)\n connected = True\n except Exception as err:\n retry_count+=1\n wait_time = 2**(retry_count)\n print(\"connection problem->retry attempt {0}->wait time {1}secs\".format(retry_count,wait_time))\n time.sleep(wait_time)\n\n html = get.text\n html = html.encode('utf-8')\n html = html.decode('unicode-escape')\n\n pagTok = get.text[-26:-4]\n data = etree.HTML(html)\n scroll_info = data.xpath(EXPANDED_APPS_INFO)\n\n for j in scroll_info:\n if (j.attrib['href'] not in app_links):\n app_links.append(j.attrib['href'])\n print(j.attrib['href'])\n return app_links\n\ndef get_app_info(link):\n data = get_raw_data(ORIGIN+link)\n \n print(\"processing: \"+link)\n app_id = link[link.index('=')+1:]\n\n app_name = data.xpath(APP_NAME)\n app_category = data.xpath(APP_CATEGORY)\n app_dev = data.xpath(APP_DEV)\n app_top_dev = data.xpath(APP_TOP_DEV)\n dev_url = data.xpath(DEV_URL)\n app_publish_date = data.xpath(APP_PUBLISH_DATE)\n app_price = data.xpath(APP_PRICE)\n app_description = data.xpath(APP_DESCRIPTION)\n app_description_extra = data.xpath(APP_DESCRIPTION_EXTRA)\n app_five_stars = data.xpath(APP_FIVE_STARS)\n app_four_stars = data.xpath(APP_FOUR_STARS)\n app_three_stars = data.xpath(APP_THREE_STARS)\n app_two_stars = data.xpath(APP_TWO_STARS)\n app_one_stars = data.xpath(APP_ONE_STARS)\n app_update_date = data.xpath(APP_UPDATE_DATE)\n app_size = data.xpath(APP_SIZE)\n app_version = data.xpath(APP_VERSION)\n app_installs = data.xpath(APP_INSTALLS)\n app_content_rating = data.xpath(APP_CONTENT_RATING)\n app_os_required = data.xpath(APP_OS_REQUIRED)\n in_app_purchase = data.xpath(IN_APP_PURCHASE)\n developer_urls = data.xpath(DEVELOPER_URLS)\n physical_address = data.xpath(PHYSICAL_ADDRESS)\n app_whats_new = data.xpath(WHATS_NEW)\n\n update_field(app_id,'name',app_name)\n update_field(app_id,'category', app_category)\n update_field(app_id,'dev', app_dev)\n update_field(app_id,'top_dev', app_top_dev)\n update_field(app_id,'dev_url', dev_url)\n update_field(app_id,'publish_date',app_publish_date)\n update_field(app_id,'price',app_price)\n update_field(app_id,'description',app_description)\n description_extra = ''\n for i in app_description_extra:\n description_extra += str(i) + '\\n'\n update_field(app_id,'description_extra', description_extra)\n update_field(app_id,'five_stars', app_five_stars)\n update_field(app_id,'four_stars', app_four_stars)\n update_field(app_id,'three_stars', app_three_stars)\n update_field(app_id,'two_stars', app_two_stars)\n update_field(app_id,'one_stars', app_one_stars)\n update_field(app_id,'update_date', app_update_date)\n update_field(app_id,'size',app_size)\n update_field(app_id,'version',app_version)\n update_field(app_id,'installs',app_installs)\n update_field(app_id,'content_rating',app_content_rating)\n update_field(app_id,'os_required',app_os_required)\n update_field(app_id,'in_app_purchase',in_app_purchase)\n update_field(app_id,'developer_urls',developer_urls)\n update_field(app_id,'physical_address',physical_address)\n update_field(app_id,'whats_new',app_whats_new)\n\ndef update_field(app_id,field,data):\n global apps\n\n # single-element xpath results are unwrapped, empty results become None\n if len(data) == 1:\n data = data[0]\n elif len(data) == 0:\n data = None\n\n if app_id not in apps.keys():\n apps[app_id] = {}\n\n # only append a new (value, timestamp) pair when the value changed\n if field not in apps[app_id].keys():\n apps[app_id][field] = [(data,str(datetime.utcnow()))]\n elif data != apps[app_id][field][-1][0]:\n apps[app_id][field].append((data,str(datetime.utcnow())))\n\n\n\nif __name__ == '__main__':\n try:\n print(\"grabbing data from json file...\")\n time.sleep(3)\n with open('data.json') as data_file:\n apps = json.load(data_file)\n print(\"data loaded!\")\n except (IOError, ValueError):\n print(\"no previous data found or the data is invalid\\n creating new data...\")\n time.sleep(3)\n apps = {}\n app_links = get_app_links()\n for app in app_links:\n get_app_info(app)\n\n print(\"done collecting data!\\nwriting to file...\")\n with open('data.json',mode='w') as data_file:\n json.dump(apps,data_file,indent=2)\n","sub_path":"google_app_tracker.py","file_name":"google_app_tracker.py","file_ext":"py","file_size_in_byte":6367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"572002017","text":"from tests.integration.fixtures import long_table\nimport pytest\n\nfrom pandas.testing import assert_frame_equal\n\nimport pandas as pd\nimport dask.dataframe as dd\n\n\ndef test_sort(c, user_table_1):\n df = c.sql(\n \"\"\"\n SELECT\n *\n FROM user_table_1\n ORDER BY b, user_id DESC\n \"\"\"\n )\n df = df.compute().reset_index(drop=True)\n df_expected = user_table_1.sort_values(\n [\"b\", \"user_id\"], ascending=[True, False]\n ).reset_index(drop=True)\n\n assert_frame_equal(df, df_expected)\n\n\ndef test_sort_by_alias(c, user_table_1):\n df = c.sql(\n \"\"\"\n SELECT\n b AS my_column\n FROM user_table_1\n ORDER BY my_column, user_id DESC\n \"\"\"\n )\n df = df.compute().reset_index(drop=True).rename(columns={\"my_column\": \"b\"})\n df_expected = user_table_1.sort_values(\n [\"b\", \"user_id\"], ascending=[True, False]\n ).reset_index(drop=True)[[\"b\"]]\n\n assert_frame_equal(df, df_expected)\n\n\ndef test_sort_with_nan(c):\n with pytest.raises(ValueError):\n c.sql(\n \"\"\"\n SELECT\n *\n FROM user_table_nan\n ORDER BY c\n \"\"\"\n )\n\n with pytest.raises(ValueError):\n c.sql(\n \"\"\"\n SELECT\n *\n FROM user_table_inf\n ORDER BY c\n \"\"\"\n )\n\n\ndef test_sort_strings(c):\n string_table = pd.DataFrame({\"a\": [\"zzhsd\", \"öfjdf\", \"baba\"]})\n c.create_table(\"string_table\", string_table)\n\n df = c.sql(\n \"\"\"\n SELECT\n *\n FROM string_table\n ORDER BY a\n \"\"\"\n )\n df = df.compute().reset_index(drop=True)\n df_expected = string_table.sort_values([\"a\"], ascending=True).reset_index(drop=True)\n\n assert_frame_equal(df, df_expected)\n\n\ndef test_sort_not_allowed(c):\n # No DESC implemented for the first column\n with pytest.raises(NotImplementedError):\n c.sql(\"SELECT * FROM user_table_1 ORDER BY b DESC\")\n\n # Wrong column\n with pytest.raises(Exception):\n c.sql(\"SELECT * FROM user_table_1 ORDER BY 42\")\n\n\ndef test_limit(c, long_table):\n df = c.sql(\"SELECT * FROM long_table LIMIT 101\")\n df = df.compute()\n\n assert_frame_equal(df, long_table.iloc[:101])\n\n df = c.sql(\"SELECT * FROM long_table LIMIT 100\")\n df = df.compute()\n\n assert_frame_equal(df, long_table.iloc[:100])\n\n df = c.sql(\"SELECT * FROM long_table LIMIT 100 OFFSET 99\")\n df = df.compute()\n\n assert_frame_equal(df, long_table.iloc[99 : 99 + 100])\n\n df = c.sql(\"SELECT * FROM long_table LIMIT 100 OFFSET 100\")\n df = df.compute()\n\n assert_frame_equal(df, long_table.iloc[100 : 100 + 100])\n\n df = c.sql(\"SELECT * FROM long_table LIMIT 101 OFFSET 101\")\n df = df.compute()\n\n assert_frame_equal(df, long_table.iloc[101 : 101 + 101])\n","sub_path":"tests/integration/test_sort.py","file_name":"test_sort.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"38288006","text":"import pygame as py\nfrom credits_screen import CreditsScreen\nfrom match_end_screen import MatchEndScreen\nfrom menu_screen import MenuScreen\nfrom play_screen import PlayScreen\n\n\nclass ScreensManager:\n \"\"\"\n SCREENS ID\n 0 -> Menu\n 1 -> Play\n 2 -> Credits\n 3 -> Match end\n \"\"\"\n\n def __init__(self):\n # Default screen - menu - 0\n self.current_screen = 0\n self.menu = MenuScreen()\n self.play = PlayScreen()\n self.matchEnd = MatchEndScreen()\n self.creditsScreen = CreditsScreen()\n\n def update(self):\n # map ids to screen objects and update only the active screen,\n # so inactive screens are not advanced as a side effect\n screens_dict = {'0': self.menu, '1': self.play,\n '2': self.creditsScreen, '3': self.matchEnd}\n\n current_screen = str(self.current_screen)\n if current_screen in screens_dict:\n return screens_dict[current_screen].update(self)\n\n def render(self, screen):\n \"\"\"Show a screen, depending on the chosen state\"\"\"\n if self.current_screen == 0:\n self.menu.render(screen)\n\n elif self.current_screen == 1:\n self.play.render(screen)\n\n elif self.current_screen == 2:\n self.creditsScreen.render(screen)\n\n elif self.current_screen == 3:\n self.matchEnd.render(screen, self.play)\n","sub_path":"screens_manager.py","file_name":"screens_manager.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"340601005","text":"#!/usr/bin/env python3\n\nimport os\nimport pytz\nimport pandas\nimport argparse\nimport numpy as np\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\n\ndef load_data(workingdir, pickle_file='activity.pkl'):\n\n # load the activity data\n path = os.path.join(workingdir, pickle_file)\n df = pandas.read_pickle(path)\n\n # convert dates\n df['date'] = pandas.to_datetime(df.session_timestamp) \\\n .dt.normalize()\n\n # add another date column and make it the index\n df['Date'] = df['date']\n\n # change the index to timestamp\n df.set_index(['Date'], inplace=True)\n\n return df\n\n\ndef subset_by_date(dat, st, et):\n\n if type(dat) == pandas.DataFrame:\n\n # select dates between start/end range\n mask = (dat.date >= st) & (dat.date <= et)\n dat = dat.loc[mask]\n return dat\n\n elif type(dat) == pandas.Series:\n\n # select dates between start/end range\n mask = (dat.index >= st) & (dat.index <= et)\n return dat.loc[mask]\n\n\ndef downloads_by_type(working_dir, st, et, drop_cols,\n figtitle,\n filename='hydroshare-downloads-by-types.png'):\n\n # load the data based on working directory\n df = load_data(working_dir)\n df = df.sort_index()\n df = df[df.action == 'download']\n df = subset_by_date(df, st, et)\n df = df[~df.user_type.isnull()]\n\n df = df.filter(items=['user_type'])\n user_types = ['Unspecified',\n 'Post-Doctoral Fellow',\n 'Commercial/Professional',\n 'University Faculty',\n 'Government Official',\n 'University Graduate Student',\n 'Professional',\n 'University Professional or Research Staff',\n 'Local Government',\n 'University Undergraduate Student',\n 'School Student Kindergarten to 12th Grade',\n 'School Teacher Kindergarten to 12th Grade',\n 'Other'\n ]\n\n # count number of users for each type\n for u in user_types:\n df[u] = np.where(df['user_type'] == u, 1, 0)\n df['Other'] = np.where(~df['user_type'].isin(user_types), 1, 0)\n\n # remove 'user_type' b/c it's no longer needed\n df = df.drop('user_type', axis=1)\n\n # remove specified columns so they won't be plotted\n for drp in drop_cols:\n try:\n print('--> not reporting %s: %s users' % (drp, df[drp].sum()))\n df.drop(drp, inplace=True, axis=1)\n except KeyError:\n pass\n\n # calculate total and percentages for each user type\n ds = df.sum()\n df = pandas.DataFrame({'type': ds.index, 'score': ds.values})\n df = df.set_index('type')\n df['percent'] = round(df['score']/df['score'].sum()*100, 2)\n\n print('--> total number of users reporting: %d' % df.score.sum())\n\n for u in user_types:\n if u not in drop_cols:\n pct = df.loc[u].percent\n df = df.rename({u: '%s (%2.2f%%)' % (u, pct)})\n\n # make pie chart\n print('--> making user types pie chart...')\n fig, ax = plt.subplots(figsize=(10, 10))\n plt.title(figtitle)\n\n # remove where percentage is 0.00\n df = df[df.score > 0]\n df = df.sort_values(by='percent', ascending=False)\n\n labels = list(df.index)\n values = list(df.percent)\n\n # hard coded: move undergrad away from com/professional b/c they overlap\n idx = len(labels) - 1\n labels.insert(2, labels.pop(idx))\n values.insert(2, values.pop(idx))\n\n ax.pie(values, labels=labels)\n\n plt.xlabel('')\n plt.ylabel('')\n\n # save the figure and the data\n print('--> saving figure as %s' % filename)\n outpath = os.path.join(working_dir, filename)\n plt.savefig(outpath, bbox_inches=\"tight\")\n\n\ndef downloads_by_specified(working_dir, st, et, figtitle,\n filename='downloads-known-vs-unknown.png'):\n\n # load the data based on working directory\n df = load_data(working_dir)\n df = df.sort_index()\n df = df[df.action == 'download']\n df = subset_by_date(df, st, et)\n\n # user type is not specified for \"anonymous\" users\n total_downloads = len(df)\n unknown_user_downloads = len(df[df.user_type.isnull()])\n known_user_downloads = total_downloads - unknown_user_downloads\n\n # count number of users for each type\n df = pandas.DataFrame({'type': ['Unknown', 'HydroShare Users'],\n 'score': [unknown_user_downloads,\n known_user_downloads]\n })\n df = df.set_index('type')\n df['percent'] = round(df['score']/df['score'].sum()*100, 2)\n df = df.rename({'Unknown': 'Unknown (%2.2f%%)' %\n (df.loc['Unknown'].percent)})\n df = df.rename({'HydroShare Users': 'HydroShare Users (%2.2f%%)' %\n (df.loc['HydroShare Users'].percent)})\n\n # make pie chart\n print('--> making pie chart...')\n fig = plt.figure(figsize=(10, 10))\n plt.title(figtitle)\n\n def make_autopct(values, scores):\n def my_autopct(pct):\n idx = np.argmin(abs(values - pct))\n return '{p:.2f}% ({v:d})'.format(p=pct, v=scores[idx])\n return my_autopct\n\n labels = ['Unknown', 'HydroShare Users']\n fracs = df.percent.values\n scores = df.score.values\n plt.pie(fracs, labels=labels, 
autopct=make_autopct(fracs, scores))\n\n plt.xlabel('')\n plt.ylabel('')\n\n # save the figure and the data\n print('--> saving figure as %s' % filename)\n outpath = os.path.join(working_dir, filename)\n plt.savefig(outpath, bbox_inches=\"tight\")\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='user type statistics')\n parser.add_argument('--working-dir',\n help='path to directory containing elasticsearch data',\n required=True)\n parser.add_argument('--figure-title',\n help='title for the output figure',\n default='HydroShare User Type Distribution %s'\n % datetime.now().strftime('%m-%d-%Y'))\n parser.add_argument('--filename',\n help='output figure name',\n default='hydroshare-downloads.png')\n parser.add_argument('--st',\n help='start time MM-DD-YYYY (UTC)',\n default='01-01-2000')\n parser.add_argument('--et',\n help='end time MM-DD-YYYY (UTC)',\n default=datetime.now().strftime('%m-%d-%Y'))\n parser.add_argument('--exclude',\n help='comma separated list of user types to exclude',\n type=str, default=',')\n parser.add_argument('-k',\n help='plot pie chart of downloads for known users',\n action='store_true')\n parser.add_argument('-u',\n help='plot pie chart of downloads: known and unknown',\n action='store_true')\n args = parser.parse_args()\n\n excludes = [item for item in args.exclude.split(',')]\n\n ######### check date formats #########\n st_str = args.st\n et_str = args.et\n try:\n st = datetime.strptime(st_str, '%m-%d-%Y')\n except ValueError:\n st = datetime.strptime('01-01-2000', '%m-%d-%Y')\n print('\\tincorrect start date format, using default start date: 01-01-2000')\n try:\n et = datetime.strptime(et_str, '%m-%d-%Y')\n except ValueError:\n et = datetime.now()\n print('\\tincorrect end date format, using default end date: %s' % et.strftime('%m-%d-%Y'))\n\n # set timezone to UTC\n st = pytz.utc.localize(st)\n et = pytz.utc.localize(et)\n\n # check that the data exist\n if not os.path.exists(os.path.join(args.working_dir, 'activity.pkl')):\n print('\\n\\tcould not find \\'activity.pkl\\', skipping.'\n '\\n\\trun \\'collect_hs_data\\' to retrieve these missing data')\n else:\n if args.k:\n downloads_by_type(args.working_dir, st, et,\n excludes,\n args.figure_title,\n args.filename)\n if args.u:\n downloads_by_specified(args.working_dir, st, et,\n args.figure_title,\n args.filename)\n","sub_path":"report-generation/activity-pie.py","file_name":"activity-pie.py","file_ext":"py","file_size_in_byte":9520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"289216482","text":"# Rijndael\n# by: The Epidemics\n\nfrom sys import stdin\nfrom hashlib import sha256\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport re\n\n# the AES block size to use\nBLOCK_SIZE = 16\n# the padding character to use to make the plaintext a multiple of BLOCK_SIZE in length\nPAD_WITH = \"#\"\n# the key to use in the cipher\n#KEY = \"rijndael\"\n\n# Dictionary set-up: one candidate key per line\nwith open('dictionary1-3.txt') as dictionary:\n word_list = [line.strip() for line in dictionary]\n\n# decrypts a ciphertext with a key\ndef decrypt(ciphertext, key):\n\t# hash the key (SHA-256) to ensure that it is 32 bytes long\n\tkey = sha256(key.encode('utf-8')).digest()\n\t# get the 16-byte IV from the ciphertext\n\t# by default, we put the IV at the beginning of the 
ciphertext\n\tiv = ciphertext[:16]\n\n\t# decrypt the ciphertext with the key using CBC block cipher mode\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\t# the ciphertext is after the IV (so, skip 16 bytes)\n\tplaintext = cipher.decrypt(ciphertext[16:])\n\n\t# remove potential padding at the end of the plaintext\n\t# figure this one out...\n\t#plaintext = unpad(plaintext)\n\n\treturn plaintext\n\n# encrypts a plaintext with a key\ndef encrypt(plaintext, key):\n\t# hash the key (SHA-256) to ensure that it is 32 bytes long\n\tkey = sha256(key).digest()\n\t# generate a random 16-byte IV\n\tiv = Random.new().read(BLOCK_SIZE)\n\n\t# encrypt the ciphertext with the key using CBC block cipher mode\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\t# if necessary, pad the plaintext so that it is a multiple of BLOCK_SIZE in length\n\tplaintext += (BLOCK_SIZE - len(plaintext) % BLOCK_SIZE) * PAD_WITH\n\t# add the IV to the beginning of the ciphertext\n\t# IV is at [:16]; ciphertext is at [16:]\n\tciphertext = iv + cipher.encrypt(plaintext)\n\n\treturn ciphertext\n\n# Frequency check: how close the candidate plaintext's letter-'e'\n# frequency is to the expected English frequency (0.1202)\ndef freq_check(plaintext):\n e_count, expected_frequency = 0, 0.1202\n candidate_text = plaintext\n adjusted_string = re.sub(r\"[^a-z]\", \"\", candidate_text.lower())\n for letter in adjusted_string:\n if letter == 'e':\n e_count += 1\n total_characters = len(adjusted_string)\n if e_count > 0:\n frequency = float(e_count / total_characters)\n confidence = (1 - float(abs(expected_frequency - frequency)) / float(expected_frequency))\n return confidence\n else:\n return 0\n\n# Unpad the plaintext\ndef unpad(s):\n\treturn s[:-ord(s[len(s)-1:])]\n\n# MAIN\nciphertext = stdin.buffer.read().strip()\nfor key in word_list: # For valid words\n plaintext = decrypt(ciphertext, key)\n try:\n e_freq = freq_check(str(plaintext))\n print(\"Key: {}\".format(key))\n print(\"Plaintext: {}\".format(plaintext))\n except TypeError:\n pass\n #break\n #if (e_freq >= .85) & (e_freq < 1):\n # print(\"KEY={}\".format(key)) # Print the key\n # print(plaintext) # Print the plaintext\n\n\"\"\" print (\"Plaintext:\")\nprint (plaintext)\nprint('\\n')\n\nciphertext = encrypt(plaintext, KEY)\nprint (\"Ciphertext (encrypted with {}):\".format(KEY))\nprint (ciphertext)\nprint('\\n')\nprint (\"Ciphertext (encoded in base64):\")\nprint (ciphertext.encode(\"base64\").replace(\"\\n\", \"\"))\nprint('\\n')\n\nplaintext = decrypt(ciphertext, KEY)\nprint (\"Plaintext (decrypted with {}):\".format(KEY))\nprint (plaintext) \"\"\"","sub_path":"Programs/hw6-aes/rijndael.py","file_name":"rijndael.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"535978117","text":"###############################################################################\n# PyDial: Multi-domain Statistical Spoken Dialogue System Software\n###############################################################################\n#\n# Copyright 2015 - 2018\n# Cambridge University Engineering Department Dialogue Systems Group\n#\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n###############################################################################\n\n'''\nGPPolicy.py - Gaussian Process policy\n============================================\n\nCopyright CUED Dialogue Systems Group 2015 - 2017\n\n**Relevant Config variables** [Default values]::\n\n [gppolicy]\n kernel = polysort\n thetafile = ''\n\n.. seealso:: CUED Imports/Dependencies:\n\n import :mod:`policy.GPLib` |.|\n import :mod:`policy.Policy` |.|\n import :mod:`policy.PolicyCommittee` |.|\n import :mod:`ontology.Ontology` |.|\n import :mod:`utils.Settings` |.|\n import :mod:`utils.ContextLogger`\n\n************************\n\n'''\n\n__author__ = \"cued_dialogue_systems_group\"\n\n\n\nimport FeudalSubPolicy\nimport SummaryActionRel\nfrom policy import SummaryUtils\nfrom cedm.utils import DActEntity\nfrom utils import ContextLogger\nlogger = ContextLogger.getLogger('')\n\nclass GPPolicy(FeudalSubPolicy.FeudalGPSubPolicy):\n '''\n An implementation of the dialogue policy based on Gaussian processes and the GPSarsa algorithm to optimise actions, where states are GPState and actions are GPAction.\n \n The class implements the public interfaces from :class:`~Policy.Policy` and :class:`~PolicyCommittee.CommitteeMember`.\n '''\n def __init__(self, domainString, learning, sharedParams=None):\n super(GPPolicy, self).__init__(domainString,learning,sharedParams)\n \n self.actions = SummaryActionRel.SummaryActionRel(domainString, False, self.useconfreq)\n # Total number of system actions.\n self.numActions = len(self.actions.action_names)\n \n def act_on(self, state):\n '''\n Main policy method: mapping of belief state to system action.\n \n This method is automatically invoked by the agent at each turn after tracking the belief state.\n \n May initially return 'hello()' as hardcoded action. Keeps track of the last system action and the last belief state.
\n \n :param state: the belief state to act on\n :type state: :class:`~utils.DialogueState.DialogueState`\n :returns: the next system action of type :class:`~utils.DiaAct.DiaAct`\n '''\n beliefstate = state.getFocusBelief(newOne=True, getConflict=True)\n beliefstate['features']['inform_info'] = self._updateDBfeatures(state.getMergedBelief(state.entityFocus),state.entityFocus)\n \n if self.lastSystemAction is None and self.startwithhello:\n _systemAct = 'hello()'\n else:\n _systemAct = self.nextAction(beliefstate)\n self.lastSystemAction = _systemAct\n self.prevbelief = beliefstate\n \n systemAct = DActEntity.DiaActEntity(_systemAct, state.entityFocus)\n return systemAct\n \n def _updateDBfeatures(self,belief,eType):\n features = []\n for numAccepted in range(1,6):\n temp = SummaryUtils.actionSpecificInformSummary(belief, numAccepted, eType)\n features += temp\n return features\n \n \n# END OF FILE\n","sub_path":"cedm/policy/GPPolicy.py","file_name":"GPPolicy.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"513438461","text":"import requests\nimport json\nimport re\nfrom bson import json_util\nimport os\nfrom models import User, Team, Game, Challenge\nfrom datetime import datetime\nfrom random import shuffle\nfrom slackclient import SlackClient\n\nnum_teams = int(os.environ['NUM_TEAMS'])\nteam_members = int(os.environ['NUM_MEMBERS'])\ntoken = os.environ[\"SLACK_TOKEN\"]\n\nsc = SlackClient(token)\n\n\ndef get_random_post(requested_difficulty='Easy', any_difficulty=False):\n r = requests.get('http://reddit.com/r/dailyprogrammer/random.json', headers = {'User-agent': 'reddit_daily_algo_bot 0.1'})\n data = r.json()\n post = data[0]['data']['children'][0]['data']\n title = post['title']\n\n regex = r\"\\[[^)]+\\].*?\\[([^)]+)\\]\"\n try:\n difficulty = re.search(regex, title).group(1)\n except AttributeError:\n return get_random_post(requested_difficulty)\n\n # if difficulty != requested_difficulty and not any_difficulty:\n # print(\"wrong difficulty\", difficulty)\n # return get_random_post(requested_difficulty)\n\n description = post['selftext']\n description = description.replace('\\n', '\\\\n')\n regex = r\"^#(.*?)\\\\n#\"\n try:\n description = re.search(regex, description).group(1)\n except AttributeError:\n # print(\"regex failed for desc\",post['selftext'])\n return get_random_post(requested_difficulty)\n\n description = description.replace('\\\\n', '\\n')\n # draw a different post if this challenge was already stored\n try:\n Challenge.objects.get(description=description)\n print(\"challenge already exists\")\n return get_random_post(requested_difficulty)\n except Challenge.DoesNotExist:\n pass\n\n url = post['url']\n data = {\n 'title': title,\n 'description': description,\n 'url': url,\n 'difficulty': difficulty\n }\n return data\n\ndef diff_color(diff):\n if diff == \"Hard\":\n return \"#CB3535\"\n elif diff == \"Intermediate\":\n return \"#E7AB17\"\n elif diff == \"Easy\":\n return \"#54D600\"\n else:\n return \"#2E5DFF\"\n\n\ndef background_worker(response_url, channel):\n data = []\n for i in range(3):\n challenge = get_random_post(any_difficulty=True)\n new_challenge = Challenge(\n title=challenge['title'],\n description=challenge['description'],\n difficulty=challenge['difficulty'],\n url=challenge['url'])\n data.append(new_challenge.save())\n game = Game(choices=data)\n game = game.save()\n game_id = json.loads(json_util.dumps(game.id))\n message = {\n \"response_type\": \"in_channel\",\n \"text\": \"Here are 
three random Algorithm challenges!\",\n \"attachments\": []}\n choices = {\n \"title\": \"Choose which Algo you'd like to solve!\",\n \"callback_id\": game_id['$oid'],\n \"attachment_type\": \"default\",\n \"actions\": []\n }\n for i, chall in enumerate(data):\n challenge_attachment = {}\n challenge_attachment[\"title\"] = \"<\" + chall.url + \"|\" + chall.title + \">\"\n challenge_attachment[\"text\"] = chall.description\n challenge_attachment[\"color\"] = diff_color(chall.difficulty)\n message[\"attachments\"].append(challenge_attachment)\n choice = {}\n choice[\"name\"] = \"choice\"\n choice[\"text\"] = \"Challenge #\" + str(i+1)\n choice[\"type\"] = \"button\"\n choice[\"value\"] = json.loads(json_util.dumps(chall.id))[\"$oid\"]\n choices[\"actions\"].append(choice)\n message[\"attachments\"].append(choices)\n\n sc.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=message[\"text\"],\n response_type=message[\"response_type\"],\n attachments=message[\"attachments\"]\n )\n\n\ndef randomize_teams(names, no_teams, game):\n teams = []\n shuffle(names)\n for _ in range(no_teams):\n teams.append([])\n\n last_game = Game.objects.first()\n last = last_game.teams\n\n while names:\n for team in teams:\n if names:\n team.append(names.pop())\n\n # reshuffle if any team repeats a team from the previous game\n for team in teams:\n if team in last:\n return randomize_teams(names, no_teams, game)\n\n teams_object = []\n for team in teams:\n # pick as driver the member whose last turn leading is longest ago\n max_driver_time = datetime.now()\n current_driver = 0\n for (idx, member) in enumerate(team):\n if 'last_lead' in member:\n if member.last_lead < max_driver_time:\n max_driver_time = member.last_lead\n current_driver = idx\n else:\n current_driver = idx\n break\n temp = team[0]\n team[0] = team[current_driver]\n team[current_driver] = temp\n team[0].last_lead = datetime.now()\n team[0].save()\n team = Team(members=team)\n teams_object.append(team)\n\n game.teams = teams_object\n game.save()\n\n return teams\n\n\nif __name__ == \"__main__\":\n get_random_post()\n","sub_path":"reddit_api.py","file_name":"reddit_api.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"616080825","text":"#################################################################################################\nimport sys\nimport os\nimport inspect\ncmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\ncmd_folder = os.path.realpath(os.path.join(cmd_folder, \"..\"))\nif cmd_folder not in sys.path:\n sys.path.insert(0,cmd_folder)\n#################################################################################################\n\nimport logging\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\nfrom smac3_benchmarksuite.other_opts.AbstractOptRun import OptRun\n\nimport smac3_benchmarksuite.other_opts.SkOptRun as SkOptRun\nimport smac3_benchmarksuite.other_opts.TPERun as TPERun\nimport smac3_benchmarksuite.other_opts.RoboRun as RoboRun\nimport smac3_benchmarksuite.other_opts.GPyOptRun as GPyOptRun\nfrom smac3_benchmarksuite.utils import HPOLIB_SYNTH_FUNCTIONS, HPOLIB_REAL_FUNCTIONS\n\nscenarios = sorted(list(HPOLIB_SYNTH_FUNCTIONS.keys()) + list(HPOLIB_REAL_FUNCTIONS.keys()))\nopts = {\"skopt\": SkOptRun.run_ET, \"skopt_rf\": SkOptRun.run_RF,\n \"robo\": RoboRun.run, \"robo_rf\": RoboRun.run_rf,\n \"gpyopt\": GPyOptRun.run, \"gpyopt_mcmc\": GPyOptRun.run_MCMC, \"gpyopt_sim\": GPyOptRun.run_MCMC_10Dlatin_matern52,\n # \"gpyopt_rf\": GPyOptRun.run_rf, # does not work due to a broken import statement\n \"tpe\": TPERun.run, \"random\": TPERun.run_random}\n\nparser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n\nparser.add_argument(\"--opt\", choices=list(opts.keys())+[\"ALL\"], default=\"skopt\", help=\"AC procedure\")\nparser.add_argument(\"--benchmark\", required=True, choices=scenarios+[\"ALL\"], help=\"HPOLIB benchmark name\")\nparser.add_argument(\"--repetitions\", default=1, type=int, help=\"number of runs\")\nparser.add_argument(\"--n_parallel\", default=1, type=int, help=\"number of parallel executions\")\nparser.add_argument(\"--first_n\", default=1, type=int, help=\"first random seed\")\nparser.add_argument(\"--sim_time\", default=False, action=\"store_true\", help=\"Treat return values as runtimes\")\nparser.add_argument(\"--logy\", default=False, action=\"store_true\", help=\"Return log(regret)\")\nparser.add_argument(\"--no_regret\", default=False, action=\"store_true\", help=\"Return function value (instead of regret)\")\nparser.add_argument(\"--verbose\", default=\"INFO\", choices=[\"INFO\", \"DEBUG\"], help=\"verbosity level\")\n\nargs_, _ = parser.parse_known_args()\n\nlogging.basicConfig(level=args_.verbose)\n\nif args_.opt != \"ALL\":\n opts_to_run = [args_.opt]\nelse:\n opts_to_run = list(opts.keys())\n\nif args_.benchmark != \"ALL\":\n scenarios = [args_.benchmark]\n\nif not os.path.isdir(\"./results\"):\n os.mkdir(\"./results\")\n\nfor opt in opts_to_run:\n for scenario in scenarios:\n if not os.path.isdir(\"./results/%s_t%s_r%s/\" % (scenario, args_.sim_time, not args_.no_regret)):\n os.mkdir(\"./results/%s_t%s_r%s/\" % (scenario, args_.sim_time, not args_.no_regret))\n opt_run = OptRun(benchmark_name=scenario, sim_time=args_.sim_time,\n name=opt, opt_run=opts[opt],\n regret=not args_.no_regret)\n opt_run.run(repetitions=args_.repetitions,\n n_parallel=args_.n_parallel,\n first_n=args_.first_n,\n log_y=args_.logy)\n","sub_path":"scripts/run_other.py","file_name":"run_other.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"498281743","text":"from odoo import api, models, tools, fields\n\nclass MailMail(models.Model):\n \"\"\"Advance a CRM lead to the next stage when mail is sent on it\"\"\"\n _inherit = ['mail.mail']\n\n @api.model\n def create(self, vals):\n internal_users = self.env['res.users'].search([('share', '=', False)]).mapped('partner_id')\n stage_2 = self.env['crm.stage'].search([('automate_trigger', '=', '2')])\n if vals.get('mail_message_id', False):\n msg = self.env['mail.message'].browse(vals.get('mail_message_id'))\n if msg.model == 'crm.lead' and msg.author_id in internal_users:\n lead = self.env['crm.lead'].browse(msg.res_id)\n if lead.stage_id.automate_trigger == '1':\n lead.write({'stage_id': stage_2.id})\n\n elif vals.get('res_id', False) and vals.get('model', False) == 'crm.lead':\n lead = self.env['crm.lead'].browse(vals.get('res_id'))\n if lead.stage_id.automate_trigger == '1':\n lead.write({'stage_id': stage_2.id})\n return super(MailMail, self).create(vals)\n","sub_path":"auto_crm_stage/models/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"541372516","text":"\"\"\"\n\tA program that uses a DFA (Deterministic Finite State Automaton)\n\tto determine if the given input is a valid double.\n\tTim Coutinho\n\"\"\"\n\nimport re\nimport sys\n\nimport numpy as np\n\ntransTable = 
np.array([[2,3,9,4],[2,9,6,5],[2,9,9,4],[5,9,9,9],\n\t\t\t\t\t [5,9,6,9],[8,7,9,9],[8,9,9,9],[8,9,9,9],[9,9,9,9]])\nvalid = [2, 5, 8]\n\ndef hasInvalidAscii(d):\n\treturn re.search(r'[^0-9eE\\.\\+-]', d)\n\ndef getDoubles():\n\twith open(sys.argv[1]) as f:\n\t\tlines = f.read().splitlines()\n\treturn lines\n\ndef evaluate():\n\tfor double in getDoubles():\n\t\tif hasInvalidAscii(double):\n\t\t\tstate = 9\n\t\telse:\n\t\t\tstate = evaluateDouble(1, double)\n\t\tprint(double, 'A' if state in valid else 'R')\n\ndef evaluateDouble(state, double):\n\tif re.match(r'[0-9]', double):\n\t\treturn evaluateDouble(transTable[state-1][0], double[1:])\n\telif re.match(r'[\\+-]', double):\n\t\treturn evaluateDouble(transTable[state-1][1], double[1:])\n\telif re.match(r'[eE]', double):\n\t\treturn evaluateDouble(transTable[state-1][2], double[1:])\n\telif re.match(r'\\.', double):\n\t\treturn evaluateDouble(transTable[state-1][3], double[1:])\n\telse:\n\t\treturn state\n\nevaluate()\n","sub_path":"Math and Theory/DFA.py","file_name":"DFA.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"202097996","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMonthly report\n\n@author: 林迪南\n\"\"\"\nimport os\n\nimport matplotlib.pyplot as plt # import the matplotlib.pyplot module, aliased as plt\nimport matplotlib.dates as mdates\nimport numpy as np # import the numpy module, aliased as np\nfrom matplotlib.ticker import FuncFormatter\n\nimport datetime\nfrom matplotlib.dates import DateFormatter\n\nimport xlrd\nimport xlwt\n\nmonitorYear=2020;monitorMonth=11;monitorDay=30;\ntotalHours=744\n\nbase = datetime.datetime(monitorYear, monitorMonth, monitorDay,19)\ndate1=base.strftime(\"%Y-%m-%d %H:%M:%S\");date2=base +datetime.timedelta(hours=totalHours);\ndate2=date2.strftime(\"%Y-%m-%d %H:%M:%S\");\n\ndataExcel='Data.xlsx';dataSheet='传感器监测数据报表'\ndata=xlrd.open_workbook(dataExcel)\nsh=data.sheet_by_name(dataSheet)\n\nresultExcel='Result.xls';resultSheet='Result'\n\nif os.path.exists(resultExcel):\n os.remove(resultExcel)\n\nif os.path.exists(resultExcel+'x'):\n os.remove(resultExcel+'x')\n\n\ndefaultFontsize=14 # default font size\nnPoints=15 # 15 measuring points\n\ndataCounts=totalHours # e.g. 31 days of data: 31*24 samples, one per hour\nscaleFactor=1.0 # controls the x-axis length\n\nstartRow=2 # start reading from row 3 of the Excel sheet\nmeasurePointsList=[] # measuring point names\n\nprint(sh.cell_value(startRow-1,15))\n\nfor i in range(0,nPoints):\n measurePointsList.append(sh.cell_value(startRow-1,i+1))\n\ndates=[]\nfor i in range(0,dataCounts):\n dates.append(datetime.datetime.strptime(sh.cell_value(startRow+i,0), \"%Y-%m-%d %H:%M:%S\"))\n\n# dates holds the x-axis data\n#dates = [base + datetime.timedelta(hours=(1 * i)) for i in range(dataCounts)] # one sample per hour\n\nxFigsize=10;yFigsize=3\nrotAngle=40 # rotation angle\n\n# same x-range for every measuring point\nlims = [(np.datetime64(date1), np.datetime64(date2))] * nPoints\n\n#F17-D-位移(mm)\tG11-D-位移(mm)\tF17-L-倾角(°)\tG11-L-倾角(°)\tD2-2倾角-倾角(°)\tZW24-1倾角-倾角(°)\tZW24-2倾角-倾角(°)\tZE24-1倾角-倾角(°)\tZE24-2倾角-倾角(°)\tD2-2-1位移-位移(mm)\tD2-2-2位移-位移(mm)\tZW24-1位移-位移(mm)\tZW24-2位移-位移(mm)\tZE24-1位移-位移(mm)\tZE24-2位移-位移(mm)\n\ndefaultDispLims=(-1.5,1.5)\ndefaultLeanLims=(-0.001,0.002)\ndefaultLeanAlertSection=(-0.1,0.1)\n\nylabelList=['位移(mm)','位移(mm)','倾角(°)','倾角(°)','倾角(°)','倾角(°)','倾角(°)','倾角(°)','倾角(°)','位移(mm)','位移(mm)','位移(mm)','位移(mm)','位移(mm)','位移(mm)']\n\n# one y-range and one alert band per point, chosen by its measurement type\nylims = [defaultLeanLims if lbl == '倾角(°)' else defaultDispLims for lbl in ylabelList]\nalertLine = [defaultLeanAlertSection if lbl == '倾角(°)' else (-1.0, 1.0) for lbl in ylabelList]\n\n# reference (C# version):\n#string[] headerTitle = new string[dataColumns] { \"F17-D-位移(mm)\", \"G11-D-位移(mm)\", \n#\"ZW24-1位移-位移(mm)\", \"ZW24-2位移-位移(mm)\", \"ZE24-1位移-位移(mm)\", \"ZE24-2位移-位移(mm)\", \"F17-D-位移(mm)\"\n#, \"G11-D-位移(mm)\" };\n\n#string[] headerTitle = new string[dataColumns] { \"D2-2-1位移-位移(mm)\", \"D2-2-2位移-位移(mm)\", \n#\"ZW24-1位移-位移(mm)\", \"ZW24-2位移-位移(mm)\", \"ZE24-1位移-位移(mm)\", \"ZE24-2位移-位移(mm)\", \"F17-D-位移(mm)\"\n#, \"G11-D-位移(mm)\" };\n#decimal[] benchmarkData = new decimal[dataColumns] { 170m, 150m, 240m, 210m, 145m, 130m, 338.00m, 250.00m };\nbaseData = [338.00, 250.00,0.0,0.0,0.0,0.0,0.0,0.0,0.0,150.0, 170.0, 240.0, 210.0, 145.0, 130.0]\ndirectionCoff = [ -1.00, -1.0 ,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0 , 1.0, 1.0, 1.0, 1.0, 1.0 ];\n \nyList = [[] for i in range(nPoints)] # two-dimensional list: one row per measuring point\nfor i in range(0,nPoints):\n startRow=2\n temp=[] \n for j in range(0,dataCounts): \n temp.append(baseData[i]+directionCoff[i]*sh.cell_value(startRow+j,i+1))\n yList[i] = np.array(temp) # convert the list to an ndarray\n\nplt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly\nplt.rcParams['axes.unicode_minus']=False # render minus signs correctly\n\ndef formatnum(x, pos):\n return '$%.2f$' % (x)\n\n\n\nformatter = FuncFormatter(formatnum)\n\nxFormatter = DateFormatter('%m-%d')\n\nw=xlwt.Workbook()\nws=w.add_sheet(resultSheet)\n\n\nfor i in range(0,nPoints):\n fig, ax = plt.subplots(constrained_layout=True, figsize=(xFigsize, yFigsize))\n \n ax.plot(dates, yList[i])\n \n ax.grid()\n \n ax.xaxis.set_major_formatter(xFormatter)\n \n ax.yaxis.set_major_formatter(formatter)\n ax.set_xlim(lims[i])\n \n #ax.set_ylim(ylims[i])\n vals = ax.get_yticks()\n ax.set_yticklabels(['{:1.4f}'.format(x) for x in vals])\n \n # twin-axis curve (disabled)\n #---------\n# ax2 = ax.twinx() # instantiate a second axes that shares the same x-axis\n#\n# color = 'tab:blue'\n# ax2.set_ylabel('sin', color=color) # we already handled the x-label with ax1\n# ax2.plot(dates, yList[1], color=color)\n# ax2.tick_params(axis='y', labelcolor=color)\n #---------\n \n #-------\n max_indx=np.argmax(yList[i]) # index of the maximum value\n min_indx=np.argmin(yList[i]) # index of the minimum value\n 
#plt.plot(max_indx,yList[i][max_indx],'ks')\n show_max='['+str(max_indx)+' '+str(yList[i][max_indx])+']'\n print(show_max)\n show_min='['+str(min_indx)+' '+str(yList[i][min_indx])+']'\n print(show_min)\n \n ws.write(0,i,measurePointsList[i])\n ws.write(1,i,yList[i][max_indx])\n ws.write(2,i,yList[i][min_indx])\n #plt.annotate(show_max,xytext=(max_indx,yList[i][max_indx]),xy=(max_indx,yList[i][max_indx]))\n #plt.plot(min_indx,yList[i][min_indx],'gs')\n #-------\n \n plt.xticks(fontsize=defaultFontsize)\n plt.yticks(fontsize=defaultFontsize)\n plt.xlabel(xlabel='时间',fontsize=defaultFontsize)\n plt.ylabel(ylabel=ylabelList[i],fontsize=defaultFontsize)\n # alert lines (disabled)\n #---------\n# plt.axhline(y=alertLine[i][0],c=\"yellow\") # add a horizontal line\n# plt.axhline(y=alertLine[i][1],c=\"yellow\")\n #---------\n plt.savefig(measurePointsList[i]+'.jpg')\n\nw.save(resultExcel)\n\n# convert .xls (Excel 97-2003) to .xlsx (Excel 2007 and later)\n# https://www.cnblogs.com/zifeiy/p/8142853.html\nimport win32com.client as win32\n\nfname = resultExcel\nexcel = win32.gencache.EnsureDispatch('Excel.Application')\nwb = excel.Workbooks.Open(os.getcwd()+'\\\\'+resultExcel)\n\nwb.SaveAs(os.getcwd()+'\\\\'+resultExcel+'x', FileFormat = 51) # FileFormat = 51 is for the .xlsx extension\nwb.Close() # FileFormat = 56 is for the .xls extension\nexcel.Application.Quit()","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"407675473","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 16 18:27:20 2018\r\n\r\n@author: lijie\r\n\"\"\"\r\n\r\n# Xiao Ming loves maths. One day, while doing his maths homework, he had to compute the sum of 9~16 and immediately wrote down the correct answer, 100.\r\n# Not satisfied with that, he wondered how many sequences of consecutive positive integers (containing at least two numbers) sum to 100.\r\n# Before long he found another run of consecutive positive integers summing to 100: 18,19,20,21,22. Now the problem is handed to you: can you also quickly find all sequences of consecutive positive integers whose sum is S? Good Luck!\r\n# Output description:\r\n# Output every sequence of consecutive positive integers whose sum is S; the numbers inside a sequence are in ascending order, and the sequences are ordered by their starting number, ascending.\r\n\r\n'''\r\nApproach:\r\nPer the failing-case feedback, first handle the special case: when the given sum is <= 2 there is no result, so output an empty list.\r\nA run of consecutive positive integers is an arithmetic sequence with common difference 1,\r\nso the arithmetic-series formulas apply:\r\n an=a1+(n-1)d\r\n s=(a1+an)*n/2\r\nStarting from first term 1, run two nested loops over the first term and the length, each advancing by 1.\r\nStop the outer loop when the first term equals the given sum; while it is smaller, compute the inner sums:\r\n for each first term, compute the sum of the n-term sequence, increasing n until the sum exceeds the given s.\r\n'''\r\n\r\n\r\nclass Solution:\r\n def FindContinuousSequence(self, tsum):\r\n # write code here\r\n \r\n a1=1\r\n ans={}\r\n t=0\r\n if tsum<=2:\r\n return []\r\n while a1\\g<1>${voc}ay'),word))\r\n w = Template(w)\r\n if re.match(r'[aeiou]',word) is not None:\r\n
it\n\n def playGames(self, num, verbose=False, mcts = None):\n \"\"\"\n Plays num games in which player1 starts num/2 games and player2 starts\n num/2 games.\n\n Returns:\n a) In mcts mode:\n The trainExamples\n b) In normal mode:\n oneWon: games won by player1\n twoWon: games won by player2\n draws: games won by nobody\n \n \"\"\"\n eps_time = AverageMeter()\n bar = Bar('Arena.playGames', max=num)\n end = time.time()\n eps = 0\n maxeps = int(num)\n\n num = int(num/2)\n oneWon = 0\n twoWon = 0\n draws = 0\n oneStepNum = 0.0\n twoStepNum = 0.0\n self.mcts_player=1\n for _ in range(num):\n gameResult, stepnum = self.playGame(verbose=verbose)\n oneStepNum+=stepnum\n if gameResult==1:\n oneWon+=1\n elif gameResult==-1:\n twoWon+=1\n else:\n draws+=1\n # bookkeeping + plot progress\n eps += 1\n eps_time.update(time.time() - end)\n end = time.time()\n if(self.displaybar):\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps+1, maxeps=maxeps, et=eps_time.avg,\n total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n\n self.player1, self.player2 = self.player2, self.player1\n self.mcts_player=-1\n for _ in range(num):\n gameResult, stepnum = self.playGame(verbose=verbose)\n twoStepNum+=stepnum\n if gameResult==-1:\n oneWon+=1 \n elif gameResult==1:\n twoWon+=1\n else:\n draws+=1\n # bookkeeping + plot progress\n eps += 1\n eps_time.update(time.time() - end)\n end = time.time()\n if(self.displaybar):\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps+1, maxeps=num, et=eps_time.avg,\n total=bar.elapsed_td, eta=bar.eta_td)\n bar.next()\n \n bar.finish()\n print(twoStepNum/(twoStepNum+oneStepNum))\n #self.log_data()\n\n if(self.mcts!=None):\n return self.trainExamples\n else:\n return oneWon, twoWon, twoStepNum/(twoStepNum+oneStepNum)\n\n def log_data(self):\n if(self.mcts!=None):\n #---save history---\n folder = './temp_try'\n if not os.path.exists(folder):\n os.makedirs(folder)\n filename = os.path.join(folder, 'trainhistory.pth.tar'+\".examples\")\n with open(filename, \"wb+\") as f:\n Pickler(f).dump(self.trainExamples)\n f.closed\n\n def mcts_play_and_collect_data(self, board, curPlayer):\n pi, counts = self.mcts.getActionProb(board, curPlayer=curPlayer, debug=True)\n action = np.random.choice(len(pi), p=pi)\n mtx = self.mcts.heuristic.get_field_stregth_mtx(board, 1)\n heuristic_components = self.mcts.heuristic.get_x_line_mtx(board, 1)\n shape = list(board.shape)+[1]\n return action, [np.concatenate([np.reshape(board, shape),\n np.reshape(mtx, shape),\n heuristic_components], axis=2),\n curPlayer, pi, None]\n \n","sub_path":"Arena.py","file_name":"Arena.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295764763","text":"\"\"\" define policy network and export its graph as meta file \"\"\"\n\nimport os\nimport tensorflow as tf\n\n\ndef conv_bn(inputs, filters, kernel_size, name, training, activation=tf.nn.relu):\n conv = tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=(1, 1),\n padding=\"SAME\", activation=None, use_bias=False, name=name,\n kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d())\n conv = tf.layers.batch_normalization(conv, axis=3, training=training)\n if activation is not None:\n conv = activation(conv)\n return conv\n\ndef res_block(inputs, filters, kernel_size, name, training):\n with tf.variable_scope(name):\n conv = conv_bn(inputs, 
filters, kernel_size, \"conv_1\", training)\n conv = conv_bn(inputs, filters, kernel_size, \"conv_2\", training, None)\n return tf.nn.relu(conv + inputs)\n\ndef export_meta(model_name):\n with tf.Graph().as_default():\n \"\"\" neural network for logit computing \"\"\"\n training = tf.placeholder(tf.bool, name=\"training\")\n sy_x_b = tf.placeholder(tf.float32, shape=[None, 15, 15, 11], name=\"x_b\")\n sy_y_b = tf.placeholder(tf.float32, shape=[None, 226], name=\"y_b\")\n\n conv = conv_bn(sy_x_b, 192, 3, \"conv_initial\", training)\n for i in range(9):\n conv = res_block(conv, 192, 3, \"res_%d\" % i, training)\n \n policy = conv_bn(conv, 2, 1, \"conv_policy\", training)\n logits = tf.layers.dense(tf.reshape(policy, shape=[-1, 450]), 226, name=\"fc_out_logits\")\n sy_y_p = tf.nn.softmax(logits, name=\"y_p\")\n accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(sy_y_p, 1), tf.argmax(sy_y_b, 1)), tf.float32), name=\"accuracy\")\n\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=sy_y_b, logits=logits), name=\"loss\")\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n step = tf.train.AdamOptimizer(1e-4).minimize(loss, name=\"step\")\n\n if not os.path.exists(model_name):\n os.makedirs(model_name)\n saver = tf.train.Saver(max_to_keep=99999999)\n saver.export_meta_graph(model_name + \"/\" + model_name + \".meta\")\n","sub_path":"python/policy_network.py","file_name":"policy_network.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337961367","text":"from __future__ import absolute_import \nfrom __future__ import print_function \nimport os \nimport numpy as np\nimport pandas as pd \nfrom skimage import io as io \nfrom keras.preprocessing.image import ImageDataGenerator \nfrom keras.models import Sequential \nfrom keras.layers.core import Dense, MaxoutDense, Dropout, Activation, Flatten \nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D \nfrom keras.utils import np_utils, generic_utils \nfrom six.moves import range \n\ndef create_model():\n model = Sequential()\n # First Convolutional Layers\n model.add(Convolution2D(32, 1, 3, 3, border_mode='full'))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 32, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(poolsize=(2, 2)))\n model.add(Dropout(0.25))\n # Second Convolutional Layers\n model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))\n model.add(Activation('relu'))\n model.add(Convolution2D(64, 64, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(poolsize=(2, 2)))\n model.add(Dropout(0.25))\n # Third Convolutional Layers\n model.add(Convolution2D(128, 64, 3, 3, border_mode='full'))\n model.add(Activation('relu'))\n model.add(Convolution2D(128, 128, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(poolsize=(2, 2)))\n model.add(Dropout(0.25))\n # Dense Fully Connected Layer with MaxOut\n model.add(Flatten())\n model.add(MaxoutDense(128 * 16 * 16, 512, init='he_normal', nb_feature=2))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n # Dense Fully Connected Layer\n model.add(Dense(512, nb_classes, init='he_normal'))\n model.add(Activation('softmax'))\n # Compiling\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n # Returning\n return model\n\ndef sample(labels, size):\n return labels.ix[np.random.choice(labels.index, size, replace = False)]\n \ndef downsample(labels, 
levels, sizes):\n return pd.concat([sample(labels[labels.level == level], size) for level, size in zip(levels, sizes)]) \n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in xrange(0, len(l), n):\n yield l[i:i+n] \n\ndef load_train_data(levels, sizes, train_folder = '../train-128', image_size = 128, labels_file = '../data/trainLabels.csv'):\n labels = pd.read_csv(labels_file)\n list_of_files = downsample(labels, levels, sizes)\n list_of_files = list_of_files.reindex(np.random.permutation(list_of_files.index))\n train_file_list = [file + '.jpeg' for file in list_of_files.image.values];\n train_size = len(train_file_list)\n shape = (train_size, 1, image_size, image_size)\n x_train = np.zeros(shape, dtype = 'float32')\n y_train = np.zeros(train_size, dtype = 'uint8')\n for index, fn in enumerate(train_file_list):\n original_image = io.imread(train_folder + '/' + fn)\n x_train[index] = np.asarray(original_image.reshape(1, image_size, image_size), dtype = 'float32')\n y_train[index] = labels.ix[labels['image'] == fn.replace('.jpeg', ''), 'level'].values[0]\n return x_train, y_train \n\ndef batch_load_test_data(test_file_list, test_folder = '../test-128', image_size = 128):\n test_size = len(test_file_list)\n shape = (test_size, 1, image_size, image_size)\n x_test = np.zeros(shape, dtype = 'float32')\n test_labels = np.empty(test_size, dtype='S12')\n for index, fn in enumerate(test_file_list):\n original_image = io.imread(test_folder + '/' + fn)\n original_image = (original_image - original_image.mean()) / original_image.std()\n x_test[index] = np.asarray(original_image.reshape(1, image_size, image_size), dtype = 'float32')\n test_labels[index] = fn.replace('.jpeg', '')\n return x_test, test_labels \n\ndef test(model, test_folder = '../test-128', image_size = 128):\n test_file_list = [file for file in os.listdir(test_folder) if file.endswith('.jpeg')];\n output = pd.DataFrame(columns = ['image', 'level'])\n for test_files in chunks(test_file_list, 1000):\n x_test, test_labels = batch_load_test_data(test_files, test_folder, image_size)\n y_test = model.predict_classes(x_test)\n output = pd.concat([output, pd.DataFrame({ 'image': test_labels, 'level': y_test })])\n del x_test, y_test, test_labels\n return output \n\ndef save_data(output, output_file = 'submission.csv'):\n output.sort(['image'], inplace = True)\n output['level'] = output['level'].astype(np.int8)\n output.to_csv(output_file, index = False)\n \nif __name__ == \"__main__\":\n submission_file = 'submissionUltimate.csv' \n weight_file = 'ultimateNet.hdf5'\n levels = [0, 1, 2, 3, 4]\n sizes = [700, 700, 700, 700, 700]\n\n np.random.seed(1337) # for reproducibility \n \n batch_size = 32 \n nb_classes = 2 \n nb_epoch = 200\n model = create_model()\n X_train, Y_train = load_train_data(levels, sizes)\n Y_train = np_utils.to_categorical(Y_train, nb_classes)\n print(X_train.shape[0], 'train samples')\n \n print(\"Using real time data augmentation\")\n \n # this will do preprocessing and realtime data augmentation\n datagen = ImageDataGenerator(\n featurewise_center=False,\n samplewise_center=True,\n featurewise_std_normalization=False,\n samplewise_std_normalization=True,\n zca_whitening=False,\n rotation_range=45,\n width_shift_range=0.4,\n height_shift_range=0.4,\n horizontal_flip=True,\n vertical_flip=True)\n datagen.fit(X_train)\n for e in range(nb_epoch):\n print('-'*40)\n print('Epoch', e)\n print('-'*40)\n print(\"Training...\")\n # batch train with realtime data augmentation\n progbar = 
generic_utils.Progbar(X_train.shape[0])\n for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size = batch_size):\n loss = model.train_on_batch(X_batch, Y_batch)\n progbar.add(X_batch.shape[0], values=[(\"train loss\", loss)])\n model.save_weights(weight_file)\n \n output = test(model)\n save_data(output, output_file = submission_file)\n","sub_path":"scripts/ultimate_net.py","file_name":"ultimate_net.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456336826","text":"import numpy as np\nimport pandas as pd\nimport random\nimport test_data_gen as tdg\nimport test_mc as tmc\nimport statistics_clust as sc\nimport datetime as dt\n\ndef getscore(df,mc_method,reps = 1000, pca_inc = False,ranged = [2,3,4,5,6]):\n gen = getattr(tmc, mc_method )\n data_list = [gen(df,i) for i in range(reps)]\n def get_p(mc_list,og_n):\n mc_add = np.append(mc_list,og_n)\n return((list(np.flip(np.sort(mc_add))).index(og_n) + 1)/(len(mc_list) + 1))\n def one_k(data_list,k):\n results = [sc.km_out(i,k,pca_inc = pca_inc) for i in data_list]\n og = sc.km_out(df,k,pca_inc = pca_inc)\n results = np.array(results).T\n methods = ['hubers','norm','tss']\n out_dict = {methods[i]:[np.mean(results[i]),\n np.var(results[i]),\n og[i],\n get_p(results[i],og[i])] for i in range(len(methods))}\n return(out_dict)\n return([one_k(data_list,i) for i in ranged])\n\n\ndef full_func(df_in,reps = 1000, pca_inc = False,ranged = [2,3,4,5,6]):\n df_lab = df_in[0]\n df = df_in[1]\n method_list = ['min_max','random_order','pca_trans']\n out_dic = {str(i):getscore(df,i,reps = reps, pca_inc = pca_inc, ranged = ranged) for i in method_list}\n print(df_lab + ' done')\n return([df_lab,out_dic])\n\n\n\n\n\n","sub_path":"sand_box/full_func.py","file_name":"full_func.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"160418563","text":"\"\"\"\nCopyright (c) 2017 SPARKL Limited. All Rights Reserved.\nAuthor Jacoby Thwaites.\n\nElasticsearch event push implementation.\n\nThis is designed to read stdin from a pipe such as:\n\n sparkl listen | sparkl event | sparkl elastic http://localhost:9200\n\nwhere elasticsearch is listening on port 9200.\n\"\"\"\nfrom __future__ import print_function\n\nfrom elasticsearch import (\n Elasticsearch)\n\nfrom elasticsearch.helpers import (\n streaming_bulk)\n\nfrom sparkl_cli.common import (\n read_terms)\n\n\ndef parse_args(subparser):\n \"\"\"\n Adds module-specific subcommand arguments.\n \"\"\"\n subparser.add_argument(\n \"-i\", \"--index\",\n type=str,\n default=\"sparkl\",\n help=\"elasticsearch index, default 'sparkl'\")\n\n subparser.add_argument(\n \"-u\", \"--url\",\n type=str,\n default=\"http://localhost:9200\",\n help=\"elasticsearch url, default http://localhost:9200\")\n\n subparser.add_argument(\n \"-d\", \"--delete\",\n action=\"store_true\",\n help=\"delete the specified index and its documents\")\n\n subparser.add_argument(\n \"-b\", \"--bulk\",\n type=int,\n help=\"use bulk API, specify number of documents per chunk\")\n\n\ndef command(args):\n \"\"\"\n Reads JSON terms, one per line, from stdin and streams\n them to Elasticsearch. 
Any lines that are not JSON are passed through\n to stdout.\n\n Note the url can incorporate user and password, such as\n 'http://user:pass@localhost:9200'.\n \"\"\"\n instance = elastic_instance(args)\n\n if args.delete:\n delete_index(instance, args.index)\n return None\n\n check_index(instance, args.index)\n\n if args.bulk:\n (good, bad) = send_bulk(args, instance)\n else:\n (good, bad) = send_singly(args, instance)\n\n result = {\n \"tag\": \"elastic\",\n \"attr\": {\n \"good\": good,\n \"bad\": bad\n }\n }\n\n return result\n\n\ndef send_singly(args, instance):\n \"\"\"\n Sends documents one at a time.\n Returns the number of terms indexed on exit.\n \"\"\"\n good = bad = 0\n for term in read_terms():\n result = instance.create(\n args.index,\n doc_type=term[\"tag\"],\n id=term[\"id\"],\n body=term,\n refresh=True)\n\n if result.get(\"created\", False):\n good += 1\n else:\n bad += 1\n\n return (good, bad)\n\n\ndef send_bulk(args, instance):\n \"\"\"\n Sends documents in bulk, -b/--bulksize at a time.\n Returns the number of documents indexed on exit.\n\n Note that this doesn't send a chunk when full, only\n when the next document arrives that can't fit.\n\n It's tedious behaviour that means we need separate logic\n to send documents singly.\n \"\"\"\n good = bad = 0\n actions = actions_generator(args.index)\n stream = streaming_bulk(\n instance, actions,\n chunk_size=args.bulk)\n\n for (ok, result) in stream:\n if ok:\n good += 1\n else:\n bad += 1\n print(\"Document not indexed:\", result)\n\n return (good, bad)\n\n\ndef actions_generator(index):\n \"\"\"\n Yields an action per line containing a JSON term.\n All non-JSON lines are printed.\n \"\"\"\n for term in read_terms():\n action = make_action(index, term)\n yield action\n\n\ndef make_action(index, term):\n \"\"\"\n Makes an action from the term.\n\n See also:\n http://elasticsearch-py.readthedocs.io/en/master/helpers.html#bulk-helpers\n \"\"\"\n action = {\n \"_op_type\": \"create\",\n \"_index\": index,\n \"_type\": term[\"tag\"],\n \"_id\": term[\"id\"],\n \"_source\": term\n }\n\n return action\n\n\ndef elastic_instance(args):\n \"\"\"\n Creates an elasticsearch instance using the args.\n \"\"\"\n es = Elasticsearch(args.url)\n return es\n\n\ndef delete_index(instance, index):\n \"\"\"\n Deletes the index on the instance and all documents in it.\n \"\"\"\n if instance.indices.exists(index):\n instance.indices.delete(index)\n print(\"Index {Index} deleted\".format(\n Index=index))\n else:\n print(\"No index {Index}\".format(\n Index=index))\n\n\ndef check_index(instance, index):\n \"\"\"\n Checks the index on the instance, creating it if not already present.\n \"\"\"\n if not instance.indices.exists(index):\n print(\"New index {Index}\".format(\n Index=index))\n create_index(instance, index)\n else:\n print(\"Existing index {Index}\".format(\n Index=index))\n\n\ndef create_index(instance, index):\n \"\"\"\n Creates the index with the SPARKL event mappings including\n timestamp and 'nested' type for cause and data lists.\n \"\"\"\n config = {\n \"settings\": {\n },\n \"mappings\": {\n \"_default_\": {\n \"dynamic_templates\": [\n {\n \"timestamp\": {\n \"match\": \"timestamp\",\n \"mapping\": {\n \"type\": \"date\"\n }\n }\n }\n ]\n }\n }\n }\n instance.indices.create(\n index, config,\n update_all_types=False)\n","sub_path":"sparkl_cli/cmd_elastic.py","file_name":"cmd_elastic.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
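The cmd_elastic record above imports read_terms from sparkl_cli.common, which is not part of this dump. Below is a minimal sketch of what such a helper might look like, assuming (per the command() and actions_generator() docstrings) that it reads stdin line by line, yields each parsed JSON term, and passes non-JSON lines through to stdout; the real implementation may differ.

```python
import json
import sys


def read_terms():
    """Yield one parsed JSON term per stdin line.

    Hypothetical stand-in for sparkl_cli.common.read_terms, reconstructed
    only from the docstrings in cmd_elastic.py above. Non-JSON lines are
    echoed to stdout, as the command() docstring describes.
    """
    for line in sys.stdin:
        stripped = line.strip()
        if not stripped:
            continue
        try:
            yield json.loads(stripped)
        except ValueError:
            # json.JSONDecodeError subclasses ValueError; pass the line through.
            print(stripped)
```

A generator keeps the pipeline composable: send_singly() and actions_generator() can both iterate it without caring whether the input arrives from `sparkl listen | sparkl event` or from a file.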
+{"seq_id":"522656357","text":"\"\"\"\n给定一个循环数组(最后一个元素的下一个元素是数组的第一个元素),输出每个元素的下一个更大元素。数字 x 的下一个更大的元素是按数组遍历顺序,这个数字之后的第一个比它更大的数,这意味着你应该循环地搜索它的下一个更大的数。如果不存在,则输出 -1。\n\n示例 1:\n\n输入: [1,2,1]\n输出: [2,-1,2]\n解释: 第一个 1 的下一个更大的数是 2;\n数字 2 找不到下一个更大的数;\n第二个 1 的下一个最大的数需要循环搜索,结果也是 2。\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def nextGreaterElements(self, nums: List[int]) -> List[int]:\n if not nums:\n return []\n\n length = len(nums)\n stack = []\n res = [-1] * length\n\n for index in range(length * 2):\n while stack and nums[stack[-1]] < nums[index % length]:\n res[stack[-1]] = nums[index % length]\n stack.pop()\n stack.append(index % length)\n return res\n","sub_path":"algorithm/LeetCode_503_下一个最大的元素||.py","file_name":"LeetCode_503_下一个最大的元素||.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408663491","text":"import pandas as pd\nimport json, os, glob\nimport re, xlrd, csv\n\n\n\"\"\"Script to read a mapping document that follows a fixed template where first row contains following column structure:\n ColA: Diagnosis (non-standardized names)\n ColB: Patient Facing Terminology\n ColC: ICD-10 Group 4\n ColD: ICD-10 Group 5\n ColE: ICD-10 Group 6\n ColF: ICD-10 Group 7\n Col*: [flexible number of columns allowed]\n Col*: [flexible number of columns allowed]\n\"\"\"\n\ndef deleteFile(sourceDir,filename):\n \"\"\"Deletes the temporary JSON file after writing the correctly formated JSON.\n\n Args:\n sourceDir: directory where all the CSV files live\n filename: name of the temp JSON file that needs to be deleted\n Raises:\n Prints 'File does not exist!' if the filename provided couldn't be found in sourceDir \n\n \"\"\"\n \n os.chdir(sourceDir)\n if os.path.exists(filename):\n os.remove(filename)\n else:\n print(\"File does not exist!\")\n\ndef writeJSON(sourceDir,outputDir,name, jsonObj):\n \"\"\"Writes the correctly formatted JSON object into a file\n\n Args:\n sourceDir: directory where all the CSV files live\n outputDir: directory where JSON files would be written to\n name: name of the temp JSON file\n jsonObj: formatted JSON object that needs to be written\n \n \"\"\"\n os.chdir(os.path.dirname(os.path.realpath(__file__)))\n name = re.sub('\\_temp.json$', '', name)\n filename = os.path.join(outputDir,name + \".json\")\n with open(filename, \"w\") as f:\n json.dump(jsonObj,f)\n f.close()\n print(\"Output file ready inside '{}' for DB!\".format(filename))\n deleteFile(sourceDir, name+\"_temp.json\")\n\ndef expandICD(val,colE):\n \"\"\"To simplify manual efforts, ICD codes were concatenated in 3 different forms. 
This method expands those\n formatting to have each code separated by commas.\n\n Args:\n val: value entered in each excel cell\n colE: values in colE (ICD-10 Group 5) for a particular row\n\n Returns:\n List containing expanded version of codes in each cell.\n\n \"\"\"\n expandedList = []\n val = val.replace(\".\", \"\") # Step 1: remove periods\n children = re.findall(\".*?\\((.*?)\\)\", val) # Step 2: separate text before and after parenthesis\n roots = re.findall(r'(.*?)\\(.*?\\)', val)\n \n for i,v in enumerate(roots):\n v = v.replace(\",\",\"\").strip(\" \")\n if v != \"All\":\n if \",\" in children[i]: # CASE 1: M82(1,2,9)\n c = children[i].split(\",\")\n for k in c:\n expandedList.append(v+k)\n elif \"-\" in children[i]: # CASE 2: M82(1-9)\n c = children[i].split(\"-\")\n for k in range(int(c[0]),int(c[1])+1):\n expandedList.append(v+str(k))\n else: # CASE 3: All(A,D,E)\n for e in colE:\n c = children[i].split(\",\")\n for k in c:\n expandedList.append(str(e)+k)\n\n## elif \",\" in v: #comma separated values within a cell\n## val = val.split(\",\")\n## for i in val:\n## expandedList.append(i.strip())\n## else:\n## print(\"CAME INTO ELSE\")\n## expandedList.append(val)\n\n## print(\"Original = {}\".format(val))\n\n## print(\">>>>Final output = {}\".format(expandedList))\n return expandedList\n\ndef getICD(d, key, val, ICD_listId):\n \"\"\"Reads multiple columns and aggregates content into a list of list of ICD values for each row in excel\n\n Args:\n d: row id\n key: preprocessed IDs list (does NOT contain null values)\n val: preprocessed values list (including null values)\n ICD_listId: ICD index within the list of list predefined in 'readJSON' method below\n\n Returns:\n List of list of ICD values containing all the 4 groups [[grp_4],[grp_5],[grp_6],[grp_7]]\n\n \"\"\"\n\n## expandICD(\"M80.01,M80.02(1,2,9),M80.03(1,2,9),ALL(1,2,9),M80.06(1,2,9),M80.07(1,2,9),M80.08X\", 0)\n ICD = []\n for icd in range(0,len(key[ICD_listId])):\n if d in key[ICD_listId][icd]:\n if len(ICD) > 2:\n ICD.append(expandICD(val[ICD_listId][icd][d], ICD[2]))\n else:\n ICD.append(expandICD(val[ICD_listId][icd][d], 0))\n else:\n ICD.append([])\n return ICD\n \n\ndef getCF(d, key, val, CF_listId):\n \"\"\"Goes through multiple clinical focus columns and picks up relevant CF name for that row and returns a list\n\n Args:\n d: row id\n key: preprocessed IDs list (does NOT contain null values)\n val: preprocessed values list (including null values)\n CF_listId: CF index within the list of list predefined in 'readJSON' method below\n\n Returns:\n List of Clinical Focus names relevant for each row\n\n \"\"\"\n CF = []\n for cf in range(0,len(key[CF_listId])):\n if d in key[CF_listId][cf]:\n CF.append(val[CF_listId][cf][d])\n return CF\n \ndef getSS(d, key, val, SS_listId):\n \"\"\"Goes through multiple subspecialty columns and picks up relevant SS name for that row and returns a list\n\n Args:\n d: row id\n key: preprocessed IDs list (does NOT contain null values)\n val: preprocessed values list (including null values)\n CF_listId: CF index within the list of list predefined in 'readJSON' method below\n\n Returns:\n List of Subspecialties names within a dictionary for each row\n\n \"\"\"\n SS = []\n for ss in range(0,len(key[SS_listId])):\n SS_dict = {}\n if d in key[SS_listId][ss]:\n SS_dict[\"name\"] = val[SS_listId][ss][d]\n SS.append(SS_dict)\n return SS\n \n\ndef readJSON(jsonFile,cf_count,ss_count):\n \"\"\"Takes the temporary JSON file and breaks the content into 2 list of lists with 8 lists each - 
category, diagnosis, ICD, symptoms, test, treatment, clinicalFocus, subspecialty\n\n Args:\n jsonFile: temporary json filename that was spun out of the csv parsed\n cf_count: number of clinical focus columns\n ss_count: number of subspecialty columns \n\n Returns:\n Two separate list of lists.\n IdLists: contains all the index of the rows that HAS some value for each of the 8 categories (meaning, indexes with 'none' values are IGNORED)\n lists: contains whatever value existed for each of those 8 categories\n\n \"\"\"\n with open(jsonFile, encoding='utf-8') as f:\n data = json.loads(f.read())\n f.close()\n\n counter = len(data['Patient Facing Terminology'].keys())\n\n laymanDiagnosisList, laymanDiagnosisId = [], []\n diagnosisList, diagnosisId = [], []\n ICDList, ICDId = [], []\n clinicalFocusList, clinicalFocusId = [], []\n subspecialtyList, subspecialtyId = [], []\n\n \n lists = [diagnosisList, laymanDiagnosisList, ICDList, clinicalFocusList, subspecialtyList]\n IdLists = [diagnosisId, laymanDiagnosisId, ICDId, clinicalFocusId, subspecialtyId]\n\n cf_start = 7 ##TODO: if template changes, might have to move this value to param of this function\n i = 0\n for d in data.keys():\n if i < cf_start:\n if i == 9:\n pass\n elif i >= 2 and i <= 5:\n eachICD = []\n eachICD_id = []\n for c in range(counter):\n value = str(data[d][str(c)]).strip()\n eachICD.append(value)\n if value != \"None\":\n eachICD_id.append(c)\n ICDList.append(eachICD)\n ICDId.append(eachICD_id)\n else:\n for c in range(counter):\n value = data[d][str(c)]\n if i < 2:\n lists[i].append(str(value).strip().upper())\n if value != None:\n IdLists[i].append(c)\n else:\n lists[i-3].append(str(value).strip().upper())\n if value != None:\n IdLists[i-3].append(c)\n elif i >= cf_start and i < (cf_start + cf_count):\n eachCF = []\n eachCF_id = []\n for c in range(counter):\n value = str(data[d][str(c)]).strip().lower()\n eachCF.append(value.replace('x', d.strip().upper()))\n if value != \"none\":\n eachCF_id.append(c)\n clinicalFocusList.append(eachCF)\n clinicalFocusId.append(eachCF_id)\n else:\n eachSS = []\n eachSS_id = []\n for c in range(counter):\n value = str(data[d][str(c)]).strip().lower()\n eachSS.append(value.replace('x', d.strip().upper()))\n if value != \"none\":\n eachSS_id.append(c) \n subspecialtyList.append(eachSS)\n subspecialtyId.append(eachSS_id)\n i += 1\n\n return IdLists, lists\n\ndef convertMapping(sourceDir, outputDir, filename,cf_count,ss_count):\n \"\"\"Takes care of formating a JSON object by calling all of the methods defined above\n\n Args:\n sourceDir: directory where all the CSV files live\n outputDir: directory where you expect to find JSON files\n filename: name of the temp JSON file\n cf_count: number of clinical focus columns\n ss_count: number of subspecialty columns\n\n \"\"\"\n key, val = readJSON(filename,cf_count,ss_count)\n formattedJson = {}\n formattedJson[\"specialty\"] = []\n specialtyDict = {}\n specialtyDict[\"name\"] = (filename.split(\"_\")[0]).upper()\n specialtyDict[\"diagnosis\"] = []\n\n count = 0\n for i,v in enumerate(key[0]):\n diagnosisDict = {}\n diagnosisDict[\"name\"] = val[0][v]\n diagnosisDict[\"laymanDiagnosis\"] = val[1][v]\n## diagnosisDict[\"subspecialty\"] = getSS(v,key,val,7)\n## diagnosisDict[\"clinicalFocus\"] = getCF(v,key,val,6)\n diagnosisDict[\"ICD\"] = getICD(v,key,val,2)\n specialtyDict[\"diagnosis\"].append(diagnosisDict)\n test = getICD(v,key,val,2)\n formattedJson[\"specialty\"].append(specialtyDict)\n writeJSON(sourceDir, outputDir, filename, 
formattedJson)\n\ndef convertExcel2CSV(sourceDir):\n \"\"\"Takes all Excel Rules file and converts into corresponding CSV files.\n\n Args:\n sourceDir: directory where all the CSV files live\n\n \"\"\"\n counts = []\n files = [i for i in glob.glob('*.{}'.format('xlsx'))]\n\n for file in files:\n cf_count = 0\n ss_count = 0\n wb = xlrd.open_workbook(file)\n sh = wb.sheets()[0]\n col_names = sh.row_values(0, start_colx=0, end_colx=None)\n filename = re.sub('\\.xlsx$', '', file)+\".csv\"\n csvFile = open(filename, 'w', encoding='utf-8')\n wr = csv.writer(csvFile, quoting=csv.QUOTE_ALL)\n\n for rownum in range(sh.nrows):\n if rownum > 0:\n wr.writerow(sh.row_values(rownum))\n else:\n for i,val in enumerate(col_names):\n if col_names[i] == 'Clinical Focus':\n cf_count += 1\n for k in range(i+1,len(col_names)):\n if col_names[k] == \"\":\n cf_count += 1\n else:\n break\n elif \"Subspecialty Training\" in col_names[i]:\n ss_count += 1\n for k in range(i+1,len(col_names)):\n if col_names[k] == \"\":\n ss_count += 1\n counts.append([cf_count,ss_count])\n csvFile.close()\n## print((\"Step 1: Created {} file!\").format(filename))\n return counts\n\ndef convertExcelMappings2JSON(sourceDir, outputDir):\n \"\"\"Main method user needs to call to convert all Mapping Excel files from source directory into formatted JSON files in output directory\n\n Args:\n sourceDir: directory where all the Excel files live\n outputDir: directory where you expect to find JSON files \n\n \"\"\"\n os.chdir(sourceDir)\n counts = convertExcel2CSV(sourceDir)\n files = [i for i in glob.glob('*.{}'.format('csv'))]\n filenames = []\n i = 0\n for file in files:\n data = pd.read_csv(file)\n name = os.path.splitext(file)[0]\n temp = name + \"_temp.json\"\n f = open(temp, \"w\")\n f.write(data.to_json())\n f.close()\n## print((\"Step 2: Created {} file!\").format(temp))\n deleteFile(\"./\",file) #deleting CSV files\n print(\"....Reading '{}' with CF = {} & SS = {}\".format(temp,counts[i][0], counts[i][1]))\n convertMapping(sourceDir, outputDir, temp, counts[i][0], counts[i][1])\n i += 1\n \nsourceDir = \"source/mappings/\"\noutputDir = \"output/mappings/\" \nconvertExcelMappings2JSON(sourceDir,outputDir)\n","sub_path":"ClinicalDocs_parser/archive/loadMappings.py","file_name":"loadMappings.py","file_ext":"py","file_size_in_byte":12993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246199386","text":"from nltk.corpus import stopwords\nfrom nltk.cluster.util import cosine_distance\nimport numpy as np\nimport networkx as nx\n \ndef read_test_file(file_name):\n article = file_name.split(\". 
\")\n sentences = []\n\n for sentence in article:\n print(sentence)\n sentences.append(sentence.replace(\"[^a-zA-Z]\", \" \").split(\" \"))\n sentences.pop() \n \n return sentences\n\ndef sentence_similar(sent1, sent2, stopwords=None):\n if stopwords is None:\n stopwords = []\n \n sent1 = [w.lower() for w in sent1]\n sent2 = [w.lower() for w in sent2]\n \n all_words = list(set(sent1 + sent2))\n \n vector1 = [0] * len(all_words)\n vector2 = [0] * len(all_words)\n \n #first sentence vector build\n for w in sent1:\n if w in stopwords:\n continue\n vector1[all_words.index(w)] += 1\n \n #second sentence vector build\n for w in sent2:\n if w in stopwords:\n continue\n vector2[all_words.index(w)] += 1\n \n return 1 - cosine_distance(vector1, vector2)\n \ndef build_similarity_matrix(sentences, stop_words):\n # Create an empty similarity matrix\n similarity_matrix = np.zeros((len(sentences), len(sentences)))\n \n for idx1 in range(len(sentences)):\n for idx2 in range(len(sentences)):\n if idx1 == idx2: #ignore if both are same sentences\n continue \n similarity_matrix[idx1][idx2] = sentence_similar(sentences[idx1], sentences[idx2], stop_words)\n\n return similarity_matrix\n\n\ndef generate_summary(file_name, top_n=5):\n stop_words = stopwords.words('english')\n summarize_text = []\n\n #Read text anc split it\n sentences = read_test_file(file_name)\n\n #Generate Similary Martix across sentences\n sentence_similarity_martix = build_similarity_matrix(sentences, stop_words)\n\n #Rank sentences in similarity martix\n sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_martix)\n scores = nx.pagerank(sentence_similarity_graph)\n\n #Sort the rank and pick top sentences\n ranked_sentence = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True) \n \n print(\"Indexes of top ranked_sentence order are \", ranked_sentence) \n\n for i in range(top_n):\n summarize_text.append(\" \".join(ranked_sentence[i][1]))\n\n #output the summarize texr\n print(\"Summarize Text: \\n\", \". \".join(summarize_text))\n\n\ntest = 'Indegene Inc. is a company offering research and development and management services to healthcare and pharmaceutical enterprises. It was founded in 1998 and is based in Bangalore, India. Indegene helps global healthcare organizations address complex challenges by seamlessly integrating analytics, technology, operations and medical expertise. Indegene helps clients drive outcomes, revenue and productivity improvements by making giant leaps in digital transformation, customer engagement, health reform, healthcare cost reduction, and health outcomes improvement. 
Indegene has a global footprint with offices in North America, Europe, China and India.'\n\ngenerate_summary( test, 2)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329163419","text":"from conans import ConanFile, CMake, tools\nimport os\nimport shutil\n\nclass Hdf5Conan(ConanFile):\n name = \"hdf5\"\n version = \"1.10.5\"\n license = \"https://support.hdfgroup.org/ftp/HDF5/releases/COPYING\"\n author = \"KudzuRunner\"\n url = \"https://github.com/kudzurunner/conan-hdf5\"\n description = \"HDF5 C and C++ libraries\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False]}\n default_options = \"shared=True\"\n generators = \"cmake\"\n requires = \"zlib/1.2.11\"\n\n build_name = \"build\"\n\n def source(self):\n git = tools.Git(folder=self.name)\n git.clone(\"https://bitbucket.hdfgroup.org/scm/hdffv/hdf5.git\", \"hdf5-{}\".format(self.version.replace(\".\", \"_\")))\n\n tools.replace_in_file(\"{}/CMakeLists.txt\".format(self.name), \"project (HDF5 C)\",\n '''project (HDF5 C)\ninclude(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)\nconan_basic_setup()''')\n\n def configure(self):\n self.options[\"zlib\"].shared = self.options.shared\n\n def build(self):\n os.mkdir(self.build_name)\n shutil.move(\"conanbuildinfo.cmake\", self.build_name)\n cmake = CMake(self)\n cmake.definitions[\"HDF5_BUILD_EXAMPLES\"] = \"OFF\"\n cmake.definitions[\"HDF5_BUILD_TOOLS\"] = \"OFF\"\n cmake.definitions[\"HDF5_BUILD_HL_LIB\"] = \"OFF\"\n cmake.definitions[\"HDF5_BUILD_CPP_LIB\"] = \"ON\"\n cmake.definitions[\"HDF5_ENABLE_Z_LIB_SUPPORT\"] = \"ON\"\n cmake.definitions[\"CMAKE_INSTALL_PREFIX\"] = self.package_folder\n if self.settings.compiler == \"Visual Studio\":\n cmake.definitions[\"CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP\"] = True\n cmake.configure(source_folder=self.name, build_folder=self.build_name)\n cmake.build()\n cmake.install()\n\n def package(self):\n self.copy(\"COPYING\", src=self.name, keep_path=False)\n\n def package_info(self):\n debug_suffix = (\"_D\" if self.settings.build_type==\"Debug\" else \"\")\n if self.options.shared:\n self.cpp_info.libs = [\"hdf5\" + debug_suffix]\n else:\n self.cpp_info.libs = [\"libhdf5\" + debug_suffix]\n if tools.os_info.is_windows and self.options.shared:\n self.cpp_info.defines = [\"H5_BUILT_AS_DYNAMIC_LIB\"]","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"431293851","text":"from ml.Config import Config\r\nfrom ml.Storage import Storage\r\nimport string\r\nconfig = Config()\r\n\r\nconfig.ml.site = 'tubman'\r\nconfig.ml.name = 'Tubman'\r\nconfig.ml.cookie_life = 7 # in number of days\r\n\r\nconfig.ml.valid_username_chars = string.letters + string.digits + \"-_\"\r\nconfig.ml.min_username_len = 3\r\nconfig.ml.min_password_len = 8\r\n\r\nconfig.ml.sandstone_mac_url = 'http://sandstone.muzzylane.com/Sandstone-Player/SandstonePlayerSetup-Mac-%s.mpkg'\r\nconfig.ml.sandstone_win32_url = 'http://sandstone.muzzylane.com/Sandstone-Player/SandstonePlayerSetup-Win32-%s.exe'\r\n\r\nconfig.ml.jquery = \"http://code.jquery.com/jquery-1.8.3.min.js\"\r\n\r\nconfig.ml.static_file_path = \"/srv/tubman/static\"\r\nconfig.platform.static_file_path = \"/srv/tubman/onyx/static\"\r\n\r\nconfig.ml.account_code_required = False\r\nconfig.ml.class_token_bases = 
[]\r\nconfig.ml.play_prologue_default = False\r\n\r\nconfig.ml.not_logged_in_redirect = '/login'\r\nconfig.db.host = '10.25.198.134'\r\nconfig.db.database = 'tubman'\r\nconfig.db.username = 'tubman'\r\nconfig.db.password = ''\r\n\r\nconfig.logdb.host = '10.25.198.134'\r\nconfig.logdb.database = 'tubman-log'\r\nconfig.logdb.username = 'tubman-log'\r\nconfig.logdb.password = ''\r\n\r\nconfig.ml.http = 'http://tubman-stage.muzzylane.com'\r\nconfig.ml.domain = 'tubman-stage.muzzylane.com'\r\nconfig.ml.port = '80'\r\n\r\nconfig.ml.api_game_timeout = 300 # Number of seconds before the play session times out\r\nconfig.ml.reset_to_first_playable = True\r\n\r\nconfig.ml.api_service_hostname = 'test-api.muzzylane.com'\r\nconfig.ml.api_service_url = 'http://test-api.muzzylane.com'\r\nconfig.ml.api_service_port = 80\r\n\r\nconfig.locale = 'en_US'\r\n\r\n\r\nconfig.email.allow = True\r\nconfig.email.host = 'smtp.gmail.com'\r\nconfig.email.port = 587\r\nconfig.email.login = 'automate@muzzylane.com'\r\nconfig.email.password = 'G4P7ays'\r\nconfig.email.replyto = 'info@muzzylane.com'\r\nconfig.email.templates_dir = ['/srv/site/onyx/views/emails/']\r\n\r\nconfig.ml.key = 'abekey'\r\nconfig.ml.api_key = '8brAdmHjf2AscLqD'\r\nconfig.ml.sandstone_api_key = 'LLZKXe4J0p0Ls'\r\n\r\nconfig.log.access_levels = ['LOG', 'ACCESS', 'INFO', 'XML'] # XML should be removed from production\r\nconfig.log.error_levels = ['WARN', 'ERROR']\r\nconfig.log.debug_levels = ['DEBUG'] # Should be empty for deployment\r\nconfig.log.access_file = 'access.log'\r\nconfig.log.error_file = 'error.log'\r\nconfig.log.dir = '/srv/muzzylane/log/'\r\n\r\nconfig.roles.standard\r\nconfig.roles.standard.name = 'standard'\r\nconfig.roles.standard.display_name = 'Standard'\r\nconfig.roles.standard.is_standard = True\r\nconfig.roles.standard.can_play_games = True\r\nconfig.roles.standard.can_use_admin_tools = False\r\nconfig.roles.standard.logged_in = True\r\n\r\nconfig.roles.admin\r\nconfig.roles.admin.name = 'admin'\r\nconfig.roles.admin.display_name = 'Admin'\r\nconfig.roles.admin.is_admin = True\r\nconfig.roles.admin.can_play_games = True\r\nconfig.roles.admin.can_use_admin_tools = True\r\nconfig.roles.admin.logged_in = True\r\n\r\nconfig.roles.teacher\r\nconfig.roles.teacher.name = 'teacher'\r\nconfig.roles.teacher.display_name = 'Teacher'\r\nconfig.roles.teacher.is_teacher = True\r\nconfig.roles.teacher.can_play_games = True\r\nconfig.roles.teacher.can_use_admin_tools = False\r\nconfig.roles.teacher.logged_in = True\r\n\r\nconfig.roles.student.name = 'student'\r\nconfig.roles.student.display_name = 'Student'\r\nconfig.roles.student.is_student = True\r\nconfig.roles.student.can_play_games = True\r\nconfig.roles.student.can_use_admin_tools = False\r\nconfig.roles.student.logged_in = True\r\n\r\nconfig.roles.demo.name = 'demo'\r\nconfig.roles.demo.display_name = 'Demo'\r\nconfig.roles.demo.is_demo = True\r\nconfig.roles.demo.can_play_games = True\r\nconfig.roles.demo.can_use_admin_tools = False\r\nconfig.roles.demo.logged_in = True","sub_path":"web/config/test/tubman-stage.py","file_name":"tubman-stage.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392449452","text":"from constants import *\nfrom utils import *\nfrom core import *\nimport heapq\nimport pdb\nimport copy\nfrom functools import reduce\n\nfrom statesactions import *\n\n############################\n## HELPERS\n\n### Return true if 
the given state object is a goal. Goal is a State object too.\ndef is_goal(state, goal):\n return len(goal.propositions.difference(state.propositions)) == 0\n\n### Return true if the given state is in a set of states.\ndef state_in_set(state, set_of_states):\n for s in set_of_states:\n if s.propositions == state.propositions:\n return True\n return False\n\n### For debugging, print each state in a list of states\ndef print_states(states):\n for s in states:\n ca = None\n if s.causing_action is not None:\n ca = s.causing_action.name\n print(s.id, s.propositions, ca, s.get_g(), s.get_h(), s.get_f())\n\n\n############################\n### Planner \n###\n### The planner knows how to generate a plan using a-star and heuristic search planning.\n### It also knows how to execute plans in a continuous, time environment.\n\nclass Planner():\n\n def __init__(self):\n self.running = False # is the planner running?\n self.world = None # pointer back to the world\n self.the_plan = [] # the plan (when generated)\n self.initial_state = None # Initial state (State object)\n self.goal_state = None # Goal state (State object)\n self.actions = [] # list of actions (Action objects)\n\n ### Start running\n def start(self):\n self.running = True\n \n ### Stop running\n def stop(self):\n self.running = False\n\n ### Called every tick. Executes the plan if there is one\n def update(self, delta = 0):\n result = False # default return value\n if self.running and len(self.the_plan) > 0:\n # I have a plan, so execute the first action in the plan\n self.the_plan[0].agent = self\n result = self.the_plan[0].execute(delta)\n if result == False:\n # action failed\n print(\"AGENT FAILED\")\n self.the_plan = []\n elif result == True:\n # action succeeded\n done_action = self.the_plan.pop(0)\n print(\"ACTION\", done_action.name, \"SUCCEEDED\")\n done_action.reset()\n # If the result is None, the action is still executing\n return result\n\n ### Call back from Action class. Pass through to world\n def check_preconditions(self, preconds):\n if self.world is not None:\n return self.world.check_preconditions(preconds)\n return False\n\n ### Call back from Action class. Pass through to world\n def get_x_y_for_label(self, label):\n if self.world is not None:\n return self.world.get_x_y_for_label(label)\n return None\n\n ### Call back from Action class. Pass through to world\n def trigger(self, action):\n if self.world is not None:\n return self.world.trigger(action)\n return False\n\n ### Generate a plan. Init and goal are State objects. Actions is a list of Action objects\n ### Return the plan and the closed list\n def astar(self, init, goal, actions):\n plan = [] # the final plan\n open = [] # the open list (priority queue) holding State objects\n closed = [] # the closed list (already visited states). 
Holds state objects\n ### YOUR CODE GOES HERE\n queue = []\n cameFrom = {}\n counter = 0\n g = {init: 0}\n h = self.compute_heuristic(init, goal, actions)\n if (is_goal(init,goal)):\n return plan, closed\n\n queue.append((h, counter, init))\n curr = init\n while len(queue) != 0 and (is_goal(curr, goal) == False):\n if curr not in closed:\n closed.append(curr)\n curr = heapq.heappop(queue)[2]\n neighbors = self.getNeighbors(curr, actions, closed)\n for neighbor in neighbors:\n if neighbor not in closed:\n tentativeScore = g[curr] + self.compute_heuristic(curr, neighbor, actions)\n if neighbor not in g or g[neighbor] > tentativeScore:\n counter = counter + 1\n cameFrom[neighbor] = curr\n g[neighbor] = tentativeScore\n h = self.compute_heuristic(neighbor, goal, actions) \n f = h + g[neighbor]\n heapq.heappush(queue, (f, counter, neighbor))\n plan = self.reconstructPath(cameFrom, curr, init)\n ### CODE ABOVE\n return plan, closed\n\n def reconstructPath(self, cameFrom, curr, init):\n path = []\n path.append(curr.causing_action)\n while cameFrom.get(curr) != None and cameFrom.get(curr) != init:\n curr = cameFrom.get(curr)\n path = list(path)\n path.append(curr.causing_action)\n path.reverse()\n return path\n\n def getNeighbors(self, curr, actions, closed):\n # neighbors = [action[1] for action in actions if action[0].preconditions == curr.add_list] + \\\n # [action[0] for action in actions if action[1].preconditions == curr.add_list]\n neighbors = []\n for action in actions:\n if curr.propositions.issuperset(action.preconditions):\n #if all the preconditions of the action are in the set of current state propositions\n neighbor = copy.deepcopy(curr)\n if neighbor not in closed:\n neighbor.causing_action = action\n neighbor.propositions = (neighbor.propositions).union(action.add_list)\n neighbor.propositions = (neighbor.propositions).difference(action.delete_list)\n neighbors.append(neighbor)\n return neighbors\n #go look at the door thing gotta delete shit too and add list before adding to neighbors\n\n ### Compute the heuristic value of the current state using the HSP technique.\n ### Current_state and goal_state are State objects.\n def compute_heuristic(self, current_state, goal_state, actions):\n actions = copy.deepcopy(actions) # Make a deep copy just in case\n h = 0 # heuristic value to return\n ### YOUR CODE BELOW\n dummyStart = Action(name = \"dummyStart\", preconditions = {}, add_list = current_state.propositions, delete_list = {}, cost = 0)\n dummyGoal = Action(name = \"dummyGoal\", preconditions = goal_state.propositions, add_list = {}, delete_list = {})\n nodes = [] #Probably a list of Actions (including dummy Actions\n edges = [] #Probably an edge is a tuple (a1, prop, a2) where a1 and a2 could be pointers to Actions or Action.id\n actions.append(dummyStart)\n actions.append(dummyGoal)\n for action in actions:\n nodes.append(action)\n for i in nodes:\n for j in nodes:\n currEdges = (i.add_list).intersection(j.preconditions) #new set with elements common to action1's add list and action 2's preconds\n for edge in currEdges:\n #edge is precond name so edges is in the form (action1, precond, action2)\n edges.append((i, edge, j))\n #now i should have all the edges for my grpah i think\n #name, preconditions, add_list, delete_list, cost = 1\n q = []\n visited = []\n currentPropositions = set()\n from collections import defaultdict\n costs = defaultdict(list)\n costs = {}\n q.append(dummyStart) #append dummy node to the queue\n # visited.append(dummyStart) #mark as visited\n while 
len(q)!= 0:\n currAction = q.pop()\n currentPropositions = currentPropositions.union(currAction.add_list) #add dummy node's add list to the list of props\n if currAction in visited:\n break\n visited.append(currAction)\n if currAction == dummyGoal:\n return costs[currAction]\n for action in actions:\n if currentPropositions.issuperset(action.preconditions):\n if action not in visited:\n q.append(action)\n current_value = 0\n for edge in edges:\n if (edge[2] == currAction):\n #if this is incoming edge I want to track cost\n current_value = max(current_value, costs[edge[2]])\n costs[currAction] = current_value\n for edge in edges:\n if (edge[0] == currAction):\n #fixing the cost of all outgoing edges from the current action\n costs[edge[2]] = costs[edge[0]] + currAction.cost\n #last currAction must be goal.. \n h = costs[currAction]\n ### YOUR CODE ABOVE\n return h\n\n","sub_path":"GameAI/planning/planner.py","file_name":"planner.py","file_ext":"py","file_size_in_byte":8169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173415347","text":"from django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.core.mail import send_mail\nimport random\n\nfrom .forms import UserForm, LoginForm, VerificationForm\nfrom .models import User, RecentUser, VerificationNumber\n\n# Create your views here.\n\ndef login(request):\n\tif request.method == 'POST':\n\t\tform = LoginForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tInfo = form.cleaned_data\n\n\t\t\ttry:\n\t\t\t\tUser.objects.get(POVIS_ID = Info['POVIS_ID'])\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\terror = 'ID'\n\t\t\t\treturn render(request, 'login/login.html', {'form' : form, 'error' : error})\n\t\t\telse:\n\t\t\t\tuser = User.objects.get(POVIS_ID = Info['POVIS_ID'])\n\t\t\t\tif user.Password == Info['Password']:\n\t\t\t\t\treturn render(request, 'login/login.html', {'form' : form})\t\n\t\t\t\telse:\n\t\t\t\t\terror = 'Password'\n\t\t\t\t\treturn render(request, 'login/login.html', {'form' : form, 'error' : error})\n\t\telse:\n\t\t\terror = 'Blank'\n\t\t\treturn render(request, 'login/login.html', {'form' : form, 'error' : error})\n\telse:\n\t\tform = LoginForm()\n\n\treturn render(request, 'login/login.html', {'form' : form})\n\ndef registeration(request):\n\tif request.method == 'POST':\n\t\tform = UserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser = form.save()\n\t\t\tif User.objects.filter(POVIS_ID = user.POVIS_ID).count() != 1:\n\t\t\t\terror = 'ID'\n\t\t\t\tuser.delete()\n\t\t\t\treturn render(request, 'login/register.html', {'form' : form, 'error' : error})\n\t\t\tif user.Password != user.Password_Confirm:\n\t\t\t\terror = 'Password'\n\t\t\t\tuser.delete()\n\t\t\t\treturn render(request, 'login/register.html', {'form' : form, 'error' : error})\n\t\t\tuser.save()\n\t\t\trecent = RecentUser(POVIS_ID = user.POVIS_ID)\n\t\t\trecent.save()\n\t\t\t#messages.add_message(request, 25, 'S')\n\t\t\treturn redirect(verification)\n\telse:\n\t\tform = UserForm()\n\n\treturn render(request, 'login/register.html', {'form' : form})\n\ndef verification(request):\n\ttry:\n\t\trecentUser = RecentUser.objects.get()\n\texcept:\n\t\terror = '?'\n\telse:\n\t\tsirial_number = 23232323\n\t\tsend_mail('신개념 열람석 자리관리 시스템 BlueRocks의 가입 인증 메일입니다!',\n\t\t\t'신개념 열람석 관리 시스템 BlueRocks에 가입해주셔서 감사합니다. 
저희는 현재 포스텍 구성원을 대상으로 운영되고 있으며 POVIS 메일을 사용하여 인증을 진행하고 있습니다. 앞으로 발전하는 BlueRocks가 되겠습니다. 인증번호는 {}입니다. 감사합니다.'.format(sirial_number),\n\t\t\t'bluerocks@gmail.com', [recentUser.POVIS_ID+'@postech.ac.kr'],\n\t\t\tfail_silently=False)\n\n\t\t\n\n\t\tif request.method == 'POST':\n\t\t\tform = VerificationForm(request.POST)\n\t\t\tSirial_Number = VerificationNumber(Sirial_Number = str(sirial_number))\n\t\t\tif form.is_valid():\n\t\t\t\tinfo = form.cleaned_data\n\t\t\t\tif Sirial_Number.Sirial_Number == info['Sirial_Number']:\n\t\t\t\t\trecentUser.delete()\n\t\t\t\t\tmessages.add_message(request, 25, 'S')\n\t\t\t\t\treturn redirect(login)\n\t\t\t\telse:\n\t\t\t\t\terror = 'SNE'\n\t\t\t\t\tUser.objects.get(POVIS_ID=recentUser.POVIS_ID).delete()\n\t\t\t\t\trecentUser.delete()\n\t\t\t\t\tmessages.add_message(request, 40, 'F')\n\t\t\t\t\treturn redirect(login)\n\t\telse:\n\t\t\tform = VerificationForm()\n\t\treturn render(request, 'login/verification.html', {'form': form})\n","sub_path":"SeatManage/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616638248","text":"#!/usr/bin/env python3\n# mymap.py - memory mapped IO\nimport sys\nfrom os import *\nfrom mmap import *\n\nif (len(sys.argv) != 2):\n raise SystemExit(\"Usage: %s file\" %sys.argv[0])\n\nfile = sys.argv[1]\nflen = path.getsize(file) # get file length\nfd = open(file, O_RDWR)\n\nbuf = mmap(fd, flen, MAP_SHARED, PROT_READ | PROT_WRITE) \nprint(buf[:]) # show all lines\nbuf[:] = buf[:].upper() # convert to uppercase\nbuf.close() # unmap memory\nclose(fd) # close file\n\n###############################################\n#\n# $ mymap.py numlines\n# this is line one.\n# this is line two.\n# this is line three.\n# \n# $ cat numlines\n# THIS IS LINE ONE.\n# THIS IS LINE TWO.\n# THIS IS LINE THREE.\n#\n","sub_path":"py3/pgms/sec2/mymap.py","file_name":"mymap.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217842627","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/7/22 15:58\n# @Author : Ye Jinyu__jimmy\n# @File : RF_forecast.py\n\nimport pandas as pd\nimport cx_Oracle\nimport os\nimport numpy as np\n# 显示所有列\npd.set_option('display.max_columns', None)\n# 显示所有行\npd.set_option('display.max_rows', 500)\n# 设置value的显示长度为100,默认为50\npd.set_option('max_colwidth', 100)\n# 注:设置环境编码方式,可解决读取数据库乱码问题\nos.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'\nfrom matplotlib import pyplot as plt\n\n#parser是根据字符串解析成datetime,字符串可以很随意,可以用时间日期的英文单词,\n# 可以用横线、逗号、空格等做分隔符。没指定时间默认是0点,没指定日期默认是今天,没指定年份默认是今年。\n# from pylab import *\nplt.switch_backend('agg')\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n#如下是支持中文数字\n# mpl.rcParams['font.sans-serif'] = ['SimHei']\n#读取得到数据\nfrom sklearn.ensemble import RandomForestRegressor\nfrom tqdm import *\nimport itertools\nimport datetime\nfrom sklearn.neighbors import KNeighborsRegressor\nimport warnings\nimport time\n\n# import chinese_calendar as calendar #\nwarnings.filterwarnings(\"ignore\")\nimport get_holiday_inf_2019\nimport get_holiday_inf_2018\n\nholiday_2019 = get_holiday_inf_2019.main()\nholiday_2019 = holiday_2019[1].fillna(0).reindex()\nholiday_2018 = get_holiday_inf_2018.main()\nholiday_2018 = holiday_2018[1].fillna(0).reindex()\nholiday_2019['Account_date'] = pd.to_datetime(holiday_2019['Account_date'], format='%Y-%m-%d', 
errors='ignore')\nholiday_2018['Account_date'] = pd.to_datetime(holiday_2018['Account_date'], format='%Y-%m-%d', errors='ignore')\nweather_data = pd.read_csv('D:\\jimmy-ye\\AI_supply_chain\\data\\weather_feature.csv',encoding='utf_8_sig')\nweather_data = weather_data.rename(index=str,columns ={'weather_date':'Account_date'})\n\n#<---------------------------------------------------------------------------------->确认外部参数的计算日期\ndef date_parameter_read(end_date):\n # date_parameter = sys.argv[1]\n # date_parameter_intercept = date_parameter[0:8]\n end = pd.to_datetime(datetime. datetime.strptime(end_date,'%Y%m%d'))\n return end\n\n\n#--------------------------------------------------------------------------------->对日期进行转化返回string前一天的日期\ndef date_convert(end):\n datetime_forma= datetime.datetime.strptime(end, \"%Y%m%d\")\n yesterday = datetime_forma - datetime.timedelta(days=1)\n yesterday = yesterday.strftime(\"%Y%m%d\")\n return yesterday\n\n\n#-------------------------------------------------------------------------->函数读取近两年总销量前50名的SKU的资源id\ndef read_oracle_data(start_date,end_date,i):\n host = \"192.168.1.11\" # 数据库ip\n port = \"1521\" # 端口\n sid = \"hdapp\" # 数据库名称\n parameters = cx_Oracle.makedsn(host, port, sid)\n #读取的数据包括销量的时间序列,天气和活动信息\n # hd40是数据用户名,xfsg0515pos是登录密码(默认用户名和密码)\n conn = cx_Oracle.connect(\"hd40\", \"xfsg0515pos\", parameters)\n #查看详细的出库数据,进行了日期的筛选,查看销量签50名的SKU\n stkout_detail_sql = \"\"\" SELECT sum(b.qty),b.GDGID\n FROM stkout ss ,stkoutdtl b, stkoutlog c,store s ,goods g,warehouseh wrh\n where ss.num = b.num and ss.num =c.num and b.gdgid=g.gid and ss.sender =s.gid\n and ss.cls='统配出' and ss.cls=c.cls and ss.cls=b.cls and ss.wrh = wrh.gid\n and c.stat IN ('700','720','740','320','340')\n AND wrh.NAME LIKE'%%商品仓%%' \n AND ss.SENDER= %s\n and c.time>= to_date('2019-03-01','yyyy-mm-dd')\n and c.time < to_date('%s','yyyy-mm-dd')\n GROUP BY b.GDGID order by sum(b.QTY) DESC\"\"\" %(i,end_date)\n GDGID_sales = pd.read_sql(stkout_detail_sql, conn)\n #将SKU的的iD转成list,并保存前50个,再返回值\n conn.close\n sku_id = GDGID_sales['GDGID'].tolist()\n # sku_id = sku_id[0:10]\n return sku_id\n\n\n#------------------------------------------------------------------>根据SKU 的id来获取每个SKU的具体的销售明细数据\ndef get_detail_sales_data(sku_id,start_date,end_date,DC_CODE):\n host = \"192.168.1.11\" # 数据库ip\n port = \"1521\" # 端口\n sid = \"hdapp\" # 数据库名称\n dsn = cx_Oracle.makedsn(host, port, sid)\n\n # hd40是数据用户名,xfsg0515pos是登录密码(默认用户名和密码)\n conn = cx_Oracle.connect(\"hd40\", \"xfsg0515pos\", dsn)\n # 查看出货详细单的数据\n stkout_detail_sql = \"\"\"SELECT ss.sender,s.name Dc_name \n ,wrh.GID as WRH,wrh.NAME warehouse_name\n ,ss.num ,b.gdgid,G.NAME sku_name\n ,trunc(c.time) OCRDATE,b.CRTOTAL,b.munit,b.qty,b.QTYSTR\n ,b.TOTAL,b.price,b.qpc,b.RTOTAL\n FROM stkout ss ,stkoutdtl b, stkoutlog c,store s ,goods g,warehouseh wrh\n where ss.num = b.num and ss.num =c.num \n and b.gdgid=g.gid and ss.sender =s.gid\n and ss.cls='统配出' and ss.cls=c.cls and ss.cls=b.cls and ss.wrh = wrh.gid\n and c.stat IN ('700','720','740','320','340')\n and c.time>= to_date('%s','yyyy-mm-dd')\n and c.time < to_date('%s','yyyy-mm-dd')\n and b.GDGID = %s \n AND wrh.NAME LIKE'%%商品仓%%' AND ss.SENDER= %s\"\"\" % \\\n (start_date,end_date,sku_id,DC_CODE)\n stkout_detail = pd.read_sql(stkout_detail_sql, conn)\n conn.close\n return stkout_detail\n\n#---------------------------------------------------------------------------->按照不同的\n# def diff_DC(n):\n# host = \"192.168.1.11\" # 数据库ip\n# port = \"1521\" # 端口\n# sid = \"hdapp\" # 数据库名称\n# dsn = 
cx_Oracle.makedsn(host, port, sid)\n#\n# # hd40是数据用户名,xfsg0515pos是登录密码(默认用户名和密码)\n# conn = cx_Oracle.connect(\"hd40\", \"xfsg0515pos\", dsn)\n# # 查看出货详细单的数据\n# DC = \"\"\"select s.SENDER,COUNT(s.SENDER) from STKOUT s INNER JOIN STORE s1 ON s.sender = s1.gid\n# INNER JOIN(SELECT * FROM WAREHOUSE w WHERE w.NAME LIKE'%%商品仓%%' )w\n# ON w.STOREGID = s1.gid\n# INNER JOIN STORE s2 ON s.CLIENT = s2.gid\n# WHERE bitand(s1.property,32)=32\n# AND bitand(s2.property,32)<>32\n# AND substr(s2.AREA,2,3)<'8000'\n# AND s.CLS='统配出'\n# GROUP BY s.SENDER order by count(s.SENDER) DESC\"\"\"\n# DC_detail = pd.read_sql(DC, conn)\n# conn.close\n# DC_detail = DC_detail['SENDER'].tolist()\n# DC_detail = DC_detail[0:n]\n# return DC_detail\n\n\n\n#临时增加,只计算杭州配送中心的配送数据\ndef diff_DC(n):\n DC_detail=[1000255]\n return DC_detail\n\n#--------------------------------------------------------------------------------->日的标准化转化\ndef date_normalize(data_frame):\n data_frame_sort = data_frame.sort_values(by = ['OCRDATE'],ascending=False )\n data_frame_sort['OCRDATE'] = pd.to_datetime(data_frame_sort['OCRDATE']).dt.normalize()\n return data_frame_sort\n\n#<------------------------------------------------------------------------->在列表阶段对日期转化为标准的格式\ndef date_transform(data):\n data = data.sort_values([\"Account_date\"], ascending=1)\n data[\"Account_date\"]= pd.to_datetime(data[\"Account_date\"].apply(lambda x : x.strftime(\"%Y-%m-%d\")))\n return data\n\n#------------------------------------------------------------------------------->以日期作为分组内容查看每天每个SKU的具体的销量\ndef data_group(data):\n #这里的毛利是门店卖出的总金额与仓库进货的总金额的差值比\n data['GROSS_PROFIT_RATE'] = (data['RTOTAL'] - data['TOTAL']) / data['TOTAL']\n #计算仓库销售的正确单价\n data['PRICE'] = data['PRICE']/ data['QTY']\n #以下是用来保存分组后的数据\n sales_data = pd.DataFrame(columns = [\"Account_date\",\"Sku_id\",'Dc_name',\"Sales_qty\",\"Price\",'Gross_profit_rate','Dc_code',\n 'Wrh','Warehouse_name','Sku_name','Munit'])\n sales_data[\"Sales_qty\"]=data.groupby([\"OCRDATE\"],as_index = False).sum()[\"QTY\"]\n sales_data[\"Price\"] = data.groupby([\"OCRDATE\"],as_index = False).mean()[\"PRICE\"]\n sales_data[\"Gross_profit_rate\"] = data.groupby([\"OCRDATE\"],as_index = False).mean()[\"GROSS_PROFIT_RATE\"]\n sales_data[\"Account_date\"]= data.groupby(['OCRDATE']).sum().index\n sales_data[\"Sku_id\"] = [data[\"GDGID\"].iloc[0]]*len(sales_data[\"Sales_qty\"])\n sales_data[\"Dc_name\"] = [data[\"DC_NAME\"].iloc[0]] * len(sales_data[\"Sku_id\"])\n sales_data[\"Dc_code\"] = [data[\"SENDER\"].iloc[0]] * len(sales_data[\"Sku_id\"])\n sales_data[\"Munit\"] = [data[\"MUNIT\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data[\"Wrh\"] = [data[\"WRH\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data[\"Warehouse_name\"] = [data[\"WAREHOUSE_NAME\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data[\"Sku_name\"] = [data[\"SKU_NAME\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data = sales_data.sort_values( by = ['Account_date'], ascending = False)\n return sales_data\n\n#----------------------------------------------------------------------->合并含有节假日对应信息的数据到数据集中\ndef holiday_merge(data,holiday_01,holiday_02):\n holiday = pd.concat([holiday_02,holiday_01],join='outer',axis=0)\n merge_data = pd.merge(data,holiday,on=['Account_date'],how='inner')\n return merge_data\n\n\n#---------------------------------------------------------------------------->对日期没有销量和价格等信息进行补齐操作\ndef date_fill(data,end):#\n yesterday = date_convert(end)\n date_range_sku = pd.date_range(start='20181220', end = yesterday)\n data_sku = 
pd.DataFrame({'Account_date': date_range_sku})\n result = pd.merge(data, data_sku,on=['Account_date'],how='right')\n #如果在某一天没有销量的话,采取补零的操作\n result[\"Sales_qty\"].iloc[np.where(np.isnan(result[\"Sales_qty\"]))] = 0\n result = result.fillna(method='ffill')\n result = result.sort_values([\"Account_date\"], ascending=1)\n return result\n\n\ndef one_hot(data,features):\n # 把带中文的标称属性转��为数值型,因为one-hot编码也需要先转换成数值型,用简单整数代替即可\n data = data[[features]]\n listUniq = data.ix[:, features].unique()\n for j in range(len(listUniq)):\n data.ix[:, features] = data.ix[:, features].apply(lambda x: j if x == listUniq[j] else x)\n # 进行one-hot编码\n # tempdata = data[[features]]\n # enc = preprocessing.OneHotEncoder()\n # enc.fit(tempdata)\n #\n # # one-hot编码的结果是比较奇怪的,最好是先转换成二维数组\n # tempdata = enc.transform(tempdata).toarray()\n # print('取值范围整数个数:', enc.n_values_)\n #\n # # 再将二维数组转换为DataFrame,记得这里会变成多列\n # tempdata = pd.DataFrame(tempdata, columns=[features] * len(tempdata[0]))\n return data\n\n\ndef holiday_features(sales_shop_data):\n sales_shop_data = holiday_merge(sales_shop_data, holiday_2018, holiday_2019)\n tempdata_weekday = one_hot(sales_shop_data, 'Weekday')\n tempdata_solar_festival = one_hot(sales_shop_data, 'Solar_festival')\n tempdata_term_festival = one_hot(sales_shop_data, 'Term_festival')\n tempdata_lunar_festival = one_hot(sales_shop_data, 'Lunar_festival')\n tempdata_chinese_festival = one_hot(sales_shop_data, 'Chinese_festival')\n sales_data = sales_shop_data.drop(\n ['Weekday', 'Chinese_festival', 'Solar_festival', 'Term_festival', 'Lunar_festival'], axis=1)\n original_data = pd.concat(\n [sales_data, tempdata_weekday, tempdata_chinese_festival, tempdata_solar_festival, tempdata_term_festival\n , tempdata_lunar_festival], join='outer', axis=1)\n # original_data.to_csv('D:\\jimmy-ye\\AI_supply_chain\\data\\original_data.csv',encoding='utf_8_sig')\n return original_data\n\n\n#-------------------------------------------------------------------------->统合操作数据清洗后的所有可用的需求预测数据\ndef all_pre_data(good_id,DC_CODE,start_date,end_date):\n\n final_forecast = []\n for i in tqdm(good_id):\n sales_data = get_detail_sales_data(i,start_date,end_date,DC_CODE)\n #-----------------------------------存在某个仓库的sku在某个时间段并没有销售记录\n if sales_data.empty==True:\n pass\n else:\n sales_group = data_group(sales_data)\n sales_shop_data = date_fill(sales_group,end_date)\n sales_shop_data = sales_shop_data.reset_index(drop=True, inplace=False)\n sales_shop_data = date_transform(sales_shop_data)\n print('sales_shop_data')\n print(sales_shop_data)\n original_data = holiday_features(sales_shop_data)\n original_data = weather_feature(original_data)\n final_forecast.append(original_data)\n return final_forecast\n\n\n#------------------------------------------------------------------->构建时间特征,定义打标签\ndef period_of_month(day):\n if day in range(1, 11): return 1\n if day in range(11, 21): return 2\n if day in range(21, 32): return 3\n\ndef period2_of_month(day):\n if day in range(1, 16): return 1\n if day in range(16, 32): return 2\n\ndef week_of_month(day):\n if day in range(1, 8): return 1\n if day in range(8, 15): return 2\n if day in range(15, 22): return 3\n if day in range(22, 32): return 4\n\ndef quarter(month):\n if month in range(1, 4): return 1\n if month in range(4, 7): return 2\n if month in range(7, 10): return 3\n if month in range(10, 13): return 4\n#\n\n\n\n#<---------------------------------------------------------------------->以年月日日期在进行特征构建\ndef time_subset(x):\n x[\"dayofweek\"] = x['Account_date'].apply(lambda x: 
x.dayofweek)\n x[\"weekofyear\"] = x[\"Account_date\"].apply(lambda x: x.weekofyear)\n x['month'] = x['Account_date'].apply(lambda x: x.month)\n x['day'] = x['Account_date'].apply(lambda x: x.day)\n x['year'] = x['Account_date'].apply(lambda x: x.year)\n x['period_of_month'] = x['day'].apply(lambda x: period_of_month(x))\n x['period2_of_month'] = x['day'].apply(lambda x: period2_of_month(x))\n x['week_of_month'] = x['day'].apply(lambda x: week_of_month(x))\n x['quarter'] = x['month'].apply(lambda x: quarter(x))\n return x\n\n#加入天气特征因素\ndef weather_feature(data):\n\n weather_data['Account_date'] = pd.to_datetime(weather_data['Account_date']).dt.normalize()\n merge_data = pd.merge(data,weather_data,on=['Account_date'],how='inner')\n return merge_data\n\n#--------------------------------------------------------------------->构建测试时间范围与基本特征\ndef creat_test(end):\n #先将string日期转成pd的时间格式的日期\n start = pd.to_datetime(datetime. datetime.strptime(end,'%Y%m%d'))\n date_prediction = pd.date_range(start=start,periods=15)\n date_dataframe = pd.DataFrame({\"Account_date\":date_prediction})\n test = time_subset(date_dataframe)\n test = holiday_features(test)\n test = weather_feature(test)\n return test\n\n#--------------------------------------------------------------------->计算数据的峰度\ndef kurtosis_compute(data):\n data_mean = np.mean(data)\n data_var = np.var(data)+0.1\n data_sc = np.mean((data - data_mean) ** 3)\n data_ku = np.mean((data - data_mean) ** 4) / pow(data_var, 2) # 计算峰度\n period2_of_month(data_ku)\n return data_ku\n\n# <-------------------------------------------------------------->构建每一个时间分布上的特征\ndef time_agg(train,test_df,vars_to_agg,vars_be_agg): # 构建时间特征与峰度\n for var in vars_to_agg:\n print(var)\n print('var')\n agg = train.groupby(var)[vars_be_agg].agg([\"sum\", \"mean\", \"std\", \"skew\", \"median\", \"min\", \"max\",\"count\",\n kurtosis_compute])\n print('agg')\n print(agg)\n if isinstance(var, list):\n agg.columns = pd.Index([\"fare_by_\" + \"_\".join(var) + \"_\" + str(e) for e in agg.columns.tolist()])\n else:\n agg.columns = pd.Index([\"fare_by_\" + var + \"_\" + str(e) for e in agg.columns.tolist()])\n train = pd.merge(train, agg.reset_index(), on=var, how = \"left\")\n test_df = pd.merge(test_df, agg.reset_index(), on=var, how = \"left\")\n return train, test_df\n\n\n\n#<---------------------------------------------------------->添加所要所要预测的距离该sku第一次售卖的时间的长度,权重\ndef add_time_diff(train_data,test_data):\n min_date = train_data[\"Account_date\"].min()\n date_train_diff = train_data[\"Account_date\"]-min_date\n date_test_diff = test_data[\"Account_date\"]-min_date\n train_data[\"date_diff\"] = date_train_diff.apply(lambda x:x.days)\n train_data = train_data[train_data[\"date_diff\"]>0]\n train_data[\"date_diff\"] = train_data[\"date_diff\"].apply(lambda x : np.exp(1/x))\n test_data[\"date_diff\"] = date_test_diff.apply(lambda x:x.days)\n test_data = test_data[test_data[\"date_diff\"] > 0]\n test_data[\"date_diff\"] = test_data[\"date_diff\"].apply(lambda x :np.exp(1/x))\n return train_data,test_data\n\n\n#--------------------------------------------------------------------------->构建随机森林模型\ndef construct_randomforest_model(train_feature,train_targe,test):\n rf = RandomForestRegressor(n_estimators=50, max_features=30, max_depth=8, oob_score=True)#<---------参数有待调整\n # rf = RandomForestRegressor(n_estimators=300, max_features=50, max_depth=20, oob_score=True) # <---------参数有待调整\n rf.fit(train_feature,train_targe)\n result = rf.predict(test)\n return 
result\n\n#------------------------------------------------------------------------------>构建KNN模型\ndef construct_KNN_model(train_feature,train_targe,test):\n knn_model = KNeighborsRegressor(n_neighbors=10, leaf_size=13, n_jobs=-1)#<--------------参数有待调整\n knn_model.fit(train_feature,train_targe)\n knn_test_pre = knn_model.predict(test)\n return knn_test_pre\n\n\n#------------------------------------------------------------------------------->对输出的结果进行规范化\ndef prediction_result_Regularization(train,result,test,date_parameter):\n prediction_sales = result\n prediction_date = test[\"Account_date\"]\n DC_code = [np.unique(train[\"Dc_code\"])[0]]*15\n Munit = [np.unique(train[\"Munit\"])[0]] * 15\n Dc_name = [np.unique(train[\"Dc_name\"])[0]]*15\n Wrh = [np.unique(train[\"Wrh\"])[0]] * 15\n Warehouse_name = [np.unique(train[\"Warehouse_name\"])[0]]*15\n Sku_name = [np.unique(train[\"Sku_name\"])[0]] * 15\n sku_id = [np.unique(train[\"Sku_id\"])[0]]*15\n prediction_date_algorithm = [date_parameter]*15\n prediction_df = pd.DataFrame({\"Sku_id\":sku_id,\n \"Account_date\":prediction_date,\n \"Forecast_qty\":prediction_sales,\n \"Belonged_date\":prediction_date_algorithm,\n \"Dc_code\":DC_code,\n \"Munit\":Munit,\n \"Dc_name\": Dc_name,\n \"Wrh\": Wrh,\n \"Warehouse_name\": Warehouse_name,\n \"Sku_name\": Sku_name\n })\n return prediction_df\n\n#------------------------------------------------------------------------------->得到特征的输出结果\ndef prediction_feature_targe(sku_i,end):\n print('sku_i_Data')\n print(sku_i)\n train = time_subset(sku_i)\n test = creat_test(end)\n vars_be_agg = \"Sales_qty\"\n vars_to_agg = [\"dayofweek\", \"weekofyear\", \"month\", \"day\", \"year\", \"period_of_month\", \"period2_of_month\",\n \"week_of_month\", \"quarter\",[\"month\", \"dayofweek\"], [\"quarter\", \"month\"],\n 'Weekday','Chinese_festival','Solar_festival','Term_festival','Lunar_festival',\n 'min','max^2','min_x_max','min^2','max_wind^2','min_wind_x_max_wind','min_wind^2',\n 'general_Label','中到大雨','中雨','多云','多云转晴','大到暴雨','大暴雨','大雨','小到中雨',\n '小到中雨转中到大雨','小到中雨转中雨','小到中雪','小雨','小雨转中雨','小雨转多云',\n '小雨转小到中雨','小雨转晴','晴','暴雨','阴','阴转多云','阴转小雨','雨夹雪','雷阵雨',\n 'wind_direction_Label','东北风','东南风','东风','北风','南风','无持续风向','西北风','西南风'\n ]\n data = time_agg(train, test, vars_to_agg, vars_be_agg)\n train_feature_data = data[0].fillna(0)\n test_feature_data = data[1].fillna(0)\n train_test_data = add_time_diff(train_feature_data,test_feature_data)\n train_feature_data_result = train_test_data[0]\n test_feature_data_result = train_test_data[1]\n print(train_feature_data_result)\n train_feature_data = train_feature_data_result.drop([\"Account_date\",\"Sku_id\",\"Sales_qty\",\n 'Price',\"Gross_profit_rate\",'Dc_code',\n 'Dc_name','Munit','Wrh','Warehouse_name',\n 'Sku_name'],axis=1)\n prediction_feature = list(train_feature_data.columns)\n prediction_target = [\"Sales_qty\"]\n return train,test,train_feature_data_result,test_feature_data_result,prediction_feature,prediction_target\n\n\n#<------------------------------------------------------------------------>构建的随机森林模型得到最终的结果\ndef prediction_sku_sales_RandomForestRegressor_model(sku_i,date_parameter,end):\n prediction_feature_targe_data = prediction_feature_targe(sku_i,end)\n train = prediction_feature_targe_data[0]\n print('train')\n print(train)\n test = prediction_feature_targe_data[1]\n print('test')\n print(test)\n train_feature_data_result = prediction_feature_targe_data[2]\n print('train_feature_data_result')\n print(train_feature_data_result)\n test_feature_data_result = 
prediction_feature_targe_data[3]\n print('test_feature_data_result')\n print(test_feature_data_result)\n prediction_feature = prediction_feature_targe_data[4]\n print('prediction_feature')\n print(prediction_feature)\n prediction_target = prediction_feature_targe_data[5]\n print('prediction_target')\n print(prediction_target)\n prediction_result = list(construct_randomforest_model(train_feature_data_result[prediction_feature],\n train_feature_data_result[prediction_target],\n test_feature_data_result[prediction_feature]))\n result = prediction_result_Regularization(train,prediction_result,test,date_parameter)\n return result\n\n\n#<------------------------------------------------------->构建的KNN模型得到最终的结果\ndef prediction_sku_sales_knn_model(sku_i,date_parameter,end):\n prediction_feature_targe_data = prediction_feature_targe(sku_i,end)\n train = prediction_feature_targe_data[0]\n test = prediction_feature_targe_data[1]\n train_feature_data_result = prediction_feature_targe_data[2]\n test_feature_data_result = prediction_feature_targe_data[3]\n prediction_feature = prediction_feature_targe_data[4]\n prediction_target = prediction_feature_targe_data[5]\n prediction_result = list(itertools.chain.from_iterable(construct_KNN_model(train_feature_data_result[prediction_feature],\n train_feature_data_result[prediction_target],\n test_feature_data_result[prediction_feature])))\n result = prediction_result_Regularization(train,prediction_result,test,date_parameter)\n return result\n\n#<------------------------------------------------------------------>进行模型的融合\ndef RF_KNN_model_merge(sku_i,date_parameter,end):\n RF_result = prediction_sku_sales_RandomForestRegressor_model(sku_i,date_parameter,end)\n knn_result = prediction_sku_sales_knn_model(sku_i,date_parameter,end)\n RF_knn_merge_result = RF_result.copy()\n RF_knn_merge_result[\"Forecast_qty\"] = 0.5*(RF_result[\"Forecast_qty\"]+knn_result[\"Forecast_qty\"])\n return RF_knn_merge_result\n\n\n#<--------------------------------------------------------------------------->定义一个空的数据框\ndef empty_dataframe():\n data = pd.DataFrame(columns = [\"account_date\",'DC_code'\n \"forecast_qty\",\"sku_id\",\"belonged_date\"])\n return data\n\n#<------------------------------------------------------------------>最后预测\ndef prediction_result(end,data):\n date_parameter = date_parameter_read(end)\n final = pd.DataFrame(columns=['Belonged_date',])\n result_forecast = [RF_KNN_model_merge(x,date_parameter,end) for x in data]\n result_forecast = pd.concat(result_forecast).reset_index(drop=True)\n print(\"鲜丰水果预测成功\")\n return result_forecast\n\n#<----------------------------------------------------------------------->规整化数据\ndef Consolidation_data(data):\n data_result = pd.DataFrame({\"Belonged_date\":data[\"Belonged_date\"],\n \"Account_date\":data[\"Account_date\"],\n \"Forecast_qty\":data[\"Forecast_qty\"],\n \"Sku_id\":data[\"Sku_id\"],\n \"Dc_name\": data[\"Dc_name\"],\n \"Dc_code\": data[\"Dc_code\"],\n \"Munit\": data[\"Munit\"],\n \"Warehouse_name\": data[\"Warehouse_name\"],\n \"Sku_name\": data[\"Sku_name\"],\n \"Wrh\": data[\"Wrh\"]\n })\n data_result[\"Belonged_date\"] = data_result[\"Belonged_date\"].apply(lambda x: x.strftime(\"%Y-%m-%d\"))\n data_result[\"Account_date\"] = data_result[\"Account_date\"].apply(lambda x: x.strftime(\"%Y-%m-%d\"))\n return data_result\n\n\n#------------------------------------------------------------------------>设置总函数\ndef main_function(start_date,end_date,n):\n start_time = time.time()\n DC_detail_list = diff_DC(n)\n 
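`empty_dataframe` in this record is missing a comma after `'DC_code'`, so Python's adjacent-string concatenation fuses it with `"forecast_qty"` into a single column named `'DC_codeforecast_qty'` (the function is also never called; `prediction_result` builds its own unused `final` frame). The presumably intended version:

```python
import pandas as pd

# With the comma restored, each label becomes its own column.
def empty_dataframe():
    return pd.DataFrame(columns=["account_date", "DC_code",
                                 "forecast_qty", "sku_id", "belonged_date"])

print(list(empty_dataframe().columns))
```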
print('总共参与预测的配送中心有:',DC_detail_list)\n result_data = pd.DataFrame()\n for i in tqdm(DC_detail_list):\n print('正在进行该配送中心的计算',i)\n sku_id = read_oracle_data(start_date, end_date,i)\n good_id = list(set(sku_id))\n print('总共预测的sku有:', list(good_id))\n data = all_pre_data(good_id,i,start_date,end_date)\n print(str(int(i))+'该配送中心的数据读取完成')\n #------------------------------如果有的配送中心的SKU在某段时间没有销售做一个逻辑判断\n if len(data):\n final_data = prediction_result(end_date, data)\n final_data = Consolidation_data(final_data)\n result_data = result_data.append(final_data)\n else:\n pass\n end_time =time.time()\n total_time = end_time-start_time\n print('程序运行结束,总耗时'+ str(total_time) + '秒')\n return result_data\n\nstart_date = '20181220'\nend_date = '20190615'\nfinal = main_function(start_date,end_date,1)\nfinal.to_csv('D:/jimmy-ye/AI_supply_chain/data/forecast_holiday/final_holiday_weather.csv',encoding='utf_8_sig')\n\n","sub_path":"xianfengsg/forecaset/V1.3/RF_forecast.py","file_name":"RF_forecast.py","file_ext":"py","file_size_in_byte":28272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"16958684","text":"#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n'''\n@Description: 工资记录API\n@Author: Zpp\n@Date: 2020-04-10 14:17:37\n@LastEditors: Zpp\n@LastEditTime: 2020-04-22 15:35:09\n'''\nfrom flask import Blueprint, request\nfrom collection.v2.salary import SalaryModel\nfrom ..token_auth import auth, validate_current_access\nfrom libs.code import ResultDeal\nfrom validate import validate_form\nfrom validate.v2.salary import params\n\nroute_salary = Blueprint('Salary', __name__, url_prefix='/v2/Salary')\nvalidate = validate_form(params)\n\n\n@route_salary.route('/ImportSalary', methods=['POST'], endpoint='ImportSalary')\n@auth.login_required\n@validate_current_access\n@validate.form('ImportSalary')\ndef ImportSalary():\n result = SalaryModel().ImportSalaryRequest(request.files.get('file'), request.form.get('payment_time'))\n\n if type(result).__name__ == 'str':\n return ResultDeal(msg=result, code=-1)\n\n return ResultDeal(data=result)\n\n\n@route_salary.route('/DelSalary', methods=['POST'], endpoint='DelSalary')\n@auth.login_required\n@validate_current_access\n@validate.form('DelSalary')\ndef DelSalary():\n result = SalaryModel().DelSalaryRequest(request.form.getlist('rid[]'))\n\n if type(result).__name__ == 'str':\n return ResultDeal(msg=result, code=-1)\n\n return ResultDeal(data=result)\n\n\n@route_salary.route('/QuerySalaryByParam', methods=['POST'], endpoint='QuerySalaryByParam')\n@auth.login_required\n@validate_current_access\n@validate.form('QuerySalaryByParam')\ndef QuerySalaryByParam():\n params = {}\n Ary = ['name', 'company', 'payment_time']\n for i in Ary:\n if request.form.get(i):\n params[i] = request.form.get(i)\n\n result = SalaryModel().QuerySalaryByParamRequest(\n params=params,\n page=int(request.form.get('page')),\n page_size=int(request.form.get('page_size'))\n )\n\n if type(result).__name__ == 'str':\n return ResultDeal(msg=result, code=-1)\n\n return ResultDeal(data=result)\n\n\n@route_salary.route('/ImportAttendance', methods=['POST'], endpoint='ImportAttendance')\n@auth.login_required\n@validate_current_access\n@validate.form('ImportAttendance')\ndef ImportAttendance():\n result = SalaryModel().ImportAttendanceRequest(request.files.get('file'), request.form.get('attendance_time'))\n\n if type(result).__name__ == 'str':\n return ResultDeal(msg=result, code=-1)\n\n return ResultDeal(data=result)\n\n\n@route_salary.route('/DelAttendance', 
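`main_function` accumulates the per-distribution-center results with `result_data.append(final_data)`; `DataFrame.append` is deprecated and was removed in pandas 2.0. Collecting the frames in a list and concatenating once is the supported (and faster) equivalent, sketched here with illustrative DC codes:

```python
import pandas as pd

# Same accumulation as main_function's result_data, without DataFrame.append.
frames = []
for dc in [1001, 1002]:   # illustrative DC codes
    frames.append(pd.DataFrame({"Dc_code": [dc], "Forecast_qty": [1.0]}))
result_data = pd.concat(frames, ignore_index=True)
print(result_data)
```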
methods=['POST'], endpoint='DelAttendance')\n@auth.login_required\n@validate_current_access\n@validate.form('DelAttendance')\ndef DelAttendance():\n result = SalaryModel().DelAttendanceRequest(request.form.getlist('rid[]'))\n\n if type(result).__name__ == 'str':\n return ResultDeal(msg=result, code=-1)\n\n return ResultDeal(data=result)\n\n\n@route_salary.route('/QueryAttendanceByParam', methods=['POST'], endpoint='QueryAttendanceByParam')\n@auth.login_required\n@validate_current_access\n@validate.form('QueryAttendanceByParam')\ndef QueryAttendanceByParam():\n params = {}\n Ary = ['name', 'attendance_time']\n for i in Ary:\n if request.form.get(i):\n params[i] = request.form.get(i)\n\n result = SalaryModel().QueryAttendanceByParamRequest(\n params=params,\n page=int(request.form.get('page')),\n page_size=int(request.form.get('page_size'))\n )\n\n if type(result).__name__ == 'str':\n return ResultDeal(msg=result, code=-1)\n\n return ResultDeal(data=result)\n","sub_path":"trunk/routes/v2/salary.py","file_name":"salary.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619550803","text":"\"\"\"resttest URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom quickstart import views\nfrom quickstart.models import Dogs, Breed\n\n\n#Sets the URLs that Django will use for the requests and assigns it a view\nurlpatterns = [\n path(\n 'dogs/',\n views.DogList.as_view(),\n name='GET_POST_DogList'\n ),\n path(\n 'dogs//',\n views.DogDetail.as_view(),\n name='GET_PUT_DELETE_DogDetail'\n ),\n path(\n 'breeds/',\n views.BreedList().as_view(),\n name='GET_POST_BreedList'\n ),\n path(\n 'breeds//',\n views.BreedDetail.as_view(),\n name='GET_PUT_DELETE_Breed_Detail'\n ),\n]\n","sub_path":"resttest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216426642","text":"#Sort an array of 0s, 1s and 2s\n\ndef seggreegate_arr(arr):\n zerocount = 0\n onecount = 0\n twocount = 0\n\n for x in range(len(arr)):\n if arr[x] == 0:\n zerocount+=1\n\n if arr[x] == 1:\n onecount+=1\n \n if arr[x] == 2:\n twocount+=1\n \n print(zerocount)\n print(onecount)\n print(twocount)\n\n\n arr.clear()\n\n arr = [0] * zerocount\n \n arr.extend([1]* onecount)\n\n arr.extend([2]* twocount)\n\n\n\n print(arr)\n\n\n \n\n\nif __name__ == '__main__':\n arr = [0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1]\n\n seggreegate_arr(arr)\n\n'''\nT.C = O(n)\n\nS.C = O(1)\n\n\n\n\n'''","sub_path":"mycodes/arrays/p23/p23.py","file_name":"p23.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238998140","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 
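The `seggreegate_arr` record above sorts 0s, 1s, and 2s by counting and rebuilding the list, so its claimed O(1) space holds only for the counters, not for the rebuilt array. The classic single-pass, truly in-place alternative is the Dutch national flag partition:

```python
# One pass, in place: 0s swap to the front, 2s to the back, 1s stay put.
def sort012(arr):
    low, mid, high = 0, 0, len(arr) - 1
    while mid <= high:
        if arr[mid] == 0:
            arr[low], arr[mid] = arr[mid], arr[low]
            low += 1
            mid += 1
        elif arr[mid] == 1:
            mid += 1
        else:
            arr[mid], arr[high] = arr[high], arr[mid]
            high -= 1
    return arr

print(sort012([0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1]))
```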
(62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/acclaim_badges/forms.py\n# Compiled at: 2017-06-02 15:35:20\nfrom django import forms\nfrom acclaim_badges.models import AcclaimToken\nfrom acclaim_badges.models import BadgeCourse\nfrom acclaim_badges.models import AcclaimApi\nfrom openedx.core.djangoapps.content.course_overviews.models import CourseOverview\n\nclass BadgeCourseForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(BadgeCourseForm, self).__init__(*args, **kwargs)\n acclaim_api = AcclaimApi()\n template_list = acclaim_api.template_choices()\n badge_default = [('', 'Select Badge')]\n course_default = [('', 'Select Course')]\n self.fields['badge_template'] = forms.ChoiceField(choices=badge_default + template_list)\n make_tuple = lambda x: (\n str(x.id), x.display_name)\n courses = CourseOverview.objects.all()\n self.fields['edx_course'] = forms.ChoiceField(choices=course_default + map(make_tuple, courses))\n\n class Meta:\n model = BadgeCourse\n fields = ['badge_template', 'edx_course']\n\n\nclass AcclaimTokenForm(forms.ModelForm):\n auth_token = forms.CharField(widget=forms.PasswordInput())\n\n class Meta:\n model = AcclaimToken\n fields = ['auth_token', 'organization_id', 'url']","sub_path":"pycfiles/acclaim_badges-0.1.0-py2.7/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"550390079","text":"import io\nimport json\nimport os\nimport shutil\nfrom datetime import datetime\n\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom flask import Flask, jsonify, request\n\n\napp = Flask(__name__)\nmodel = models.densenet121(pretrained=True) # Trained on 1000 classes from ImageNet\nmodel.eval() # Turns off autograd and\n\n\n\nimg_class_map = None\nmapping_file_path = 'index_to_name.json' # Human-readable names for Imagenet classes\nif os.path.isfile(mapping_file_path):\n with open (mapping_file_path) as f:\n img_class_map = json.load(f)\n\n\n\n# Transform input into the form our model expects\ndef transform_image(infile):\n input_transforms = [transforms.Resize(255), # We use multiple TorchVision transforms to ready the image\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], # Standard normalization for ImageNet model input\n [0.229, 0.224, 0.225])]\n my_transforms = transforms.Compose(input_transforms)\n image = Image.open(infile) # Open the image file\n timg = my_transforms(image) # Transform PIL image to appropriately-shaped PyTorch tensor\n timg.unsqueeze_(0) # PyTorch models expect batched input; create a batch of 1\n return timg\n\n\n# Get a prediction\ndef get_prediction(input_tensor):\n outputs = model.forward(input_tensor) # Get likelihoods for all ImageNet classes\n _, y_hat = outputs.max(1) # Extract the most likely class\n prediction = y_hat.item() # Extract the int value from the PyTorch tensor\n return prediction\n\n# Make the prediction human-readable\ndef render_prediction(prediction_idx):\n stridx = str(prediction_idx)\n class_name = 'Unknown'\n if img_class_map is not None:\n if stridx in img_class_map is not None:\n class_name = img_class_map[stridx][1]\n\n return prediction_idx, class_name\n\n\n@app.route('/', methods=['GET'])\ndef root():\n return jsonify({'msg' : 'Try POSTing to the /predict endpoint with an RGB image 
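The densenet Flask record's truncated comment suggests `model.eval()` "turns off autograd", but `eval()` only switches layers such as dropout and batch norm to inference behavior; gradient tracking is disabled by `torch.no_grad()`. A minimal sketch of the prediction path with that context manager added:

```python
import torch
import torchvision.models as models

# eval() changes layer behavior; no_grad() is what skips autograd bookkeeping.
model = models.densenet121(pretrained=True)
model.eval()

def get_prediction(input_tensor):
    with torch.no_grad():
        outputs = model(input_tensor)      # logits for the 1000 ImageNet classes
    return outputs.argmax(dim=1).item()

print(get_prediction(torch.randn(1, 3, 224, 224)))
```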
attachment'})\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n if os.path.exists(\"/home/azureuser/articherons-flask-test/runs/detect/exp\"):\n shutil.rmtree(\"/home/azureuser/articherons-flask-test/runs/detect/exp\") \n file = request.files['file']\n image_name = str(datetime.timestamp(datetime.now())) + \".jpg\"\n image_path = os.path.join(os.getcwd(), \"runs\", \"detect\", \"exp\", image_name)\n print(image_path)\n file.save(\"test.jpg\")\n print(\"avant\")\n os.system(\"python /home/azureuser/articherons-flask-test/yolov5/detect.py --save-txt --weights /home/azureuser/Downloads/best.pt --img 832 --conf 0.4 --source /home/azureuser/articherons-flask-test/test.jpg \" + \"--pathSave \" + image_path)\n print(\"apres\")\n file_label = open('/home/azureuser/articherons-flask-test/runs/detect/exp/labels/test.txt', 'r') \n nb_boites = len(file_label.readlines())\n print(\"Le nombre de boite est de : \" + str(nb_boites) )\n if file is not None:\n input_tensor = transform_image(file)\n prediction_idx = get_prediction(input_tensor)\n class_id, class_name = render_prediction(prediction_idx)\n return jsonify({'class_id': class_id, 'class_name': class_name, 'nb_billons': nb_boites, \"image_name\" : image_name })\n #return jsonify({\"nb_billons\": nb_boites})\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412416280","text":"# -*- coding: utf-8 -*-\n\n'''\n\n Copyright 2014-2015 The pyd3t Developers\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n'''\n\nimport itertools\nimport logging\nfrom collections import deque\n\nimport d3t\nimport devs\nfrom d3t.components import TransporterClass\n\n\nclass TransporterModel(TransporterClass):\n\n MODEL_OUTPUT_CLASS = set([\n 'init',\n 'jobstart',\n 'pickup',\n 'departure',\n 'arrival',\n 'delivery',\n 'jobend',\n 'emptyqueue',\n ])\n MODEL_INPUT_CLASS = set(['submit', 'cancel'])\n\n HAS_PICKUP_PHASE = False\n HAS_DELIVERY_PHASE = False\n\n new_job_id = itertools.count()\n\n def __init__(\n self, space, id, position,\n pickup_period_function=None,\n delivery_period_function=None,\n ):\n \"\"\"\n Construct new Transporter instance.\n\n Parameters\n ----------\n space\n Transport space\n\n\n \"\"\"\n self._logger = logging.getLogger(\n 'd3t.' 
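In the transporter record, `new_job_id = itertools.count()` is a class attribute, so job ids are drawn from one counter shared by every `TransporterModel` instance in the process. A tiny demo of that semantics:

```python
import itertools

# Class-level counter: ids are globally unique across instances, not per instance.
class Demo:
    new_job_id = itertools.count()

    def __init__(self):
        self.job_id = next(Demo.new_job_id)

a, b, c = Demo(), Demo(), Demo()
print(a.job_id, b.job_id, c.job_id)   # 0 1 2
```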
+ self.__class__.__name__ + '.{}'.format(id)\n )\n self._logger.debug('Set up logging.')\n self._space = space\n self._id = id\n self._position = position\n self._ita = 0.0\n self.HAS_INITIAL_PHASE = (\n 'init' in self.OUTPUT_PORTS\n or 'emptyqueue' in self.OUTPUT_PORTS\n )\n self._pickup_period_function = pickup_period_function\n self._delivery_period_function = delivery_period_function\n self.HAS_PICKUP_PHASE = (pickup_period_function is not None)\n self.HAS_DELIVERY_PHASE = (delivery_period_function is not None)\n self._initial = self.HAS_INITIAL_PHASE\n self._idle = not self.HAS_INITIAL_PHASE\n self._pickup = False\n self._travel = False\n self._delivery = False\n self._cancel = False\n self._payload = set()\n self._scheduled_pickup_set = set()\n self._scheduled_destination = position\n self._scheduled_delivery_set = set()\n self._queue = deque()\n self._current_job_id = None\n self._next_job_id = next(TransporterModel.new_job_id)\n\n def _is_delivery_imminent(self):\n ret = bool(\n self._travel and self._scheduled_delivery_set and not self._cancel\n )\n self._logger.debug('Delivery imminent: {}'.format(ret))\n return ret\n\n def _is_delivery(self):\n ret = (\n self._delivery if self.HAS_DELIVERY_PHASE\n else self._is_delivery_imminent()\n )\n self._logger.debug('Delivery: {}'.format(ret))\n return ret\n\n def _is_job_end(self):\n ret = (\n (self._pickup and self._cancel)\n or\n (self._travel and not self._is_delivery_imminent())\n or\n self._is_delivery()\n ) if self.HAS_DELIVERY_PHASE else self._travel\n self._logger.debug('Job end: {}'.format(ret))\n return ret\n\n def _is_job_start(self):\n ret = bool(\n self._queue and (self._is_job_end() or self._idle)\n )\n self._logger.debug('Job start: {}'.format(ret))\n return ret\n\n def _next_pickup_set(self):\n ret = (\n self._queue[0][0] if self._is_job_start()\n else self._scheduled_pickup_set\n )\n self._logger.debug('Next pick-up set: {}'.format(ret))\n return ret\n\n def _is_pickup_imminent(self):\n ret = bool(\n self._next_pickup_set() if self._is_job_start() else False\n )\n self._logger.debug('Pick-up imminent: {}'.format(ret))\n return ret\n\n def _is_pickup(self):\n ret = (\n self._pickup if self.HAS_PICKUP_PHASE\n else self._is_pickup_imminent()\n )\n self._logger.debug('Pick-up: {}'.format(ret))\n return ret\n\n def _is_travel_imminent(self):\n ret = (\n (self._is_job_start() and not self._is_pickup_imminent())\n or\n (self._pickup and not self._cancel)\n ) if self.HAS_PICKUP_PHASE else self._is_job_start()\n self._logger.debug('Pick-up imminent: {}'.format(ret))\n return ret\n\n def internal_transition(self):\n\n self._logger.debug('Internal transition')\n\n if self.HAS_INITIAL_PHASE:\n if self._initial:\n self._initial = False\n self._idle = True\n self._logger.info('INITIAL -> IDLE')\n return\n\n delivery_imminent = self._is_delivery_imminent()\n delivery = self._is_delivery()\n job_end = self._is_job_end()\n job_start = self._is_job_start()\n next_pickup_set = self._next_pickup_set()\n pickup_imminent = self._is_pickup_imminent()\n pickup = self._is_pickup()\n travel_imminent = self._is_travel_imminent()\n\n # FROM transition\n if pickup:\n self._payload |= next_pickup_set\n self._logger.info('Pick-up loads {}, new payload: {}'.format(\n next_pickup_set, self._payload\n ))\n\n if self._travel:\n self._position = self._scheduled_destination\n self._logger.info('Arrive at position {}'.format(self._position))\n\n if delivery:\n self._payload -= self._scheduled_delivery_set\n self._logger.info('Deliver loads {}, new payload: 
{}'.format(\n self._scheduled_delivery_set, self._payload\n ))\n\n # NEXTJOB transition\n if job_start:\n next_job = self._queue.popleft()\n (\n self._scheduled_pickup_set,\n self._scheduled_destination,\n self._scheduled_delivery_set,\n ) = next_job\n self._current_job_id = self._next_job_id\n self._next_job_id = next(TransporterModel.new_job_id)\n self._logger.info(\n 'Start new job {}: {}'.format(self._current_job_id, next_job)\n )\n\n # TO transition\n if self.HAS_PICKUP_PHASE and pickup_imminent:\n self._ita = self._pickup_period_function(\n self._position,\n self._scheduled_pickup_set\n )\n self._pickup = True\n self._idle = self._travel = self._delivery = False\n self._cancel = False\n return\n\n if travel_imminent:\n self._ita = self._space.distance(\n self._position, self._scheduled_destination\n )\n self._travel = True\n self._idle = self._pickup = self._delivery = False\n self._cancel = False\n self._logger.info('-> TRAVEL to destination {}, ETA {}'.format(\n self._scheduled_destination,\n self._ita\n ))\n return\n\n if self.HAS_DELIVERY_PHASE and delivery_imminent:\n self._ita = self._delivery_period_function(\n self._position,\n self._scheduled_delivery_set\n )\n self._delivery = True\n self._travel = False\n return\n\n if job_end and not self._queue:\n self._ita = devs.infinity\n self._idle = True\n self._pickup = self._travel = self._delivery = False\n self._cancel = False\n self._logger.info('-> IDLE')\n return\n\n raise RuntimeError(\"Transporter delta_int error\")\n\n def external_transition(self, elapsed_time, input_events):\n\n self._ita = max(self._ita - elapsed_time, 0.0)\n\n if 'cancel' in self.INPUT_PORTS:\n # look for cancel event\n for event in input_events:\n if event.type == 'cancel':\n # received cancel event\n self._cancel = True\n self._queue.clear()\n if self._travel:\n self._scheduled_destination, self._ita = (\n self._space.intermediate_point(\n self._position,\n self._scheduled_destination,\n self._ita\n )\n )\n\n # look for exactly one submit event\n for event in input_events:\n if event.type == 'submit':\n jobs = event[1]\n if event[2]: # replace?\n self._queue.clear()\n self._queue.extend(jobs)\n\n def confluent_transition(self, input_events):\n self.internal_transition()\n self.external_transition(0.0, input_events)\n\n def output(self):\n\n ret = list()\n\n current_position = (\n self._scheduled_destination if self._travel else self._position\n )\n\n next_destination = (\n self._queue[0][1]\n if self._is_job_start() and self._is_travel_imminent()\n else self._scheduled_destination\n )\n\n current_payload = set()\n if {'jobstart', 'jobend'} & self.OUTPUT_PORTS:\n if (\n ('jobstart' in self.OUTPUT_PORTS and self._is_job_start())\n or\n ('jobend' in self.OUTPUT_PORTS and self._is_job_end())\n ):\n current_payload |= self._payload\n\n if self._is_delivery():\n current_payload -= self._scheduled_delivery_set\n\n if self.HAS_PICKUP_PHASE and self._pickup:\n current_payload |= self._scheduled_pickup_set\n\n if 'init' in self.OUTPUT_PORTS and self._initial:\n ret.append(d3t.InitEvent(self._id, current_position))\n\n if 'jobstart' in self.OUTPUT_PORTS and self._is_job_start():\n ret.append(d3t.JobstartEvent(self._id, self._next_job_id))\n\n if 'pickup' in self.OUTPUT_PORTS and self._is_pickup():\n job_id = (\n self._next_job_id\n if self._is_job_start()\n else self._current_job_id\n )\n ret.append(d3t.PickupEvent(\n self._id, job_id, self._next_pickup_set()\n ))\n\n if 'departure' in self.OUTPUT_PORTS and self._is_travel_imminent():\n job_id = (\n 
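The transporter's `_queue` is a `deque` of `(pickup_set, destination, delivery_set)` triples: `popleft()` in the job-start branch gives O(1) FIFO dispatch, and `clear()` implements the cancel event. A sketch with made-up loads and positions:

```python
from collections import deque

queue = deque()
queue.append(({"crate_1"}, (3.0, 4.0), {"crate_1"}))   # illustrative job
queue.append((set(), (0.0, 0.0), set()))               # reposition-only job

pickup_set, destination, delivery_set = queue.popleft()
print(pickup_set, destination, delivery_set)
```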
self._next_job_id\n if self._is_job_start()\n else self._current_job_id\n )\n ret.append(d3t.DepartureEvent(\n self._id, job_id, next_destination, self._space.distance(\n current_position, next_destination\n )\n ))\n\n if 'arrival' in self.OUTPUT_PORTS and self._travel:\n ret.append(d3t.ArrivalEvent(\n self._id, self._current_job_id, self._scheduled_destination\n ))\n\n if 'delivery' in self.OUTPUT_PORTS and self._is_delivery():\n ret.append(d3t.DeliveryEvent(\n self._id, self._current_job_id, self._scheduled_delivery_set\n ))\n\n if 'jobend' in self.OUTPUT_PORTS and self._is_job_end():\n ret.append(d3t.JobendEvent(self._id, self._current_job_id))\n\n if 'emptyqueue' in self.OUTPUT_PORTS:\n if (\n (self._is_job_end() and not self._queue)\n or self._initial\n ):\n ret.append(d3t.EmptyqueueEvent(self._id))\n\n self._logger.info('Output events {}'.format(ret))\n return ret\n\n def time_advance(self):\n if self._idle:\n return 0.0 if self._queue else devs.infinity\n if self.HAS_INITIAL_PHASE and self._initial:\n return 0.0\n return self._ita\n","sub_path":"Hausarbeit/Programm/d3t-0.1.post.dev8.pre/d3t/transporter.py","file_name":"transporter.py","file_ext":"py","file_size_in_byte":12040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197259845","text":"#!/usr/bin/env python3\n\nimport os\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nimport csv\nimport pandas as pd\n\n\ndef print_areas():\n url = 'https://27crags.com'\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n\n areas_script = soup.find_all('script')[14].string[:-127] + '}'\n\n areas_data = json.loads(areas_script)\n areas_DF = pd.DataFrame(areas_data['areas'])\n areas_DF.columns = [\"Name\",\"Country\",\"Area_Id\"]\n areas_sweden_DF = areas_DF[areas_DF['Country']=='Sweden']\n #areas_spain_DF = areas_spain_DF.drop_duplicates()\n print(areas_sweden_DF)\n\n \ndef print_grades():\n url = 'https://27crags.com'\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n\n grades_script = soup.find_all('script')[8].string[24:-234]\n \n grades_data = json.loads(grades_script)\n grades_DF = pd.DataFrame.from_dict(grades_data, orient='index')\n grades_DF.columns = [\"US\",\"Hueco\",\"Australian\",\"Font\",\"French\",\"UIAA\"]\n print(grades_DF[:-1])\n \n\ndef my_routes(area, grade_min, grade_max):\n url = 'https://27crags.com/areas/' + area + '/routelist?grade_min=' + grade_min + '&grade_max=' + grade_max + '&Sport=1'\n r = requests.get(url)\n print(url)\n\n soup = BeautifulSoup(r.content, 'html.parser')\n\n routes_script = soup.find(\"script\", {\"class\": \"js-react-on-rails-component\", \"data-component-name\": \"RouteList\"}).string\n \n routes_data = json.loads(routes_script)\n \n keys = routes_data['routes'][0].keys()\n\n with open('routes.csv','w') as f:\n dict_writer = csv.DictWriter(f, keys)\n dict_writer.writeheader()\n dict_writer.writerows(routes_data['routes'])\n \n routes_DF = pd.read_csv('routes.csv')\n result_DF = routes_DF.loc[(routes_DF['grade_int'] >= int(grade_min)) & (routes_DF['grade_int'] <= int(grade_max))]\n result_DF = result_DF.loc[result_DF['genre'] == 'Sport']\n result_DF = result_DF.sort_values(by='rating', ascending=False)\n os.remove('routes.csv')\n \n result_DF.drop(['video_count','discussion_count','crimpers','slopers','jugs','fingery','powerful',\n 'dyno','endurance','technical','mental','roof','overhang','vertical','slab',\n 
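In the 27crags scraper record, `print_areas` and `print_grades` locate embedded JSON by script position (`soup.find_all('script')[14]`), which breaks whenever the page layout shifts. `my_routes` already demonstrates the sturdier pattern of matching by attributes; a self-contained sketch of that approach:

```python
import json
from bs4 import BeautifulSoup

# Select the script tag by its attributes rather than its index in the page.
html = ('<script class="js-react-on-rails-component" '
        'data-component-name="RouteList">{"routes": []}</script>')
soup = BeautifulSoup(html, "html.parser")
tag = soup.find("script", {"data-component-name": "RouteList"})
print(json.loads(tag.string))
```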
'traverse','sitstart','topslasthold','tradgear_required','dangerous','crack',\n 'pockets','tufas'], axis=1, inplace=True)\n \n result_DF.to_csv('result_area_' + area + '_grade_min' + grade_min + '_grade_max' + grade_max + '.csv', index=False)\n\n\nprint_areas()\nlocation = str(input('Enter area_id: '))\n\n\nprint_grades() \ng_min = str(input('Enter minimum climbing level: '))\ng_max = str(input('Enter maximum climbing level: '))\n\nmy_routes(location, g_min, g_max)\n\nprint('Your file has been created and saved correctly.')\n","sub_path":"Desktop/Web scraping/PRAC1.py","file_name":"PRAC1.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28841294","text":"access_template = [\n 'switchport mode access', 'switchport access vlan {}',\n 'switchport nonegotiate', 'spanning-tree portfast',\n 'spanning-tree bpduguard enable'\n]\n\ntrunk_template = [\n 'switchport trunk encapsulation dot1q', 'switchport mode trunk',\n 'switchport trunk allowed vlan {}'\n]\naccess_template.append('Введите номер VLAN:')\ntrunk_template.append('Введите разрешенные VLANы:')\ntemplates={'access': access_template, 'trunk': trunk_template }\nportmode=input('Введите режим работы интерфейса (access/trunk): ')\nintf=input('Введите тип и номер интерфейса: ')\nquestion=templates[portmode].pop(-1)\nvlans=input(question)\nprint('interface '+intf+'\\n'+('\\n').join(templates[portmode]).format(vlans))\n","sub_path":"tasks/Par5/5.3a.py","file_name":"5.3a.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"331530227","text":"def solution(array, commands):\n answer = []\n for index in range(len(commands)):\n condition = commands[index]\n each = array[condition[0] - 1:condition[1]]\n each.sort()\n answer.append(each[condition[2] - 1])\n\n return answer\n\n\ndef other(array, commands):\n answer = []\n for command in commands:\n index, last_index, point = command\n answer.append(sorted(array[index - 1:last_index])[point - 1])\n return answer\n\n\nprint(solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))\nprint(other([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))\n","sub_path":"programmers-1/KNumber.py","file_name":"KNumber.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591492254","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport os\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\n\nclass REPTILE:\n \"\"\" This class defines the model trained with the REPTILE algorithm.\n\n \"\"\"\n\n def __init__(self, sess, args, seed, n_train_tasks, input_shape):\n random.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n self.lr = args.lr\n self.meta_lr = args.meta_lr\n self.meta_epochs = args.meta_epochs\n self.K = args.K\n self.num_updates = args.num_updates\n self.bn = args.bn\n self.dataset = args.dataset\n self.sess = sess\n self.summary = False\n self.summary_dir = args.summary_dir\n\n if(self.summary_dir):\n self.summary = True\n self.summary_interval = 100\n summaries_list_metatrain = []\n summaries_list_val = []\n summaries_list_test_restore_val = []\n else:\n self.summary_dir = \"no_summary\"\n\n self.stop_grad = args.stop_grad\n\n self.n_queries = args.n_queries\n # dataset specific variables\n self.n_classes = 1\n\n self.input_shape = input_shape\n\n 
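The KNumber record's `other()` answers each query with a sorted slice indexed at `point - 1`. For large slices where only the k-th smallest is needed, `heapq.nsmallest` avoids sorting the whole slice; a sketch that reproduces the first sample query:

```python
import heapq

# Same answer as sorted(array[i-1:j])[k-1] without a full sort of the slice.
def kth_number(array, i, j, k):
    return heapq.nsmallest(k, array[i - 1:j])[-1]

print(kth_number([1, 5, 2, 6, 3, 7, 4], 2, 5, 3))   # -> 5, matching solution()
```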
self.n_train_tasks = n_train_tasks\n\n # number of tasks to sample per meta-training iteration\n\n self.n_sample_tasks = 8\n self.flatten = tf.keras.layers.Flatten()\n\n # build model\n self.layers = []\n if(args.filters == \"\"):\n self.filter_sizes = []\n else:\n self.filter_sizes = [int(i) for i in args.filters.split(' ')]\n self.kernel_sizes = [int(i) for i in args.kernel_sizes.split(' ')]\n if(len(self.filter_sizes) == 1):\n self.layers.append(\n tf.keras.layers.Conv2D(\n filters=self.filter_sizes[0],\n kernel_size=self.kernel_sizes[0],\n input_shape=(None,) + input_shape,\n strides=1,\n padding='same',\n activation='relu',\n name='conv_last'))\n\n if(self.bn):\n self.layers.append(\n tf.keras.layers.BatchNormalization(\n name='bn_c_last'))\n\n else:\n self.layers.append(\n tf.keras.layers.Conv2D(\n filters=self.filter_sizes[0],\n kernel_size=self.kernel_sizes[0],\n input_shape=(None,) + input_shape,\n strides=1,\n padding='same',\n activation='relu',\n name='conv0'))\n if(self.bn):\n self.layers.append(\n tf.keras.layers.BatchNormalization(\n name='bn_c0'))\n\n for i in range(1, len(self.filter_sizes)):\n if(i != len(self.filter_sizes) - 1):\n self.layers.append(\n tf.keras.layers.Conv2D(\n filters=self.filter_sizes[i],\n kernel_size=self.kernel_sizes[i],\n strides=1,\n padding='same',\n activation='relu',\n name='conv' + str(i)))\n if(self.bn):\n self.layers.append(\n tf.keras.layers.BatchNormalization(\n name='bn_c' + str(i)))\n\n else:\n self.layers.append(\n tf.keras.layers.Conv2D(\n filters=self.filter_sizes[i],\n kernel_size=self.kernel_sizes[i],\n strides=1,\n padding='same',\n activation='relu',\n name='conv_last'))\n if(self.bn):\n self.layers.append(\n tf.keras.layers.BatchNormalization(\n name='bn_c_last'))\n\n if(args.dense_layers == \"\"):\n self.dense_sizes = []\n else:\n self.dense_sizes = [int(i) for i in args.dense_layers.split(' ')]\n\n for i in range(0, len(self.dense_sizes)):\n if(len(self.filter_sizes) == 0 and i == 0):\n self.layers.append(\n tf.keras.layers.Dense(\n units=self.dense_sizes[i],\n activation='relu',\n input_shape=(\n None,\n ) + input_shape,\n name='dense' + str(i)))\n else:\n self.layers.append(\n tf.keras.layers.Dense(\n units=self.dense_sizes[i],\n activation='relu',\n name='dense' + str(i)))\n if(self.bn):\n self.layers.append(tf.keras.layers.BatchNormalization(\n name='bn_d' + str(i + len(self.filter_sizes))))\n\n self.layers.append(\n tf.keras.layers.Dense(\n units=self.n_classes,\n name='dense_last'))\n\n # loss function\n self.loss_fct = tf.nn.sigmoid_cross_entropy_with_logits\n\n self.X_finetune = tf.placeholder(\n tf.float32, (None,) + input_shape, name='X_finetune')\n self.Y_finetune = tf.placeholder(\n tf.float32, (None, self.n_classes), name='Y_finetune')\n\n self.X_outer_loop = tf.placeholder(\n tf.float32, (None,) + input_shape, name='X_outer_loop')\n self.Y_outer_loop = tf.placeholder(\n tf.float32, (None, self.n_classes), name='Y_outer_loop')\n\n self.construct_forward = tf.make_template(\n 'construct_forward', self.feed_forward)\n\n finetune_output = self.construct_forward(\n self.X_finetune, training=True)\n\n finetune_loss = tf.reduce_mean(\n self.loss_fct(\n labels=self.Y_finetune,\n logits=finetune_output))\n\n self.inner_loop_optimizer = tf.train.GradientDescentOptimizer(\n self.lr)\n finetune_update_op = self.inner_loop_optimizer.minimize(finetune_loss)\n for i in range(1, self.num_updates):\n with tf.control_dependencies([finetune_update_op]):\n finetune_output = self.construct_forward(\n self.X_finetune, training=True)\n 
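The REPTILE `__init__` duplicates nearly identical `Conv2D`/`BatchNormalization` construction across first, middle, and last branches. A hedged sketch folding them into one loop, keeping the record's naming scheme (`conv0`…`conv_last`, `bn_c0`…`bn_c_last`); the `input_shape` argument is omitted for brevity since Keras infers it on first call:

```python
import tensorflow as tf

def build_conv_stack(filter_sizes, kernel_sizes, use_bn):
    layers = []
    n = len(filter_sizes)
    for i, (f, k) in enumerate(zip(filter_sizes, kernel_sizes)):
        suffix = '_last' if i == n - 1 else str(i)
        layers.append(tf.keras.layers.Conv2D(
            filters=f, kernel_size=k, strides=1, padding='same',
            activation='relu', name='conv' + suffix))
        if use_bn:
            layers.append(tf.keras.layers.BatchNormalization(name='bn_c' + suffix))
    return layers

print([layer.name for layer in build_conv_stack([32, 64], [3, 3], True)])
```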
finetune_loss = tf.reduce_mean(\n self.loss_fct(\n labels=self.Y_finetune,\n logits=finetune_output))\n\n finetune_update_op = self.inner_loop_optimizer.minimize(\n finetune_loss)\n\n if(self.bn):\n self.updated_bn_model = self.assign_stats(self.X_finetune)\n with tf.control_dependencies([finetune_update_op]):\n with tf.control_dependencies([self.updated_bn_model]):\n self.outer_loop_output = self.construct_forward(\n self.X_outer_loop, training=True)\n\n else:\n with tf.control_dependencies([finetune_update_op]):\n self.outer_loop_output = self.construct_forward(\n self.X_outer_loop, training=True)\n\n self.outer_loop_loss = tf.reduce_mean(\n self.loss_fct(\n labels=self.Y_outer_loop,\n logits=self.outer_loop_output))\n\n self.meta_optimizer = tf.train.AdamOptimizer(self.meta_lr)\n\n self.meta_gradient = self.meta_optimizer.compute_gradients(\n self.outer_loop_loss)\n\n self.placeholder_gradients = []\n for grad_var in self.meta_gradient:\n self.placeholder_gradients.append(\n (tf.placeholder(tf.float32, shape=grad_var[0].get_shape()), grad_var[1]))\n\n self.meta_update_op = self.meta_optimizer.apply_gradients(\n self.placeholder_gradients)\n\n self.test_output = self.construct_forward(\n self.X_finetune, training=False)\n self.test_loss = tf.reduce_mean(self.loss_fct(\n labels=self.Y_finetune,\n logits=self.test_output))\n\n self.my_acc, self.my_precision, self.my_recall, self.my_specificity, self.my_f1_score, self.my_auc_pr = self.compute_metrics(\n self.test_output, self.Y_finetune)\n\n val_finetune_output = self.construct_forward(\n self.X_finetune, training=True)\n val_finetune_loss = tf.reduce_mean(\n self.loss_fct(\n labels=self.Y_finetune,\n logits=val_finetune_output))\n self.val_finetune_update_op = self.inner_loop_optimizer.minimize(\n val_finetune_loss, name='val_finetune_update_op')\n\n if(self.summary):\n summaries_list_val.append(\n tf.summary.scalar('val_test_loss', self.test_loss))\n summaries_list_val.append(\n tf.summary.scalar('val_accuracy', self.my_acc))\n summaries_list_val.append(\n tf.summary.scalar('val_precision', self.my_precision))\n summaries_list_val.append(\n tf.summary.scalar('val_recall', self.my_recall))\n summaries_list_val.append(\n tf.summary.scalar('val_specificity', self.my_specificity))\n summaries_list_val.append(\n tf.summary.scalar('val_f1_score', self.my_f1_score))\n summaries_list_val.append(\n tf.summary.scalar('val_auc_pr', self.my_auc_pr))\n summaries_list_test_restore_val.append(\n tf.summary.scalar('test_loss_1', self.test_loss))\n summaries_list_test_restore_val.append(\n tf.summary.scalar('accuracy_1', self.my_acc))\n summaries_list_test_restore_val.append(\n tf.summary.scalar('precision_1', self.my_precision))\n summaries_list_test_restore_val.append(\n tf.summary.scalar('recall_1', self.my_recall))\n summaries_list_test_restore_val.append(\n tf.summary.scalar('specificity_1', self.my_specificity))\n summaries_list_test_restore_val.append(\n tf.summary.scalar('f1_score_1', self.my_f1_score))\n summaries_list_test_restore_val.append(\n tf.summary.scalar('auc_pr_1', self.my_auc_pr))\n self.merged_test_restore_val = tf.summary.merge(\n summaries_list_test_restore_val)\n self.merged_val = tf.summary.merge(\n summaries_list_val)\n\n self.saver = tf.train.Saver()\n\n base_path = '/home/USER/Documents'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ubuntu/Projects'\n if (not (os.path.exists(base_path))):\n base_path = '/home/USER/Projects'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ceesgniewyk/Projects'\n\n 
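The chained inner-loop updates above rely on a TF1 graph-mode idiom: each new `minimize()` op is created inside `tf.control_dependencies([previous_step])`, so a single `session.run` executes all K SGD steps in order. A minimal TF1-style sketch of that pattern (it assumes a TF 1.x environment, matching the record's `tf.placeholder` usage):

```python
import tensorflow as tf

w = tf.Variable(5.0)
opt = tf.train.GradientDescentOptimizer(0.1)
step = opt.minimize(tf.square(w))
for _ in range(2):                     # two more chained updates, K = 3
    with tf.control_dependencies([step]):
        step = opt.minimize(tf.square(w))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(step)                     # runs all three updates in sequence
    print(sess.run(w))                 # 5.0 -> 4.0 -> 3.2 -> 2.56
```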
self.checkpoint_path = base_path + '/MAML/checkpoints_REPTILE/'\n if (not (os.path.exists(self.checkpoint_path))):\n os.mkdir(self.checkpoint_path)\n if (not (os.path.exists(os.path.join(self.checkpoint_path, self.summary_dir)))):\n os.mkdir(os.path.join(self.checkpoint_path, self.summary_dir))\n\n def compute_metrics(self, logits, labels, logits_are_predictions=False):\n \"\"\"compute non-running performance metrics.\n\n Parameters\n ----------\n logits : tensor\n labels : tensor\n\n\n Returns\n -------\n acc : tensor\n accuracy.\n precision : tensor\n precision.\n recall : tensor\n recall.\n specificity : tensor\n specificity.\n f1_score : tensor\n F1 score.\n auc_pr : tensor\n AUC-PR.\n\n \"\"\"\n if(logits_are_predictions):\n predictions = logits\n else:\n predictions = tf.cast(\n tf.greater(\n tf.nn.sigmoid(logits),\n 0.5),\n tf.float32)\n TP = tf.count_nonzero(predictions * labels, dtype=tf.float32)\n TN = tf.count_nonzero((predictions - 1) *\n (labels - 1), dtype=tf.float32)\n FP = tf.count_nonzero(predictions * (labels - 1), dtype=tf.float32)\n FN = tf.count_nonzero((predictions - 1) * labels, dtype=tf.float32)\n acc = tf.reduce_mean(tf.to_float(tf.equal(predictions, labels)))\n\n precision = tf.cond(tf.math.equal((TP + FP), 0),\n true_fn=lambda: 0.0, false_fn=lambda: TP / (TP + FP))\n recall = TP / (TP + FN)\n specificity = TN / (TN + FP)\n f1_score = tf.cond(\n tf.math.equal(\n (precision + recall),\n 0),\n true_fn=lambda: 0.0,\n false_fn=lambda: 2 * precision * recall / (\n precision + recall))\n\n auc_pr = tf.metrics.auc(labels=labels, predictions=tf.nn.sigmoid(\n logits), curve='PR', summation_method='careful_interpolation')[1]\n\n return [acc, precision, recall, specificity, f1_score, auc_pr]\n\n def feed_forward(self, inp, training, no_head=False):\n \"\"\"computes an output tensor by feeding the input through the network.\n\n Parameters\n ----------\n inp : tensor\n input tensor.\n training : bool\n argument for Batch normalization layers.\n\n Returns\n -------\n out : tensor\n output tensor.\n\n \"\"\"\n if(len(self.input_shape) < 3 and len(self.filter_sizes) > 0):\n h = tf.expand_dims(inp, -1)\n else:\n h = inp\n\n n_layers_no_head = len(self.layers) - len(self.dense_sizes) - 1\n if(self.bn):\n n_layers_no_head = len(self.layers) - len(self.dense_sizes) * 2 - 1\n\n for i in range(n_layers_no_head):\n if('conv' in self.layers[i].name):\n h = self.layers[i](h)\n # if(self.dataset == 'MIN'):\n h = tf.layers.max_pooling2d(\n h, pool_size=2, strides=2, padding='same')\n elif('bn' in self.layers[i].name):\n h = self.layers[i](h, training=training)\n\n if(self.bn and 'bn_c_last' in self.layers[i].name):\n h = self.flatten(h)\n\n elif(not(self.bn) and 'conv_last' in self.layers[i].name):\n h = self.flatten(h)\n\n if(no_head):\n return h\n else:\n if(n_layers_no_head < 1):\n i = -1\n for j in range(i + 1, len(self.layers)):\n h = self.layers[j](h)\n return h\n\n def metatrain_op(self, epoch, X_train_a, Y_train_a, X_train_b, Y_train_b):\n \"\"\"performs one meta-training iteration.\n\n Parameters\n ----------\n X_train_a : tensor\n contains features of the K datapoints sampled for the inner loop (adaptation) updates of each meta-training task.\n Y_train_a : tensor\n contains labels of the K datapoints sampled for the inner loop (adaptation) updates of each meta-training task.\n X_train_b : tensor\n contains features sampled for the outer loop updates of each meta-training task.\n Y_train_b : tensor\n contains labels sampled for the outer loop updates of each meta-training task.\n\n 
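`compute_metrics` derives everything from the four confusion counts, guarding the two ratios that can divide by zero with `tf.cond`. A plain-Python equivalent of that arithmetic (recall and specificity are left unguarded, as in the record):

```python
def metrics_from_counts(TP, TN, FP, FN):
    acc = (TP + TN) / (TP + TN + FP + FN)
    precision = 0.0 if TP + FP == 0 else TP / (TP + FP)
    recall = TP / (TP + FN)
    specificity = TN / (TN + FP)
    f1 = 0.0 if precision + recall == 0 else (
        2 * precision * recall / (precision + recall))
    return acc, precision, recall, specificity, f1

print(metrics_from_counts(TP=8, TN=85, FP=2, FN=5))
```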
Returns\n -------\n metatrain_loss : float\n sum of the losses computed on the sampled outer loop batch of each sampled meta-training task.\n train_summaries : list\n training summaries.\n\n \"\"\"\n\n meta_grads_list = []\n reptile_grads_list = []\n metatrain_loss = 0\n old_vars = []\n for layer_idx in range(0, len(self.layers)):\n layer_weights = self.layers[layer_idx].get_weights()\n old_vars.append(layer_weights)\n\n old_vars_trainable = []\n for layer_idx in range(0, len(self.layers)):\n layer_weights = self.layers[layer_idx].get_weights()[:2]\n old_vars_trainable.append(layer_weights)\n for i in range(0, self.n_sample_tasks):\n feed_dict = {\n self.X_finetune: X_train_a[i],\n self.Y_finetune: Y_train_a[i],\n self.X_outer_loop: X_train_b[i],\n self.Y_outer_loop: Y_train_b[i]}\n outer_loss = self.sess.run(self.outer_loop_loss, feed_dict)\n\n metatrain_loss += outer_loss\n new_vars = []\n for layer_idx in range(0, len(self.layers)):\n layer_weights = self.layers[layer_idx].get_weights()[:2]\n new_vars.append(layer_weights)\n reptile_grads_task = []\n for l_idx in range(len(old_vars)):\n for var_idx in range(len(old_vars_trainable[l_idx])):\n reptile_grads_task.append(\n old_vars_trainable[l_idx][var_idx] -\n new_vars[l_idx][var_idx])\n reptile_grads_list.append(reptile_grads_task)\n\n for layer_idx in range(0, len(self.layers)):\n self.layers[layer_idx].set_weights(old_vars[layer_idx])\n\n avg_meta_grads = np.mean(reptile_grads_list, axis=0)\n meta_feed_dict = {}\n for i in range(len(avg_meta_grads)):\n\n meta_feed_dict[self.placeholder_gradients[i]\n [0]] = avg_meta_grads[i]\n self.sess.run(self.meta_update_op, meta_feed_dict)\n\n train_summaries = tf.Summary(\n value=[\n tf.Summary.Value(\n tag='metatrain_loss',\n simple_value=metatrain_loss),\n ])\n\n return metatrain_loss, train_summaries\n\n def val_op(self, K_X_val, K_Y_val, val_test_X, val_test_Y):\n \"\"\"performs one validation episode.\n\n Parameters\n ----------\n K_X_val : array\n contains features of the K datapoints sampled for adaptation to the validation task(s).\n K_Y_val : array\n contains labels of the K datapoints sampled for adaptation to the validation task(s).\n val_test_X : array\n contains features of the test set(s) of the validation task(s).\n val_test_Y : array\n contains labels of the test set(s) of the validation task(s).\n\n Returns\n -------\n val_summaries : list\n validation summaries.\n val_test_loss : float\n loss computed on the test set after adaptation.\n acc : float\n accuracy computed on the test set after adaptation.\n precision : float\n precision computed on the test set after adaptation.\n recall : float\n recall computed on the test set after adaptation.\n specificity : float\n specificity computed on the test set after adaptation.\n f1_score : float\n F1 score computed on the test set after adaptation.\n auc_pr : float\n AUC-PR computed on the test set after adaptation.\n\n \"\"\"\n\n # save current network parameters (including bn stats)\n old_vars = []\n for layer_idx in range(0, len(self.layers)):\n layer_weights = self.layers[layer_idx].get_weights()\n old_vars.append(layer_weights)\n\n val_test_feed_dict = {\n self.X_finetune: val_test_X, self.Y_finetune: val_test_Y\n }\n\n for i in range(self.num_updates):\n self.sess.run(self.val_finetune_update_op, {\n self.X_finetune: K_X_val, self.Y_finetune: K_Y_val})\n\n if(self.bn):\n # assign batch normalization stats using the available adaptation\n # set\n self.sess.run(self.updated_bn_model, {self.X_finetune: K_X_val})\n\n 
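The heart of `metatrain_op` is that Reptile's per-task "gradient" is simply `(initial_weights - adapted_weights)`, averaged over the sampled tasks and handed to the meta optimizer. A numpy sketch of that update (plain SGD here for clarity; the record feeds the averaged differences to Adam instead):

```python
import numpy as np

def reptile_step(theta, adapted_thetas, meta_lr):
    # Pseudo-gradient per task: initialization minus post-adaptation weights.
    grads = [theta - adapted for adapted in adapted_thetas]
    return theta - meta_lr * np.mean(grads, axis=0)

theta = np.zeros(3)
adapted = [np.array([1.0, 0.0, 2.0]), np.array([3.0, 0.0, 0.0])]
print(reptile_step(theta, adapted, meta_lr=0.1))   # nudged toward task optima
```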
if(self.summary):\n self.sess.run(tf.local_variables_initializer())\n val_summaries, val_test_loss, acc, precision, recall, specificity, f1_score, auc_pr = self.sess.run(\n [self.merged_val, self.test_loss, self.my_acc, self.my_precision, self.my_recall, self.my_specificity, self.my_f1_score, self.my_auc_pr], feed_dict=val_test_feed_dict)\n else:\n self.sess.run(tf.local_variables_initializer())\n val_test_loss, acc, precision, recall, specificity, f1_score, auc_pr = self.sess.run(\n [self.test_loss, self.my_acc, self.my_precision, self.my_recall, self.my_specificity, self.my_f1_score, self.my_auc_pr], feed_dict=val_test_feed_dict)\n val_summaries = None\n\n # resetting old networks parameters (including bn stats)\n for layer_idx in range(0, len(self.layers)):\n self.layers[layer_idx].set_weights(old_vars[layer_idx])\n\n return val_summaries, val_test_loss, acc, precision, recall, specificity, f1_score, auc_pr\n\n def assign_stats(self, K_finetune_samples):\n \"\"\" compute BN stats (mean and variance) using the given adaptation set and assign them to the BN layers in the network.\n\n Parameters\n ----------\n K_finetune_samples : tensor\n conatins the features of the K datapoints sampled for adaptation.\n\n Returns\n -------\n out : tensor\n output of the last batch normalization layer (when it is computed using session.run, the BN stats are assigned)\n\n \"\"\"\n\n if(len(self.input_shape) < 3 and len(self.filter_sizes) > 0):\n h = tf.expand_dims(K_finetune_samples, -1)\n else:\n h = K_finetune_samples\n\n for i in range(len(self.layers)):\n if('dense_last' in self.layers[i].name):\n out = h\n return out\n elif('bn_c' in self.layers[i].name):\n mean, var = tf.nn.moments(h, [0, 1, 2])\n assign_op_1 = self.layers[i].variables[-1].assign(var)\n assign_op_2 = self.layers[i].variables[-2].assign(mean)\n with tf.control_dependencies([assign_op_1, assign_op_2]):\n h = self.layers[i](h, training=False)\n elif('bn_d' in self.layers[i].name):\n mean, var = tf.nn.moments(h, 0)\n assign_op_3 = self.layers[i].variables[-1].assign(var)\n assign_op_4 = self.layers[i].variables[-2].assign(mean)\n with tf.control_dependencies([assign_op_3, assign_op_4]):\n h = self.layers[i](h, training=False)\n elif('conv' in self.layers[i].name):\n h = self.layers[i](h)\n h = tf.layers.max_pooling2d(\n h, pool_size=2, strides=2, padding='same')\n if('bn_c_last' in self.layers[i].name):\n h = self.flatten(h)\n","sub_path":"MAMLs_Reptiles/Omniglot/metalearning_algorithms/reptile_class.py","file_name":"reptile_class.py","file_ext":"py","file_size_in_byte":22854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553861231","text":"\"\"\"File to be invoked after data collection\n\"\"\"\nimport time\nimport preprocessor as p\nfrom langdetect import detect\nfrom profanity_check import predict\nimport os\nimport glob\nimport json\nimport pandas as pd\nimport datetime as dt\n\nCURRENT_WORKDIR = os.getcwd()\n\n\nclass Preprocess(object):\n def __init__(self):\n self.veteran_stats = dict()\n self.civilian_stats = dict()\n self.cleaned_veteran_tweets = list()\n self.cleaned_civilian_tweets = list()\n self.list_of_pages = list()\n\n def pre_process_data(self, tweet, data_type=\"soldiers\"):\n lang = 'na'\n try:\n lang = detect(tweet)\n except:\n print(\"Can't detect a lang\")\n finally:\n if lang == 'en' or lang == 'na': #Only parsing English tweets\n self.check_profanity(tweet, data_type=data_type)\n self.aggregate_stats_from_tweet(tweet, data_type=data_type)\n 
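The Preprocess record's cleaning step uses the tweet-preprocessor package (imported as `p`): `set_options` restricts which entity classes `clean` strips, and hashtags survive as bare words after the `#` replacement. Note also that the record's `'words'` stat uses `len(cleaned_tweet)`, which counts characters; `len(cleaned_tweet.split())` would count words. A short sketch:

```python
import preprocessor as p

# Strip URLs, emojis, and mentions; keep hashtag text as a plain word.
p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION)
cleaned = p.clean("@buddy thanks! see https://example.com #grateful")
cleaned = cleaned.replace("#", "")
print(cleaned, "->", len(cleaned.split()), "words")
```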
p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.MENTION)\n cleaned_tweet = p.clean(tweet)\n cleaned_tweet = cleaned_tweet.replace(\"#\", \"\")\n number_of_words = len(cleaned_tweet)\n if data_type == \"soldiers\":\n self.veteran_stats[\n 'words'] = number_of_words if self.veteran_stats.get(\n 'words') is None else (\n self.veteran_stats['words'] + number_of_words)\n self.cleaned_veteran_tweets.append(cleaned_tweet)\n else:\n self.civilian_stats[\n 'words'] = number_of_words if self.civilian_stats.get(\n 'words') is None else (\n self.civilian_stats['words'] + number_of_words)\n self.cleaned_civilian_tweets.append(cleaned_tweet)\n\n def check_profanity(self, tweet, data_type):\n if predict([tweet]) == 1:\n if data_type == 'soldiers':\n self.veteran_stats['curses'] = 1 if self.veteran_stats.get(\n 'curses') is None else (self.veteran_stats['curses'] + 1)\n else:\n self.civilian_stats['curses'] = 1 if self.civilian_stats.get(\n 'curses') is None else (self.civilian_stats['curses'] + 1)\n\n def aggregate_stats_from_df(self, vet_df, civ_df):\n self.veteran_stats['retweets'] = vet_df.retweets_count.sum()\n self.veteran_stats['replies'] = vet_df.replies_count.sum()\n self.veteran_stats['likes'] = vet_df.likes_count.sum()\n self.civilian_stats['retweets'] = civ_df.retweets_count.sum()\n self.civilian_stats['replies'] = civ_df.replies_count.sum()\n self.civilian_stats['likes'] = civ_df.likes_count.sum()\n\n def aggregate_stats_from_tweet(self, tweet, data_type):\n token_string = p.tokenize(tweet)\n number_of_hashtags = token_string.count('$HASHTAG$')\n number_of_emojis = token_string.count('$EMOJI$')\n number_of_urls = token_string.count('$URL$')\n number_of_mentions = token_string.count('$MENTION$')\n if data_type == \"soldiers\":\n self.veteran_stats[\n 'hashtags'] = number_of_hashtags if self.veteran_stats.get(\n 'hashtags') is None else (\n int(self.veteran_stats['hashtags']) +\n number_of_hashtags)\n self.veteran_stats[\n 'emojis'] = number_of_emojis if self.veteran_stats.get(\n 'emojis') is None else (self.veteran_stats['emojis'] +\n number_of_emojis)\n self.veteran_stats[\n 'urls'] = number_of_urls if self.veteran_stats.get(\n 'urls') is None else (self.veteran_stats['urls'] +\n number_of_urls)\n self.veteran_stats[\n 'mentions'] = number_of_mentions if self.veteran_stats.get(\n 'mentions') is None else (self.veteran_stats['mentions'] +\n number_of_mentions)\n else:\n self.civilian_stats[\n 'hashtags'] = number_of_hashtags if self.civilian_stats.get(\n 'hashtags') is None else (self.civilian_stats['hashtags'] +\n number_of_hashtags)\n self.civilian_stats[\n 'emojis'] = number_of_emojis if self.civilian_stats.get(\n 'emojis') is None else (self.civilian_stats['emojis'] +\n number_of_emojis)\n self.civilian_stats[\n 'urls'] = number_of_urls if self.civilian_stats.get(\n 'urls') is None else (self.civilian_stats['urls'] +\n number_of_urls)\n self.civilian_stats[\n 'mentions'] = number_of_mentions if self.civilian_stats.get(\n 'mentions') is None else (self.civilian_stats['mentions'] +\n number_of_mentions)\n\n def process_data(self):\n pass\n\n def get_combined_file(self, folder_name=\"soldiers\", force_rewrite=False):\n os.chdir(\"{}/data/{}\".format(CURRENT_WORKDIR, folder_name))\n all_filenames = [i for i in glob.glob('*.{}'.format('csv'))]\n if not os.path.exists(\n \"combined_{}.csv\".format(folder_name)) or force_rewrite:\n combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])\n combined_csv.to_csv(\"combined_{}.csv\".format(folder_name),\n index=False,\n encoding='utf-8-sig')\n 
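`get_combined_file` reduces to a glob-and-concat over the per-user CSVs, written back with a BOM-friendly encoding. The core pattern, with the record's file name kept as an illustrative target:

```python
import glob
import pandas as pd

# Merge every CSV in the current folder into one combined file.
all_filenames = glob.glob('*.csv')
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])
combined_csv.to_csv('combined_soldiers.csv', index=False, encoding='utf-8-sig')
```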
print(\"combined_{}.csv saved\".format(folder_name))\n else:\n print(\"File exists. Skipping. Use force_rewrite to overwrite.\")\n\n def get_dataframe(self, data_type=\"soldiers\"):\n os.chdir(\"{}/data/{}\".format(CURRENT_WORKDIR, data_type))\n return pd.read_csv(\"combined_{}.csv\".format(data_type))\n\n def clean_veteran_tweet_and_create_df(self, vet_df):\n vet_tweets = list(vet_df['tweet'])\n for i in range(len(vet_tweets)):\n if i % 100 == 0:\n print(\"{}/{}\".format(i, len(vet_tweets)))\n if 'http://pandora.com/' in vet_tweets[i]:\n continue\n self.pre_process_data(vet_tweets[i], \"soldiers\")\n self.cleaned_vet_tweet_df = pd.DataFrame(\n {'tweets': self.cleaned_veteran_tweets})\n self.cleaned_vet_tweet_df.to_csv('cleaned_vet_tweet_df.csv',\n header=False,\n mode='a')\n print(\"veteran stats: {}\".format(self.veteran_stats))\n print('cleaned_vet_tweet_df.csv created')\n with open('{}.txt'.format(dt.datetime.utcnow()), 'w') as file:\n file.write(json.dumps(self.veteran_stats))\n\n def clean_civilian_tweet_and_create_df(self, civ_df):\n civ_tweets = list(civ_df['tweet'])\n for i in range(len(civ_tweets)):\n if i % 100 == 0:\n print(\"{}/{}\".format(i, len(civ_tweets)))\n self.pre_process_data(vet_tweets[i], \"civilians\")\n self.cleaned_civ_tweet_df = pd.DataFrame(\n {'tweets': self.cleaned_civilian_tweets})\n self.cleaned_civ_tweet_df.to_csv('cleaned_civ_tweet_df.csv',\n header=False,\n mode='a')\n print(\"civilian stats: {}\".format(self.civilian_stats))\n print('cleaned_civ_tweet_df.csv created')\n with open('{}.txt'.format(dt.datetime.utcnow()), 'w') as file:\n file.write(json.dumps(self.civilian_stats))\n\n\nif __name__ == \"__main__\":\n # start_time = time.time()\n # pr = Preprocess()\n # vet_df = pr.get_dataframe()\n # vet_df = vet_df[['tweet', 'time', 'photos', 'replies_count', 'retweets_count', 'likes_count', 'retweet']]\n # vet_df = vet_df[(vet_df['retweet'] == False)]\n # print(\"Vet Shape: {}\".format(vet_df.shape))\n # pr.clean_veteran_tweet_and_create_df(vet_df)\n # print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n start_time = time.time()\n pr = Preprocess()\n civ_df = pr.get_dataframe(data_type=\"civilians\")\n civ_df = civ_df[[\n 'tweet', 'time', 'photos', 'replies_count', 'retweets_count',\n 'likes_count', 'retweet'\n ]]\n civ_df = civ_df[(civ_df['retweet'] == False)]\n print(\"Civ Shape: {}\".format(civ_df.shape))\n\n # pr.clean_veteran_tweet_and_create_df(vet_df)\n # print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n # pr.aggregate_stats_from_df(vet_df, civ_df) # Stats directly extracted from the twitter data\n","sub_path":"supporting_scripts/post_data_collection.py","file_name":"post_data_collection.py","file_ext":"py","file_size_in_byte":8776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147281420","text":"# coding: utf-8\n\"\"\"Simple lightGBM model\n\n1. Split train data into offline train set and test set\n2. Use all train data to train a new lasso model\n3. 
Predict the value of test data\n\"\"\"\n\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error as mse\n\nsys.path.append('../')\nfrom util.feature import add_feature\n\ntrain = pd.read_csv('../data/d_train_20180102.csv')\ntrain = add_feature(train)\n\n# splits into male and female\ntrain_m = train.loc[train['性别'] == 0, :]\ntrain_f = train.loc[train['性别'] == 1, :]\nlog = tuple()\nscaler = MinMaxScaler()\nregressor = SVR()\n\nfor sets in [train_m, train_f]:\n XALL = sets.loc[:, [column for column in train.columns if column not in \n ['id', '性别', '体检日期', '血糖', '乙肝表面抗原', '乙肝表面抗体', '乙肝e抗原', '乙肝e抗体', '乙肝核心抗体']]]\n \n XALL.fillna(XALL.median(), inplace=True)\n columns = XALL.columns\n\n scaler.fit(XALL)\n XALL = scaler.transform(XALL)\n XALL = pd.DataFrame(XALL, columns=columns)\n\n yALL = sets.loc[:, '血糖']\n\n X_train, X_test, y_train, y_test = train_test_split(XALL, yALL,\n test_size=0.3, random_state=2018)\n\n regressor.fit(X_train, y_train)\n pred_train = regressor.predict(X_train)\n pred_test = regressor.predict(X_test)\n log += ((mse(y_train, pred_train), mse(y_test, pred_test)), )\n \nfor score1, score2 in log:\n print('train\\'s mse: {0}, test\\'s mse: {1}'.format(score1, score2))\n","sub_path":"a_zx_scheme/basic_analysis&offline/splittedgender_svr.py","file_name":"splittedgender_svr.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249176192","text":"import gtk\nimport os\nfrom lxml import etree\nfrom simpleContent import SimpleContent\nfrom dragSourceEventBox import DragSourceEventBox\nfrom expand import Expand\nfrom colorChooserButton import ColorChooserButton\nfrom pythonValue import PythonValue\nfrom elementValue import ElementValue\nfrom shadow import Shadow\nfrom align import Align\nfrom valueValidator import ValueValidator\nimport constants\nfrom baseElement import BaseElement\n\nclass Diamond(BaseElement):\n def __init__(self, name, box, manager, parent):\n BaseElement.__init__(self)\n self.manager = manager\n self.containerName = name\n self.box = box\n self.parentContainer = parent\n self.childObjects = []\n self.expand = None\n if type(self.parentContainer).__name__ == 'Container':\n self.expand = Expand(self)\n self.fillColorButton = ColorChooserButton(self, 'Select fill color')\n self.borderColorButton = ColorChooserButton(self, 'Select border color')\n self.shadow = Shadow(self)\n\n newVbox = gtk.VBox()\n self.set_border_width(0)\n self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"black\"))\n\n newHbox = gtk.HBox()\n eB = gtk.EventBox()\n eB.set_border_width(2)\n eB.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"lightgray\"))\n\n labelEvent = DragSourceEventBox(self)\n newHbox.connect('button-press-event', self.showProperties)\n labelEvent.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"lightgray\"))\n label = gtk.Label(' '+name)\n label.set_alignment(0.0, 0.5)\n\n if type(self.parentContainer).__name__ == 'Container':\n labelEvent.drag_source_set(gtk.gdk.BUTTON1_MASK,[],0)\n self.drag_dest_set(0,[],0)\n self.connect('drag_motion', self.motion_cb)\n self.connect('drag_drop', self.drop_cb)\n\n labelEvent.add(label)\n newHbox.pack_start(labelEvent,True,True,2)\n iconEvent = gtk.EventBox()\n iconEvent.set_border_width(2)\n iconEvent.modify_bg(gtk.STATE_NORMAL, 
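The SVR record's docstring still says "Simple lightGBM model" and mentions a lasso model, though the code trains `sklearn.svm.SVR`. It also fits `MinMaxScaler` on all rows before `train_test_split`, letting test-set ranges leak into the scaler; fitting on the training split alone avoids that. A sketch with toy data and the record's split parameters:

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

X, y = np.random.rand(100, 5), np.random.rand(100)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=2018)
scaler = MinMaxScaler().fit(X_train)       # statistics from training rows only
X_train_s, X_test_s = scaler.transform(X_train), scaler.transform(X_test)
print(X_test_s.min(), X_test_s.max())      # may fall slightly outside [0, 1]
```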
gtk.gdk.color_parse(\"lightgray\"))\n iconEvent.connect('button-release-event', self.deleteClicked)\n icon = gtk.Image()\n icon.set_from_file(os.path.split(os.path.realpath(__file__))[0]+'/delete.png')\n iconEvent.add(icon)\n newHbox.pack_end(iconEvent,False,True,2)\n\n newVbox.pack_start(newHbox,False)\n\n sc = SimpleContent(self,manager)\n self.box.pack_start(sc)\n self.childObjects.append(sc)\n\n newVbox.pack_start(self.box)\n eB.add(newVbox)\n self.add(eB)\n\n def deleteClicked(self, widget, w):\n dialog = gtk.MessageDialog(None,0,gtk.MESSAGE_QUESTION,gtk.BUTTONS_YES_NO,'Delete '+self.containerName+' with whole content?')\n response = dialog.run()\n if response == gtk.RESPONSE_YES:\n if self.parentContainer == None:\n self.manager.clearAll()\n else:\n self.parentContainer.deleteChild(self)\n self.manager.clearProperties()\n dialog.destroy()\n\n def showProperties(self, widget, w):\n if self.manager.lastHighligted:\n self.manager.lastHighligted.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"black\"))\n self.manager.lastHighligted = self\n self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"green\"))\n box = self.manager.wTree.get_widget('vbox_properties')\n for w in box.children():\n box.remove(w)\n\n hbox = gtk.HBox()\n label = gtk.Label('Fill color')\n label.set_alignment(0.01, 0.5)\n hbox.pack_start(label,False)\n hbox.pack_end(PythonValue(self,'Fill color'),False)\n hbox.pack_end(ElementValue(self,'Fill color'),False)\n box.pack_start(hbox,False)\n box.pack_start(self.fillColorButton, False)\n\n box.pack_start(gtk.Label(' '),False)\n\n hbox = gtk.HBox()\n label = gtk.Label('Border color')\n label.set_alignment(0.01, 0.5)\n hbox.pack_start(label,False)\n hbox.pack_end(PythonValue(self,'Border color'),False)\n hbox.pack_end(ElementValue(self,'Border color'),False)\n box.pack_start(hbox,False)\n box.pack_start(self.borderColorButton, False)\n\n box.pack_start(gtk.Label(' '),False)\n box.pack_start(self.shadow, False)\n box.pack_start(gtk.Label(' '),False)\n if self.expand:\n box.pack_start(self.expand, False)\n box.pack_start(gtk.Label(' '),False)\n box.show_all()\n\n def setElementValue(self, attrib, value):\n if attrib == 'Fill color':\n self.fillColorButton.color = value\n if value:\n self.fillColorButton.set_label(self.fillColorButton.color)\n else: self.fillColorButton.set_label('')\n elif attrib == 'Border color':\n self.borderColorButton.color = value\n if value:\n self.borderColorButton.set_label(self.borderColorButton.color)\n else:\n self.borderColorButton.set_label('')\n\n def colorChanged(self, newColor, attrib):\n pass\n\n def getApp(self):\n if self.containerName == 'Diamond':\n app = etree.Element('Diamond')\n else:\n app = etree.Element('Ellipse')\n if self.fillColorButton.color:\n app.attrib['fill'] = self.fillColorButton.getColor()\n if self.borderColorButton.color:\n app.attrib['border'] = self.borderColorButton.getColor()\n if self.childObjects[0].content != None:\n app.append(self.childObjects[0].content.getApp())\n if self.shadow.padding > 0 or self.shadow.buttonColor.color:\n shadow = self.shadow.getXMLFormat()\n shadow.append(app)\n return shadow\n return app\n\n @staticmethod\n def validate(element, dataElement):\n fill = element.get('fill')\n if fill:\n if not ValueValidator.validate(fill, dataElement):\n return False, 'Unknown element attribute for fill color: ' + fill\n border = element.get('border')\n if border:\n if not ValueValidator.validate(border, dataElement):\n return False, 'Unknown element attribute for border color: ' + border\n return True, 
None","sub_path":"plugin/appearance/diamond.py","file_name":"diamond.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"361571211","text":"#i -*- coding: utf8 -*-\r\n# _..._\r\n# .-' '-.\r\n# / _ _\\ meterorama\r\n# /':. (o) /__)\r\n# /':. .,_ | | descriptive, colorful ascii meters\r\n#|': ; / \\ /_/\r\n#/ ; `\"`\" }\r\n# ':., {\r\n# ; }\r\n#::. ;\\/\\ /\\ {\r\n# ':. ;``\"``\\\r\n#'::' / ;\r\n# '::' / |\r\n#:' _.-`; ;\r\n#-;` ; | |\r\n# ; ; | |\r\n# ; ; ; / ,--.........,\r\n# ; ;/ ; .' -='.\r\n# ; ; / / .\\ '\r\n#; /` .\\ _,==\" \\ .'\r\n# .'. _ ,_'\\.\\~\" //`. \\ .'\r\n#___~' \\ \\- | | /,\\ ` \\ ..'\r\n# ==\"'' |`| | | ==\"''\\.==''\r\n#\" |`| |`| ===\"`\r\n# \\\\ |`| / /==\"`\r\n# /,\\ / /= )\")\r\n# _')\")\r\n#~\";`\r\n# ;|\r\n# |\r\n#\\ |\r\n# \\|\r\n\r\nimport argparse\r\nimport code\r\nimport math\r\n\r\nclass Meter(object):\r\n\r\n def __init__(self, args):\r\n self.length = args.length\r\n self.value = args.value\r\n self.name = args.name\r\n self.colors = args.color\r\n self.target = args.target\r\n self.description = args.description\r\n self.meter_char = args.meter_char\r\n self.empty_char = args.empty_char\r\n\r\n self.build()\r\n\r\n def build(self):\r\n\r\n chunk_size = self.length / len(self.colors)\r\n\r\n def colorize(text, position):\r\n\r\n if (position % chunk_size == 0):\r\n color_index = int(position / chunk_size)\r\n if (color_index + 1 > len(self.colors)):\r\n color_index = len(self.colors) - 1;\r\n #print position, len(self.colors), color_index\r\n color = self.colors[color_index]\r\n return \"\\003%s%s\" % (color, text)\r\n else:\r\n return \"%s\" % (text)\r\n #color = self.colors[-1]\r\n\r\n bar = '['\r\n\r\n if (self.value <= self.length):\r\n for i in range(0, self.value):\r\n bar = bar + colorize(self.meter_char, i)\r\n bar = bar + '\\003\\002\\002'\r\n for i in range(0, self.length - self.value):\r\n bar = bar + self.empty_char\r\n bar = bar + '\\003\\002\\002]'\r\n\r\n elif (self.value > self.length):\r\n for i in range(0, self.length):\r\n bar = bar + colorize(self.meter_char, i)\r\n bar = bar + '\\003\\002\\002]\\003%s' % (self.colors[-1])\r\n for i in range(0, self.value - self.length):\r\n bar = bar + self.meter_char\r\n bar = bar + '\\003\\002\\002'\r\n\r\n else:\r\n pass\r\n\r\n pre = '%s-meter for %s:' % (self.name, self.target)\r\n post = '%s' % (\"\".join(self.description))\r\n self.meter_string = '%s %s %s' % (pre, bar, post)\r\n\r\n\r\n def output(self):\r\n print(self.meter_string)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\r\n '--name',\r\n default = 'rage',\r\n help = 'rage'\r\n )\r\n\r\n parser.add_argument(\r\n '--color',\r\n nargs = '*',\r\n default = [3, 8, 4, 5],\r\n help = 'COLOR1 COLOR2 ...'\r\n )\r\n\r\n parser.add_argument(\r\n '--meter-char',\r\n default = '=',\r\n help = 'TARGET'\r\n )\r\n\r\n parser.add_argument(\r\n '--empty-char',\r\n default = ' ',\r\n help = 'TARGET'\r\n )\r\n\r\n parser.add_argument(\r\n '--length',\r\n type = int,\r\n default = 20,\r\n help = 'COLOR'\r\n )\r\n\r\n parser.add_argument(\r\n '--value',\r\n type = int,\r\n default = 20,\r\n help = 'VALUE'\r\n )\r\n\r\n parser.add_argument(\r\n 'target',\r\n help = 'TARGET'\r\n )\r\n\r\n parser.add_argument(\r\n 'description',\r\n nargs = '*'\r\n )\r\n\r\n args = parser.parse_args()\r\n meter = Meter(args)\r\n 
meter.output()\r\n\r\n\r\n","sub_path":"meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"202209574","text":"# -*- coding: cp1252 -*-\n#SHA-2\n#My SHA-2-256, this is for educational purposes so it'll be slow.\n#If you want to do real encryption go to a proper programmer.\n#This stuff would run faster on a GPU because it's mostly\n#bitwise operations\n\n#This is the hashing algoritm used by Bitcoin as well as most of\n#the internet\n\n#Made for python 3\n\n#Sources:-\n#https://en.wikipedia.org/wiki/SHA-2\n\ndef text2bin(message):\n\n output = ''\n for i in message:\n output += bin(ord(i))[2:].zfill(8)\n\n return output\n\n#bitwise rotation of a number rotdist bits to\n#the right inside the field defined by bit_length\ndef ROR(number, rotdist, bit_length):\n\n #number = bin(number)[2:]\n #make it a x bit binary number\n a = number.zfill(bit_length)\n #print a\n \n #loop over the number of rotations needed\n for i in range(rotdist):\n\n #perform one rotation at a time\n b = ''\n #do the loop first. If it were not\n #for the looping we could use pythons\n #internal bitwise shift tools\n b = b + a[bit_length - 1]\n #move all the other elements in the string\n for l in range(bit_length - 1):\n\n b = b + a[l]\n \n \n #alter a to ensure perminance over\n #all the shifts in the code\n #a = int(b, 2)\n a = b\n\n #print a\n return a\n'''\n=================================================\n====Now we start the SHA-2 specific functions====\n=================================================\n'''\n\n\n#this is the function for padding the message out\n#it is the same for SHA1 and SHA2\n\ndef padding_function(message):\n \n #first convert the message to a string\n binary_message = text2bin(message)\n\n\n #record the initial message length for later\n message_length = len(binary_message)\n\n #add a one to the end of the message string. 
Now we're in the\n #padding stage\n p = binary_message + '1'\n\n #extend the length of p until len(p)%512 = 448\n while len(p)%512 != 448:\n p += '0'\n\n #the final padding step, add the length of the starting message\n\n p += bin(message_length)[2:].zfill(64)\n\n block_no = int(len(p)/512)\n\n #splits the padded message into blocks\n blocks = []\n for i in range(block_no):\n\n blocks.append(p[i * 512: (i * 512) + 512])\n\n return blocks\n\n'''\n===========================================================\n==== These are the functions that are used by the loop ====\n===========================================================\n'''\n#^ = XOR\n#& = AND\n#~ = NOT\n#ROR(x, n) = rotate x right by n\n#x >> n = right shift x by n\n\n\n#s0 = ROR(x,7) ^ ROR(x,18) ^ x >> 3\ndef sigma_0(x):\n\n sigma0 = int(ROR(bin(x)[2:], 7, 32), 2) ^ int(ROR(bin(x)[2:], 18, 32), 2) ^ (x >> 3)\n\n return sigma0\n\n#s1 = ROR(x,17) ^ ROR(x,19) ^ x >> 10\ndef sigma_1(x):\n\n sigma1 = int(ROR(bin(x)[2:], 17, 32), 2) ^ int(ROR(bin(x)[2:], 19, 32), 2) ^ (x >> 10)\n\n return sigma1\n\n#e0 = ROR(x,2) ^ ROR(x,13) ^ ROR(x,22)\ndef Eta_0(x):\n\n Eta_0 = int(ROR(bin(x)[2:], 2, 32), 2) ^ int(ROR(bin(x)[2:], 13, 32), 2) ^ int(ROR(bin(x)[2:], 22, 32), 2)\n\n return Eta_0\n\n#e1 = ROR(x,6) ^ ROR(x,11) ^ ROR(x,25)\ndef Eta_1(x):\n\n Eta_1 = int(ROR(bin(x)[2:], 6, 32), 2) ^ int(ROR(bin(x)[2:], 11, 32), 2) ^ int(ROR(bin(x)[2:], 25, 32), 2)\n\n return Eta_1\n\n#Chr = (x & y) ^ (~x & z)\ndef Chr(x, y, z):\n NOT = 0b11111111111111111111111111111111\n Chr = (x & y) ^ ((x ^ NOT) & z)\n\n return Chr\n\n#Maj = (x & y) ^ (x & z) ^ (y & z)\ndef Maj(x, y, z):\n Maj = (x & y) ^ (x & z) ^ (y & z)\n return Maj\n\n'''\n=======================================================================\n==== These are the big functions that do the major bits of SHA-256 ====\n=======================================================================\n'''\n\n\ndef gen_keys(block):\n\n keys = []\n for i in range(16):\n\n keys.append(int(block[(i * 32) : ((i * 32) + 32)], 2))\n\n #this is wrong and won't work for longer messages. I NEED TO FIX THIS\n\n #make 48 new keys using the formula:\n #key[i] = key[i-16] ^ s0(key[i-15]) ^ key[i-7] ^ s1(key[i-2])\n for l in range(16,64):\n\n S0 = sigma_0(keys[l-15])\n S1 = sigma_1(keys[l-2])\n\n w = (keys[l-16] + S0) % pow(2,32) \n\n w = (w + keys[l-7]) % pow(2,32) \n\n w = (w + S1) % pow(2,32) \n\n keys.append(w) \n\n return keys\n\n'''\n=========================================================\n==== This is the function that actually does SHA-256 ====\n=========================================================\n'''\n\n#This is all done in one big function. 
I probably could break it\n#down to make it more readable but fuck it\ndef SHA_256(message):\n\n #initialise the first h values\n h0 = 0x6a09e667\n h1 = 0xbb67ae85\n h2 = 0x3c6ef372\n h3 = 0xa54ff53a\n h4 = 0x510e527f\n h5 = 0x9b05688c\n h6 = 0x1f83d9ab\n h7 = 0x5be0cd19\n\n #initialise the round constants\n k = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,\n 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,\n 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,\n 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,\n 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,\n 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,\n 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,\n 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2]\n\n #convert the message into 512 bit blocks by padding it out\n blocks = padding_function(message)\n\n for p in blocks:\n\n #generate the keys to be used. This is where the block\n #actually gets processed. Everything else just uses these\n #keys\n keys = gen_keys(p)\n\n a = h0\n b = h1\n c = h2\n d = h3\n e = h4\n f = h5\n g = h6\n h = h7\n #run the main loop 64 times. You can find a flow diagram for\n #this on wikipedia\n for i in range(64):\n\n e1 = Eta_1(e)\n\n ch = Chr(e, f, g)\n\n e0 = Eta_0(a)\n\n maj = Maj(a, b, c)\n\n temp1 = (h + e1) % pow(2, 32)\n temp1 = (temp1 + ch) % pow(2, 32)\n temp1 = (temp1 + k[i]) % pow(2, 32)\n temp1 = (temp1 + keys[i]) % pow(2, 32)\n\n temp2 = (e0 + maj) % pow(2, 32)\n\n\n h = g\n g = f\n f = e\n e = (d + temp1) % pow(2, 32)\n d = c\n c = b\n b = a\n a = (temp1 + temp2) % pow(2, 32)\n\n\n\n #save there results by mod adding them to the\n #previous results\n h0 = (h0 + a) % pow(2, 32)\n h1 = (h1 + b) % pow(2, 32)\n h2 = (h2 + c) % pow(2, 32)\n h3 = (h3 + d) % pow(2, 32)\n h4 = (h4 + e) % pow(2, 32)\n h5 = (h5 + f) % pow(2, 32)\n h6 = (h6 + g) % pow(2, 32)\n h7 = (h7 + h) % pow(2, 32)\n\n\n #convert the results to 32 bit binary words for the final\n #result\n h0 = bin(h0)[2:].zfill(32)\n h1 = bin(h1)[2:].zfill(32)\n h2 = bin(h2)[2:].zfill(32)\n h3 = bin(h3)[2:].zfill(32)\n h4 = bin(h4)[2:].zfill(32)\n h5 = bin(h5)[2:].zfill(32)\n h6 = bin(h6)[2:].zfill(32)\n h7 = bin(h7)[2:].zfill(32)\n\n #add the results using the endian convention\n result = h0 + h1 + h2 + h3 + h4 + h5 + h6 + h7\n result = hex(int(result, 2))\n\n return result\n \n'''\n=========================================================\n==== End of functions, start of code using functions ====\n=========================================================\n'''\n\n\n\nmessage = 'The quick brown fox jumps over the lazy dog and again and again'\n\n\nresult = SHA_256(message)\n\n\nprint(result)\n","sub_path":"SHA-2.py","file_name":"SHA-2.py","file_ext":"py","file_size_in_byte":7939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"182270240","text":"# base imports\nimport numpy as np\nimport grpc\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nimport config_file as config\n\n################################################################################\n# Prepare to use gRPC 
endpoint from tf serving \n################################################################################\n\n# gRPC API expects a serialized PredictRequest protocol buffer as input\n# Establish a gRPC channel and a stub\ndef create_grpc_stub(host, port=8500):\n hostport = '{}:{}'.format(host, port)\n channel = grpc.insecure_channel(hostport)\n stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n\n return stub\n\n\n# Call model and signature to make predictions on the image\ndef grpc_request(stub, data_sample, model_name=config.MODEL_NAME, \\\n signature_name='serving_default'):\n request = predict_pb2.PredictRequest()\n request.model_spec.name = model_name\n request.model_spec.signature_name = signature_name\n shp = [dim for dim in data_sample.shape]\n request.inputs['input_tensor'].CopyFrom(tf.make_tensor_proto(data_sample,\n shape=shp))\n # I think I need to increase this waiting time to something like 20sec? done\n result = stub.Predict(request, 100.0)\n\n return result\n\n\n################################################################################\n# Detection\n################################################################################\n\n# call previous functions and make inference\ndef run_inference_for_single_image(host, data_sample):\n stub = create_grpc_stub(host)\n rs_grpc = grpc_request(stub, data_sample)\n\n # outputs of interest\n outputs = ['num_detections',\n 'detection_boxes',\n 'detection_scores', \n 'detection_classes']\n\n # add outputs of interest to a dict as arrays according to their dimension\n shape = []\n output_dict = {}\n for output in outputs:\n shape = tf.TensorShape(rs_grpc.outputs[output].tensor_shape).as_list()\n shape = shape[1:]\n output_dict[output] = np.array(rs_grpc.outputs[output].float_val).reshape(shape)\n shape = []\n\n # num_detections is an int\n num_detections = int(output_dict.pop('num_detections'))\n output_dict['num_detections'] = num_detections\n\n # detection_classes should be ints.\n output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n\n return output_dict\n\n\n# Define and retrieve attributes of objects identified on a single image\ndef get_detections(frame_sequence, im_width, im_height, boxes, classes, scores, cat_index, min_score_thresh):\n detections = []\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n ymin, xmin, ymax, xmax = box\n (left, right, top, bottom) = (xmin * im_width,\n xmax * im_width,\n ymin * im_height,\n ymax * im_height)\n detections.append(\n {'frame_sequence': frame_sequence,\n 'class': int(classes[i]), #cast numpy.int64 to python int\n 'coordinates': {\n 'left': left,\n 'right': right,\n 'bottom': bottom,\n 'top': top},\n 'score': scores[i]\n }\n )\n return detections\n","sub_path":"detections_grpc_video.py","file_name":"detections_grpc_video.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247545050","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 12 16:39:34 2017\n\n@author: Avokald\n\"\"\"\n\n\ndef dict_interdiff(d1, d2):\n if len(d1) == len(d2):\n res1 = {}\n for i in list(d1.keys()):\n res1[i] = (d1[i] >= d2[i])\n return (res1, {})\n else:\n resu1 = {}\n resu2 = {}\n err1 = list(d1.keys())\n err2 = list(d2.keys())\n err3 = err1 + err2\n print(err1, err2, err3)\n for i in err3:\n if i in d1 and i in d2:\n resu1[i] = d1[i] + d2[i]\n elif i in d1:\n resu2[i] = d1[i]\n 
elif i in d2:\n resu2[i] = d2[i]\n return (resu1, resu2)\n\n\ndef main():\n print(dict_interdiff({1: 1, 2: 2, 3: 3, 4: 4, 5: 4}, \\\n {1: 1, 2: 2, 3: 3, 4: 5}))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"6.00.1x/midterm/problem6.py","file_name":"problem6.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140420844","text":"import json\n\n\ndef get_data(json_file):\n try:\n with open(json_file, 'r') as f:\n return json.load(f)\n except (FileNotFoundError, json.decoder.JSONDecodeError):\n return {}\n\n\ndef update_data(json_file, new_data):\n file_to_update = get_data(json_file)\n file_to_update.append(new_data)\n with open(json_file, 'w') as file:\n json.dump(file_to_update, file)\n return True\n","sub_path":"flask3/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482301503","text":"import sqlite3\nimport urllib\nimport re\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup, NavigableString\nfrom phyllo.phyllo_logger import logger\nimport nltk\nfrom itertools import cycle\n\nnltk.download('punkt')\n\nfrom nltk import sent_tokenize\n\ndef parseRes2(soup, title, url, cur, author, date, collectiontitle):\n chapter = '-'\n sen = \"\"\n s=''\n h=''\n s1=[]\n s2=[]\n i=1\n [e.extract() for e in soup.find_all('font')]\n [e.extract() for e in soup.find_all('sup')]\n [e.extract() for e in soup.find_all('a')]\n [e.extract() for e in soup.find_all('br')]\n [e.extract() for e in soup.find_all('table')]\n getp=soup.find_all('p')\n for p in getp:\n # make sure it's not a paragraph without the main text\n try:\n if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',\n 'internal_navigation']: # these are not part of the main t\n continue\n except:\n pass\n\n if p.b:\n chapter=p.b.text\n else:\n i=i+1\n sen=str(p.text).strip()\n j=1\n if i>190:\n rsen = ''\n s=sen.split('\\n')\n for b in s:\n rsen=rsen+' '+b.strip()\n sen=rsen\n for s in sent_tokenize(sen):\n if s.isspace():\n continue\n sentn = str(s).strip()\n num = j\n cur.execute(\"INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)\",\n (None, collectiontitle, title, 'Latin', author, date, chapter,\n num, sentn, url, 'prose'))\n j += 1\n\n\ndef main():\n # get proper URLs\n siteURL = 'http://www.thelatinlibrary.com'\n biggsURL = 'http://www.thelatinlibrary.com/bebel.html'\n biggsOPEN = urllib.request.urlopen(biggsURL)\n biggsSOUP = BeautifulSoup(biggsOPEN, 'html5lib')\n textsURL = []\n\n # remove some unnecessary urls\n while (\"http://www.thelatinlibrary.com/index.html\" in textsURL):\n textsURL.remove(\"http://www.thelatinlibrary.com/index.html\")\n textsURL.remove(\"http://www.thelatinlibrary.com/classics.html\")\n textsURL.remove(\"http://www.thelatinlibrary.com/neo.html\")\n logger.info(\"\\n\".join(textsURL))\n\n title='FACETIARUM BEBELIANARUM'\n\n author = 'Heinrich Bebel'\n author = author.strip()\n collectiontitle = 'LIBER FACETIARUM BEBELIANARUM'\n collectiontitle=collectiontitle.strip()\n date = '1472-1518'\n\n with sqlite3.connect('texts.db') as db:\n c = db.cursor()\n c.execute(\n 'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'\n ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'\n ' link TEXT, documentType TEXT)')\n c.execute(\"DELETE FROM texts WHERE author = 'Heinrich Bebel'\")\n 
parseRes2(biggsSOUP, title, biggsURL, c, author, date, collectiontitle)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"phyllo/extractors/bebelDB.py","file_name":"bebelDB.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"514064221","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\ndf=pd.read_csv('nflx.csv',parse_dates=True,index_col=0)\n\ndf['25ma']=df['Adj Close'].rolling(window=25,min_periods=0).mean()\nrm=df['25ma']\nrstd = df['Adj Close'].rolling(window=25,min_periods=0).std()\n\nax1=plt.subplot2grid((6,1),(0,0),rowspan=5,colspan=1)\nax2=plt.subplot2grid((6,1),(5,0),rowspan=1,colspan=1)\nax1.plot(df.index,df['25ma'])\nax1.plot(df.index,df['Adj Close'])\nax2.bar(df.index,df['Volume'])\nplt.show()\n","sub_path":"股票成交量与均线绘制.py","file_name":"股票成交量与均线绘制.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525163549","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom orders import views\nfrom orders.views import OrdersViewSet\nfrom django.conf.urls import include\nfrom rest_framework.documentation import include_docs_urls\n\n# 路径 功能\n# /orders 列出所有 order\n# /orders/{id} 列出具体的一个订单\n# /orders/create 只接受 Post 请求创建一个订单\n# /orders/{id}/cancel 接受 Get 请求,取消一个订单。\n\norders_list = OrdersViewSet.as_view({\n 'get': 'list'\n})\n\norders_detail = OrdersViewSet.as_view({\n 'get': 'retrieve',\n})\n\n\norders_cancel = OrdersViewSet.as_view({\n 'get': 'cancel'\n})\n\nrouter = DefaultRouter()\nrouter.register(r'', views.OrdersViewSet, 'order_list')\n\nurlpatterns = [\n path('', orders_list, name='orders-list'),\n path('/', orders_detail, name='orders-detail'),\n path('/create', include(router.urls)),\n path('//cancel', orders_cancel, name='orders-cancel'),\n # path('api-auth/', include('rest_framework.urls',\n # namespace='rest_framework')),\n]\n","sub_path":"week09/homework/order/orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79972194","text":"from django import forms\nfrom django.utils.translation import gettext_lazy as _\nfrom allauth.account import app_settings\nfrom allauth.account.forms import SignupForm\n\n\nclass CustomSignupForm(SignupForm):\n\n first_name = forms.CharField(\n max_length=30,\n label=_(\"first name\").capitalize(),\n widget=forms.TextInput(attrs={\"placeholder\": _(\"first name\").capitalize()}),\n )\n\n last_name = forms.CharField(\n max_length=150,\n label=_(\"last name\").capitalize(),\n widget=forms.TextInput(attrs={\"placeholder\": _(\"last name\").capitalize()}),\n )\n\n username = forms.CharField(\n label=_(\"Username\"),\n min_length=app_settings.USERNAME_MIN_LENGTH,\n widget=forms.TextInput(\n attrs={\"placeholder\": _(\"Username\"), \"autocomplete\": \"username\"}\n ),\n help_text=_(\"Required. 20 characters or fewer. 
Letters, digits and _ only.\"),\n )\n\n email = forms.EmailField(\n widget=forms.TextInput(\n attrs={\n \"type\": \"email\",\n \"placeholder\": _(\"E-mail address\"),\n \"autocomplete\": \"email\",\n }\n )\n )\n\n field_order = [\n \"first_name\",\n \"last_name\",\n \"username\",\n \"email\",\n \"email2\", # ignored when not present\n \"password1\",\n \"password2\", # ignored when not present\n ]\n\n def save(self, request):\n user = super(CustomSignupForm, self).save(request)\n user.first_name = self.cleaned_data[\"first_name\"]\n user.last_name = self.cleaned_data[\"last_name\"]\n user.save()\n return user\n","sub_path":"services/core/apps/accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570301276","text":"\"\"\"\nData format for POST requests. Used solely for testing purposes.\nTo add a message we simply supply a string for the appropriate field:\n\n data['entry'][0]['messaging'][0]['message']['text'] = \"some text to send\"\n\nRest can be left as is.\n\"\"\"\ndata = { \"object\": \"page\",\n \"entry\": [{\n \"time\": 1460245674269,\n \"id\": \"PAGE_ID\",\n \"messaging\": [{\n \"sender\": {\n \"id\": \"USER_ID\"\n },\n \"timestamp\": 1460245672080,\n \"recipient\": {\n \"id\": \"PAGE_ID\"\n },\n \"message\" : {\n \"text\": \"\",\n \"seq\": 216,\n \"mid\": \"mid.1460245671959:dad2ec9421b03d6f78\" # message id\n }\n }]\n}]}\n\n__all__ = ['data']\n","sub_path":"tests/_post_data.py","file_name":"_post_data.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"361068298","text":"# coding=utf-8\nfrom gensim.models import word2vec\nimport jieba\nimport logging\nfrom argparse import ArgumentParser\nfrom sklearn.decomposition import PCA\nfrom matplotlib import pyplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nfrom phase_segment import phase_seg\nimport csv\nfrom matplotlib.font_manager import FontProperties\ndef cha_names(dict):\n f = open(dict,'r')\n lines = f.readlines()\n names = []\n for line in lines:\n names.append(line.split(' ')[0])\n return names\n\ndef plotData(plt, data, label):\n x = [p[0] for p in data]\n y = [p[1] for p in data]\n return(plt.plot(x, y, label = label))\n\ndef wordcloud(text):\n wc = WordCloud(font_path=\"NotoSerifCJKtc-Black.otf\", #設置字體\n background_color=\"white\", #背景顏色\n max_words = 2000 ,) #停用字詞\n wc.generate_from_frequencies(text)\n # 視覺化呈現\n plt.imshow(wc)\n plt.axis(\"off\")\n # plt.figure(figsize=(10,6), dpi = 100)\n plt.show()\n\ndef main():\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n names = cha_names(\"user_dict_{0}\".format(args.dict))\n datas = []\n target = [1,2,9,10,18,19]\n\n for i in range(1,20):\n lines = phase_seg(args.novel_root.format(i), \"user_dict_{0}\".format(args.dict))\n if i in target:\n freq = {}\n for name in names:\n freq[name]=0\n for words in lines:\n for word in words:\n if word in names:\n freq[word] +=1\n # wordcloud(freq)\n datas.append([((names.index(name)+1)*5,freq[name]) for name in names])\n if i ==19:\n f = open('mycsvfile.csv','w',encoding = 'utf-8-sig')\n w = csv.DictWriter(f,fieldnames = names)\n w.writeheader()\n w.writerow(freq)\n f.close()\n print(datas)\n p=[]\n fig , ax = plt.subplots()\n labels=['one','two','eight','nine','eighteen','nineteen']\n for data in datas:\n 
p.append(plotData(plt,data,labels[datas.index(data)]))\n plt.legend()\n print(p)\n myfont = FontProperties(fname=r'/Users/rex/Downloads/NotoSerifCJKtc-hinted/NotoSerifCJKtc-Black.otf')\n # plt.legend(handles=p,labels=['one','two','eight','nine','eighteen','nineteen'], loc='upper left')\n plt.title('各角色在特定卷數所出現的頻率', fontproperties=myfont)\n plt.xticks(range(5,125,5),names,fontproperties=myfont)\n ax.legend()\n plt.show()\n # # model = word2vec.Word2Vec(words, size=400, alpha=0.001,min_alpha=0.00001, min_count=5, iter=50)\n # # model.save(\"/Users/rex/data_science/models/word2vec_{0}.model\".format(i))\n\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\"--dict\", default=5, type=int)\n parser.add_argument(\"-n\", \"--novel_root\", default='suck',type=str)\n parser.add_argument(\"-m\", \"--mode\", default='train', type=str)\n parser.add_argument(\"-t\", \"--topn\", default=10, type=int)\n args = parser.parse_args()\n if args.mode=='train':\n main()","sub_path":"final_project/analyzing.py","file_name":"analyzing.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"144339971","text":"import pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nfrom gurobipy import *\nimport numpy as np\nfrom numpy import arange, array, mean, sort  # these names are used unqualified below\nfrom mpl_toolkits.basemap import Basemap\nimport networkx as nx\n\n\n\ndata = pd.read_csv(r'C:\\\Users\\\USER\\\Documents\\\Imperial College London\\\Core Module\\\Network Analytics\\\Assignment\\\A2\\\tempData.txt', delimiter=r\"\\s+\")\n\ndef distance_on_unit_sphere(lat1, long1, lat2, long2):\n \n# Convert latitude and longitude to\n# spherical coordinates in radians.\n degrees_to_radians = math.pi / 180.0\n \n # phi = 90 - latitude\n phi1 = (90.0 - lat1) * degrees_to_radians\n phi2 = (90.0 - lat2) * degrees_to_radians\n \n # theta = longitude\n theta1 = long1 * degrees_to_radians\n theta2 = long2 * degrees_to_radians\n \n # Compute spherical distance from spherical coordinates.\n \n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta', phi')\n # cosine( arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n \n cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) + math.cos(phi1) * math.cos(phi2))\n arc = math.acos(cos)\n \n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n return arc\n\ndef distance_table_transform(data):\n transData = {'From': '', \n 'To': '',\n 'Distance': ''\n }\n fList = []\n toList = []\n dis = []\n for i in arange(len(data)):\n for n in arange(len(data)):\n if (i != n):\n fList.append(i + 1)\n toList.append(n + 1)\n dis.append((6373 * distance_on_unit_sphere((data['latitude'][i] / 1000), (data['longitude'][i] / 1000), (data['latitude'][n] / 1000), (data['longitude'][n] / 1000))))\n else:\n fList.append(i + 1)\n toList.append(n + 1)\n dis.append(0)\n transData['From'] = fList\n transData['To'] = toList\n transData['Distance'] = dis\n return transData\n\ndef distance_matrix_transform(transData):\n ### Distance Matrix\n Pdtrans = pd.DataFrame.from_dict(transData)\n DistanceMatrix = pd.pivot_table(Pdtrans, index = ['From'], columns = ['To'])\n pd.DataFrame.to_csv(DistanceMatrix, r'C:\\\Users\\\USER\\\Documents\\\Imperial College London\\\Core Module\\\Network Analytics\\\Assignment\\\A2\\\DistanceMatrix.csv')\n return DistanceMatrix\n\ndef draw_map_scatter(data , mapscale=0.5, markersize=5):\n lat = []\n long = []\n for x in 
data['latitude']: lat.append(x / 1000)\n for x in data['longitude']: long.append(x / 1000)\n \n minlat = min(lat) - mapscale\n maxlat = max(lat) + mapscale\n minlong = min(long) - mapscale\n maxlong = max(long) + mapscale\n meanlat = mean(lat)\n meanlong = mean(long)\n \n plt.figure(figsize= (20,20))\n map = Basemap(projection='lcc', \n llcrnrlon = minlong ,\n llcrnrlat = minlat,\n urcrnrlon = maxlong,\n urcrnrlat = maxlat ,\n lat_0 = meanlat,\n lon_0 = meanlong,\n resolution = 'h')\n x,y = map(long,lat)\n map.fillcontinents(color = 'coral', lake_color ='blue')\n map.drawcoastlines(linewidth = 4)\n map.drawcountries(linewidth = 2)\n map.plot(x,y, 'ro', markersize= markersize)\n map.drawlsmask(ocean_color= 'blue',land_color='blue')\n plt.show()\n\n\ntransData = distance_table_transform(data)\nDistanceMatrix = distance_matrix_transform(transData)\ndraw_map_scatter(data,0.2, 7)\n\n\n### 2c\n\n\n# Optimize model\n\ndef model_setup ():\n m = Model()\n #m.params.LazyConstraints = 1\n m.setObjective(GRB.MINIMIZE)\n dmc = {}\n for i in arange(38):\n for j in arange(38):\n dmc[i,j] = float(DistanceMatrix.loc[i + 1][j])\n vars = {}\n for i in range(38):\n for j in range(i + 1):\n vars[i,j] = m.addVar(obj=dmc[i,j], vtype=GRB.BINARY,\n name='e' + str(i) + '_' + str(j))\n vars[j,i] = vars[i,j]\n m.update()\n for i in range(38):\n m.addConstr(quicksum(vars[i,j] for j in range(38)) == 2)\n vars[i,i].ub = 0\n m.update()\n m._vars = vars\n res = {}\n return m\n\ndef optimize_tour (n = 38):\n m = model_setup()\n while True:\n m.reset()\n m.optimize()\n selected = {}\n for i in arange(38):\n for j in arange(i+1):\n if m._vars[i,j].X >=0.5:\n selected[i,j] = (i,j)\n G = nx.Graph()\n G.add_edges_from(selected)\n cycle = nx.cycle_basis(G)\n if len(cycle) == 1 and len(cycle[0])==n:\n m.optimize()\n res = selected\n break\n else:\n print(sort(cycle[-1]))\n if len(cycle) >2:\n print(cycle[1]+cycle[-1])\n m.addConstr(quicksum(m._vars[k,l] for k in sort(cycle[0]) for l in sort(cycle[1] + cycle[-1])) >= 2)\n else:\n m.addConstr(quicksum(m._vars[k,l] for k in sort(cycle[0]) for l in sort(cycle[-1])) >= 2)\n resnorm =[]\n for i in res.keys():\n resnorm.append(i)\n return resnorm\n\ndef optimize_tour_map(selected, DistanceMatrix, data,mapscale = 0.5, res = 'f'):\n udv = []\n for i in range(0,len(selected)):\n udv.append((selected[i][0] +1,selected[i][1] +1,DistanceMatrix.loc[selected[i][0] + 1][selected[i][1]] ))\n \n g =nx.Graph()\n node = arange(1,39)\n g.add_nodes_from(node)\n g.add_weighted_edges_from(udv)\n lat = []\n long = []\n for x in data['latitude']: lat.append(x / 1000)\n for x in data['longitude']: long.append(x / 1000)\n minlat = min(lat) - mapscale\n maxlat = max(lat) + mapscale\n minlong = min(long) - mapscale\n maxlong = max(long) + mapscale\n meanlat = mean(lat)\n meanlong = mean(long)\n position = {}\n\n \n plt.figure(figsize= (20,20))\n map = Basemap(projection='lcc', \n llcrnrlon = minlong ,\n llcrnrlat = minlat,\n urcrnrlon = maxlong,\n urcrnrlat = maxlat ,\n lat_0 = meanlat,\n lon_0 = meanlong,\n resolution = res\n )\n for i in range(0,len(data)):\n position[i] = map(data['longitude'][i]/1000,data['latitude'][i]/1000)\n pos = {}\n for i in range(1,len(data)+1):\n pos[i] = array([position[i-1][0],position[i-1][1]])\n\n #map.fillcontinents(color = 'coral', alpha = .0)\n map.drawcoastlines(linewidth = 4)\n map.drawcountries(linewidth = 2)\n map.bluemarble()\n #map.drawlsmask(ocean_color= 'blue',land_color='blue')\n nx.draw_networkx_nodes(g, pos = pos, node_size = 50)\n nx.draw_networkx_edges(g, pos= 
pos, edge_color='lightblue', width = 5, alpha = 1)\n plt.show()\n\n\n\n\nres = optimize_tour()\noptimize_tour_map(res, DistanceMatrix, data,mapscale = 0.5, res = 'f')\n\n\n\n\n","sub_path":"Network_Group_1/Network_Group_1/Network_Group_1_2.py","file_name":"Network_Group_1_2.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"304079766","text":"import collections\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\n# Iterative - BFS\nclass Solution:\n def maxLevelSum(self, root: TreeNode) -> int:\n max_level_sum, max_level, level = float('-inf'), 0, 0\n q = collections.deque()\n q.append(root) # (node, level)\n\n while q:\n level_sum = 0\n level += 1\n for _ in range(len(q)):\n node = q.popleft()\n level_sum += node.val\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n if max_level_sum < level_sum:\n max_level_sum, max_level = level_sum, level\n\n return max_level\n\n\n# Recursive - BFS\nclass Solution:\n def __init__(self):\n self.levels = {}\n\n def maxLevelSum(self, root: TreeNode) -> int:\n self.sum_vals(root, 1, self.levels)\n return max(self.levels, key=self.levels.get)\n\n def sum_vals(self, node, depth, levels):\n if not node:\n return None\n levels[depth] = levels.get(depth, 0) + node.val\n self.sum_vals(node.left, depth+1, levels)\n self.sum_vals(node.right, depth+1, levels)\n return levels\n","sub_path":"Problems/Leetcode/1161_MaximumLevelSumOfABinaryTree.py","file_name":"1161_MaximumLevelSumOfABinaryTree.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"185322013","text":"import time  # needed for the time.time() timestamps used throughout\nimport block\nimport transaction\nimport base64\nimport signEncrypt as se\n\nclass BlockChain:\n\n difficulty = 2\n\n class Node:\n\n def __init__(self):\n self.next = None\n self.prev = None\n self.data = None\n\n def __init__(self):\n self.getKeys()\n print(\"Creating BlockChain...\")\n self.createGenesis()\n self.tail = self.genesis\n self.genTransactions()\n self.length = 1\n self.mineBlock()\n self.verifyIntegrity()\n\n def getKeys(self):\n print(\"Reading Pair A...\")\n keysA = []\n with open('publicA.key', 'r') as puKeyA:\n keysA.append(puKeyA.read())\n with open('privateA.key', 'r') as prKeyA:\n keysA.append(prKeyA.read())\n\n public = keysA[0]\n private = keysA[1]\n self.puKeyA = public\n self.prKeyA = private\n print(\"Reading Pair B...\")\n keysB = []\n with open('publicB.key', 'r') as puKeyB:\n keysB.append(puKeyB.read())\n with open('privateB.key', 'r') as prKeyB:\n keysB.append(prKeyB.read())\n public = keysB[0]\n private = keysB[1]\n self.puKeyB = public\n self.prKeyB = private\n self.users = {}\n self.users[self.puKeyA] = \"A\"\n self.users[self.puKeyB] = \"B\"\n self.users[\"A\"] = self.puKeyA\n self.users[\"B\"] = self.puKeyB\n '''\n print(\"a\", self.puKeyA)\n print(\"b\", self.puKeyB)\n print(\"ap\", self.prKeyA)\n print(\"bp\", self.prKeyB)\n '''\n\n def genTransactions(self):\n self.transactions = []\n self.transactions.append(transaction.Transaction(40, self.puKeyA, self.puKeyB, self.prKeyA, time.time()))\n '''\n self.transactions[0].unsign(self.transactions[0].origID)\n print(self.puKeyA == self.transactions[0].origID)\n print(self.puKeyB == self.transactions[0].destID)\n print(\"Original = 40\", \"New =\", self.transactions[0].amtToAdd)\n 
print(self.puKeyB)\n print(\"***\")\n print(self.transactions[0].destID)\n print(\"***\")\n '''\n self.transactions.append(transaction.Transaction(15, self.puKeyB, self.puKeyA, self.prKeyB, time.time()))\n self.transactions.append(transaction.Transaction(60, self.puKeyA, self.puKeyB, self.prKeyA, time.time()))\n self.transactions.append(transaction.Transaction(20, self.puKeyA, self.puKeyB, self.prKeyA, time.time()))\n self.transactions.append(transaction.Transaction(50, self.puKeyB, self.puKeyA, self.prKeyB, time.time()))\n\n def createGenesis(self):\n data = transaction.Transaction(100, None, self.puKeyA, None, time.time())\n gen = block.Block(0, [data])\n gen.idx = 0\n self.genNonce(gen)\n self.genesis = self.Node()\n self.genesis.data = gen\n\n def addBlock(self, data):\n node = self.Node()\n node.data = data\n node.prev = self.tail\n self.tail.next = node\n self.tail = node\n self.length += 1\n\n def getLatest(self):\n return self.tail\n\n #Remove transaction if it fails, could take an array instead of one\n #Must check if transaction is valid, ie they have enough money\n #Do I decrypt the ID here?\n #Does my encryption actually handle lists at all?\n def mineBlock(self):\n while len(self.transactions) > 0:\n t = self.transactions[0]\n t.unsign(t.origID)\n accepted = False\n if int(t.amtToAdd) >= 0:\n bal = self.getBalance(t.origID)\n print\n #print(\"Sender:\", self.users[t.origID])\n #print(bal)\n #print(t.amtToAdd)\n if bal >= int(t.amtToAdd):\n accepted = True\n print(\"Transaction \" + str(self.length) + \" (Amount: \" + str(t.amtToAdd) + \" ): \" + self.users[t.origID] + \"->\" + self.users[t.destID] + \" Accepted\")\n b = block.Block(self.length, [t])\n b.prevHash = self.tail.data.hash\n print(\"Mining Block \" + str(self.length) + \"... 
\", end = \"\")\n self.genNonce(b)\n print(\"(\" + str(b.nonce) + \", \" + str(b.hash) + \")\")\n self.addBlock(b)\n accepted = True\n if not accepted:\n print(\"Transaction \" + str(b.idx) + \" (Amount: \" + str(t.amtToAdd) + \"): \" + self.users[t.origID] + \"->\" + self.users[t.destID] + \" Declined\")\n del self.transactions[0]\n\n #Use private key to authenticate\n def getBalance(self, puKey):\n cur = self.genesis\n bal = 0\n #print(\"BL len:\", self.length)\n while True:\n #print(\"Looped\")\n ts = cur.data.data\n for t in ts:\n if (not t.origID is None) and t.origID == puKey:\n #print(\"Is orig\")\n bal -= int(t.amtToAdd)\n elif t.destID == puKey:\n #print(\"Is dest\")\n bal += int(t.amtToAdd)\n if cur.next is None:\n break\n cur = cur.next\n return bal\n\n def verifyIntegrity(self):\n cur = self.genesis\n valid = True\n print(\"Chain Verification...\", end=\"\")\n while True:\n '''\n if cur.data.idx == 0:\n if not cur.data.verifyPrint():\n print(\"Hashwrong\", cur.data.idx)\n valid = False\n break\n '''\n #elif not cur.data.verify():\n if not cur.data.verify():\n print(\"Hashwrong\", cur.data.idx)\n valid = False\n break\n if not cur.next is None:\n if not cur.data.hash == cur.next.data.prevHash:\n print(\"prevhashwrong\", cur.data.idx)\n valid = False\n break\n\n if cur.next is None:\n break\n cur = cur.next\n if valid:\n print(\"Verified\")\n print(\"Amount in A's Wallet: \" + str(self.getBalance(self.users[\"A\"])))\n print(\"Amount in B's Wallet: \" + str(self.getBalance(self.users[\"B\"])))\n else:\n print(\"Unverified\")\n\n\n #SWITCH TO BASE 16\n def genNonce(self, b):\n hash = base64.b16encode(b.genHash()).decode()\n while not hash[0:BlockChain.difficulty] == \"00\":\n b.nonce += 1\n hash = base64.b16encode(b.genHash()).decode()\n b.hash = hash\n #if b.idx == 0:\n #base64.b16encode(b.genHashPrint()).decode()\n\n#When your program runs, it should take in two private and public key pairs (as if you ahve two people)\n#Output creating \"block chain...\", \"Reading pair A...\", \"Reading pair B...\", \"Mining Block 1...\", \"Done Mining Block 1...\"\n\nif __name__ == \"__main__\":\n chain = BlockChain()\n","sub_path":"blockChain.py","file_name":"blockChain.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"113706146","text":"# https://www.epfl.ch/labs/mmspg/research/page-58317-en-html/page-58332-en-html/page-58333-en-html/iqa/\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import minmax_scale\r\n\r\npd.set_option('display.max_rows', 1000)\r\npd.set_option('display.max_columns', 1000)\r\npd.set_option('display.width', 1000)\r\n\r\ndf = pd.read_csv('JPEGXR.VQMT.csv')\r\ndf['mos'] = minmax_scale(df['mos'], feature_range=(1,5), axis=0)\r\n\r\nminMOS = df['mos'].min()\r\nmaxMOS = df['mos'].max()\r\nseries = np.linspace(maxMOS, minMOS, 20)\r\n\r\nresults = pd.DataFrame()\r\nfor i in series:\r\n\tfiltered = df[['mos', 'DSSIM', 'SSIMULACRA', 'Butteraugli', 'Butteraugli_XL', 'Butteraugli_XL_3m', 'Butteraugli_XL_2s', 'Butteraugli_XL_3s', 'Butteraugli_XL_6s', 'Butteraugli_XL_12s']][df['mos'] >= i]\r\n\tresults = results.append(filtered.corr('spearman')[['mos']].T.reset_index(), ignore_index=True, sort=False)\r\n\r\nresults.index = series\r\nresults = results.drop(['mos', 'index'], axis=1)\r\nresults = results.dropna(thresh=1)\r\n\r\nprint(results)\r\n\r\nplt.figure(figsize=(1920/96, 1080/96), 
dpi=96)\r\n#plt.plot(results.index, results['DSSIM'], label='DSSIM')\r\n#plt.plot(results.index, results['SSIMULACRA'], label='SSIMULACRA')\r\nplt.plot(results.index, results['Butteraugli_XL'], label='Butteraugli_XL')\r\n#plt.plot(results.index, results['Butteraugli_XL_3m'], label='Butteraugli_XL_3m')\r\nplt.plot(results.index, results['Butteraugli_XL_2s'], label='Butteraugli_XL_2s')\r\nplt.plot(results.index, results['Butteraugli_XL_3s'], label='Butteraugli_XL_3s')\r\nplt.plot(results.index, results['Butteraugli_XL_6s'], label='Butteraugli_XL_6s')\r\nplt.plot(results.index, results['Butteraugli_XL_12s'], label='Butteraugli_XL_12s')\r\nplt.legend()\r\nplt.ylabel('SROCC')\r\nplt.xlabel('JPEG XR MOS')\r\nplt.savefig('plot.png', bbox_inches='tight')\r\n","sub_path":"NormAnalysis/JPEGXR/JPEGXR.graphs.py","file_name":"JPEGXR.graphs.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"596905812","text":"import os\nimport struct\nimport numpy as np\nimport sys\nimport gzip\nimport matplotlib.pyplot as plt\nfrom neuralnet import NeuralNetMLP\n\n\n# http://yann.lecun.com/exdb/mnist/\n\n\ndef func001():\n print('hello function %s' % '111')\n\n\ndef load_mnist(path, kind='train'):\n \"\"\"Load MNIST data from path\"\"\"\n\n labels_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n\n labels = np.fromfile(lbpath, dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\", imgpath.read(16))\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\n images = ((images / 255.) 
- 0.5) * 2\n\n return images, labels\n\n\nif (sys.version_info > (3, 0)):\n writemode = 'wb'\nelse:\n writemode = 'w'\n\nzipped_mnist = [f for f in os.listdir('./') if f.endswith('ubyte.gz')]\nfor z in zipped_mnist:\n with gzip.GzipFile(z, mode='rb') as decompressed, open(z[:-3], writemode) as outfile:\n outfile.write(decompressed.read())\n\nX_train, y_train = load_mnist('', kind='train')\nprint('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))\n\nX_test, y_test = load_mnist('', kind='t10k')\nprint('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))\n\n\nfig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)\nax = ax.flatten()\nfor i in range(10):\n img = X_train[y_train == i][0].reshape(28, 28)\n ax[i].imshow(img, cmap='Greys')\n\nax[0].set_xticks([])\nax[0].set_yticks([])\nplt.tight_layout()\n# plt.savefig('images/12_5.png', dpi=300)\nplt.show()\n\n\nfig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)\nax = ax.flatten()\nfor i in range(25):\n img = X_train[y_train == 7][i].reshape(28, 28)\n ax[i].imshow(img, cmap='Greys')\n\nax[0].set_xticks([])\nax[0].set_yticks([])\nplt.tight_layout()\n# plt.savefig('images/12_6.png', dpi=300)\nplt.show()\n\n\nn_epochs = 20\n\nnn = NeuralNetMLP(n_hidden=100,\n l2=0.01,\n epochs=n_epochs,\n eta=0.0005,\n minibatch_size=100,\n shuffle=True,\n seed=1)\n\nnn.fit(X_train=X_train[:55000],\n y_train=y_train[:55000],\n X_valid=X_train[55000:],\n y_valid=y_train[55000:])\n\nplt.plot(range(nn.epochs), nn.eval_['cost'])\nplt.ylabel('Cost')\nplt.xlabel('Epochs')\n#plt.savefig('images/12_07.png', dpi=300)\nplt.show()\n\n\nplt.plot(range(nn.epochs), nn.eval_['train_acc'],\n label='training')\nplt.plot(range(nn.epochs), nn.eval_['valid_acc'],\n label='validation', linestyle='--')\nplt.ylabel('Accuracy')\nplt.xlabel('Epochs')\nplt.legend()\n#plt.savefig('images/12_08.png', dpi=300)\nplt.show()\n\ny_test_pred = nn.predict(X_test)\nacc = (np.sum(y_test == y_test_pred)\n .astype(np.float) / X_test.shape[0])\n\nprint('Test accuracy: %.2f%%' % (acc * 100))\n\n\nmiscl_img = X_test[y_test != y_test_pred][:25]\ncorrect_lab = y_test[y_test != y_test_pred][:25]\nmiscl_lab = y_test_pred[y_test != y_test_pred][:25]\n\nfig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True,)\nax = ax.flatten()\nfor i in range(25):\n img = miscl_img[i].reshape(28, 28)\n ax[i].imshow(img, cmap='Greys', interpolation='nearest')\n ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))\n\nax[0].set_xticks([])\nax[0].set_yticks([])\nplt.tight_layout()\n#plt.savefig('images/12_09.png', dpi=300)\nplt.show()\n","sub_path":"labs/hello_fs.py","file_name":"hello_fs.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389732828","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport argparse\nimport numpy as np\nimport os\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom earlystopping import EarlyStopping\nfrom models import GCN\nfrom sample import Sampler\nfrom metric import accuracy\n\n#from pygcn.utils import load_data, accuracy\n#from pygcn.models import GCN\n\nfrom metric import accuracy\nfrom utils import load_citation, load_reddit_data\nfrom models import *\nfrom earlystopping import EarlyStopping\n\n# Training settings\nparser = argparse.ArgumentParser()\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disables CUDA 
training.')\nparser.add_argument('--fastmode', action='store_true', default=False,\n help='Validate during training pass.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=400,\n help='Number of epochs to train.')\nparser.add_argument('--lr', type=float, default=0.01,\n help='Initial learning rate.')\nparser.add_argument('--lradjust',action='store_true', default=False, help = 'Enable leraning rate adjust.(ReduceLROnPlateau)')\nparser.add_argument('--weight_decay', type=float, default=5e-4,\n help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--hidden', type=int, default=128,\n help='Number of hidden units.')\nparser.add_argument('--dropout', type=float, default=0.5,\n help='Dropout rate (1 - keep probability).')\nparser.add_argument('--withbn', action='store_true', default=False, help='Enable Bath Norm GCN')\nparser.add_argument('--nhiddenlayer', type=int, default=0, help='The number of hidden layers.(may outdated)')\nparser.add_argument('--debug', action='store_true', default=False, help=\"Enable the detialed training output.\")\nparser.add_argument('--dataset', default=\"cora\", help=\"The data set\")\nparser.add_argument(\"--earlystopping\", type=int, default=0, help=\"The patience of earlystopping. Do not adopt the earlystopping when it equals 0.\")\nparser.add_argument(\"--normalization\", default=\"AugNormAdj\", help=\"The normalization on the adj matrix.\")\nparser.add_argument(\"--debug_samplingpercent\", type=float, default=1.0,help=\"The percent of the preserve edges (debug only)\")\nparser.add_argument(\"--gpu\", type=int, default=0,help=\"The gpu to be applied\")\nparser.add_argument(\"--mixmode\", action=\"store_true\", default=False, help=\"Enable CPU GPU mixing mode.\")\n\nargs = parser.parse_args()\n\nif args.debug:\n print(args)\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\n#no fix seed here\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n# Load data\n#if args.dataset=='cora':\n# adj, features, labels, idx_train, idx_val, idx_test = load_data()\n#else:\n\n# train_adj = torch.Tensor([1]) #only for reddit\n# train_features = torch.Tensor([1]) #only for reddit\n# if args.dataset == 'reddit':\n# adj, train_adj, features, train_features, labels, idx_train, idx_val, idx_test = load_reddit_data()\n# elif args.dataset == 'pubmed':\n# adj, features, labels, idx_train, idx_val, idx_test = load_citation(args.dataset, \"BingGeNormAdj\")\n# else:\n# adj, features, labels, idx_train, idx_val, idx_test = load_citation(args.dataset, \"BingGeNormAdj\")\n\nsampler = Sampler(args.dataset)\n\n# get labels and indexes\nlabels, idx_train, idx_val, idx_test = sampler.get_label_and_idxes()\nnfeat = sampler.nfeat\nnclass = sampler.nclass\nprint(\"nclass: %d\\tnfea:%d\" % (nclass, nfeat))\n\n# Model and optimizer\nmodel = GCNBS(nfeat=nfeat,\n nhid=args.hidden,\n nclass=nclass,\n withbn=args.withbn,\n nhiddenlayer=args.nhiddenlayer,\n dropout=args.dropout,\n mixmode=args.mixmode)\n\n# model = GCNFlatRes(nfeat=nfeat,\n# nhid=args.hidden,\n# nclass=nclass,\n# withbn=args.withbn,\n# nreslayer=args.nhiddenlayer,\n# dropout=args.dropout)\n\noptimizer = optim.Adam(model.parameters(),\n lr=args.lr, weight_decay=args.weight_decay)\n#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1000, factor=0.5)\nscheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[100, 200, 300], gamma=0.5)\n\n# convert to cuda\nif 
args.cuda:\n model.cuda()\n # features = features.cuda()\n # adj = adj.cuda()\n # train_adj = train_adj.cuda() #only for reddit\n # train_features = train_features.cuda() #only for reddit\n #Maybe not the best practice here.\n labels = labels.cuda()\n idx_train = idx_train.cuda()\n idx_val = idx_val.cuda()\n idx_test = idx_test.cuda()\n\n\nif args.cuda or args.mixmode:\n labels = labels.cuda()\n idx_train = idx_train.cuda()\n idx_val = idx_val.cuda()\n idx_test = idx_test.cuda()\n\n\n# set early_stopping\nif args.earlystopping > 0:\n early_stopping = EarlyStopping(patience=args.earlystopping, verbose=False)\n\n\n# define the training function.\n\ndef train(epoch, train_adj, train_fea, val_adj=None, val_fea=None):\n if val_adj is None:\n val_adj = train_adj\n val_fea = train_fea\n t = time.time()\n\n #adjust lr\n if args.lradjust:\n #scheduler.step(loss_val)\n scheduler.step()\n\n model.train()\n optimizer.zero_grad()\n output = model(train_fea, train_adj)\n #special for reddit\n if sampler.learning_type == \"inductive\":\n loss_train = F.nll_loss(output, labels[idx_train])\n acc_train = accuracy(output, labels[idx_train])\n else:\n loss_train = F.nll_loss(output[idx_train], labels[idx_train])\n acc_train = accuracy(output[idx_train], labels[idx_train])\n \n loss_train.backward()\n optimizer.step()\n\n #We can not apply the fastmode for the reddit dataset.\n if sampler.learning_type == \"inductive\" or not args.fastmode:\n # Evaluate validation set performance separately,\n # deactivates dropout during validation run.\n model.eval()\n output = model(val_fea, val_adj)\n \n loss_val = F.nll_loss(output[idx_val], labels[idx_val])\n acc_val = accuracy(output[idx_val], labels[idx_val])\n\n if args.earlystopping > 0:\n early_stopping(loss_val, model)\n \n if args.debug and epoch % 1 == 0:\n print('Epoch: {:04d}'.format(epoch+1),\n 'loss_train: {:.4f}'.format(loss_train.item()),\n 'acc_train: {:.4f}'.format(acc_train.item()),\n 'loss_val: {:.4f}'.format(loss_val.item()),\n 'acc_val: {:.4f}'.format(acc_val.item()),\n 'time: {:.4f}s'.format(time.time() - t))\n return (loss_train.item(), acc_train.item(), loss_val.item(), acc_val.item())\n\n\ndef test(test_adj,test_fea):\n model.eval()\n output = model(test_fea, test_adj)\n loss_test = F.nll_loss(output[idx_test], labels[idx_test])\n acc_test = accuracy(output[idx_test], labels[idx_test])\n if args.debug:\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return (loss_test.item(), acc_test.item())\n\n# Visualize \n#params = list(model.named_parameters())\n#print(params[0])\n#exit()\n\n# Train model\nt_total = time.time()\nloss_train = np.zeros((args.epochs, ))\nacc_train = np.zeros((args.epochs, ))\nloss_val = np.zeros((args.epochs, ))\nacc_val = np.zeros((args.epochs, ))\n\nfor epoch in range(args.epochs):\n #(train_adj, train_fea) = sampler.stub_sampler(normalization=args.normalization, cuda=args.cuda)\n (train_adj, train_fea) = sampler.randomedge_sampler(percent=args.debug_samplingpercent, normalization=args.normalization, cuda=args.cuda)\n if sampler.learning_type == \"transductive\":\n outputs = train(epoch, train_adj, train_fea)\n else:\n (val_adj, val_fea) = sampler.get_test_set(normalization=args.normalization, cuda=args.cuda)\n outputs = train(epoch, train_adj, train_fea, val_adj, val_fea)\n loss_train[epoch], acc_train[epoch], loss_val[epoch], acc_val[epoch] = outputs[0], outputs[1], outputs[2], outputs[3]\n\n if args.earlystopping > 0 and 
early_stopping.early_stope:\n print(\"Early stopping.\")\n model.load_state_dict(early_stopping.load_checkpoint())\n break\n\nnp.savetxt('./results_'+args.dataset+'_'+str(args.nhiddenlayer), np.vstack((loss_train, loss_val, acc_train, acc_val)), delimiter='\\t')\n\nif args.debug:\n print(\"Optimization Finished!\")\n print(\"Total time elapsed: {:.4f}s\".format(time.time() - t_total))\n\n# Testing\n(test_adj, test_fea) = sampler.get_test_set(normalization=args.normalization, cuda=args.cuda)\n(train_adj, train_fea) = sampler.stub_sampler(normalization=args.normalization, cuda=args.cuda)\n(loss_test, acc_test) = test(test_adj, test_fea)\nprint(\"%.6f\\t%.6f\\t%.6f\\t%.6f\\t%.6f\\t%.6f\"%(loss_train[-1], loss_val[-1], loss_test, acc_train[-1],acc_val[-1],acc_test))\n\n\n","sub_path":"src/train_hwb.py","file_name":"train_hwb.py","file_ext":"py","file_size_in_byte":8944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"313495413","text":"#!/usr/bin/env python3\n\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\n\n#df = pd.read_ctab(sys.argv[1], index_col=\"t_name\")\n\n# posit_list = []\n# negat_list = []\nfor i, line in enumerate(open(sys.argv[1])):\n if i == 0:\n continue\n fields = line.rstrip(\"\\n\").split(\"\\t\")\n # protein_region =\n \n if fields[2] == \"+\":\n promoter_left = int(fields[3])-500\n promoter_right = int(fields[3])+ 500 \n promoter_left = max(promoter_left, 1)\n # posit_list = posit_list.append(promoter_plus)\n else:  # fields[2] == \"-\"\n promoter_left = int(fields[4])+500\n promoter_right = int(fields[4])- 500 \n promoter_left = max(promoter_left, 1)\n \n \n \n # negat_list = negat_list.append(promoter_minus)\n print(fields[1],promoter_left,promoter_right,fields[5],sep='\\t')\n# print(posit_list, negat_list)\n \n\n\n\n\n ","sub_path":"day5-lunch/exercise-02.py","file_name":"exercise-02.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"91154028","text":"import pygame\n\nclass Game():\n \"\"\"The main game object.\"\"\"\n def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode((640, 240))\n\n def run(self):\n \"\"\"Runs the main event loop.\"\"\"\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n pygame.display.flip()\n\nif __name__ == '__main__':\n Game().run()","sub_path":"docs/tutorial1/intro3.py","file_name":"intro3.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"416356211","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n-------------------------------------------------\n File Name: __init__.py\n Description :\n Author : patrick\n date: 2019/11/10\n-------------------------------------------------\n Change Activity:\n 2019/11/10:\n-------------------------------------------------\n\"\"\"\n__author__ = 'patrick'\n\n\"\"\"\nFor Python Class and Instance\n1. Class Definition\n2. Instance\n3. Class initialization\n\nhttp://gohom.win/2015/10/20/pyObject/\n\n- [oop - What is a metaclass in Python? - Stack Overflow](http://stackoverflow.com/questions/100003/what-is-a-metaclass-in-python)\n- [Deeply understanding metaclasses in Python - Jobbole](http://blog.jobbole.com/21351/)\n- [Using metaclasses - Liao Xuefeng's official site](http://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/0014319106919344c4ef8b1e04c48778bb45796e0335839000)\n- [Python basics: metaclasses](http://www.cnblogs.com/russellluo/p/3409602.html)\n- [Using class decorators and metaclasses in Python](http://blog.zhangyu.so/python/2016/02/19/class-decorator-and-metaclass-in-python/)\n
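\nA minimal metaclass sketch (illustrative; added here, not taken from the links above):\n\n    class Meta(type):\n        def __new__(mcls, name, bases, namespace):\n            # the metaclass can inspect/alter the class namespace before creation\n            namespace.setdefault('created_by_meta', True)\n            return super().__new__(mcls, name, bases, namespace)\n\n    class Demo(metaclass=Meta):\n        pass\n\n    assert Demo.created_by_meta\n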
\"\"\"\n","sub_path":"python-lessons/day_10_class/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"408830065","text":"import os\nfrom typing import List\n\nimport numpy as np\n\n\ndef load_spectrum(path: str) -> np.ndarray:\n \"\"\"Loads a spectrum from a file with x, y coordinates (two columns, nothing more).\n\n Args:\n path (str): Path to file\n\n Returns:\n np.ndarray: The spectrum loaded in a numpy array.\n \"\"\"\n return np.loadtxt(path)\n\n\ndef normalize_spectrum(spectrum: np.ndarray, lower: float = 1000, upper: float = 1500) -> np.ndarray:\n \"\"\"Returns the normalized and filtered spectrum\n\n Args:\n spectrum (np.ndarray): The spectrum to normalize.\n lower (float, optional): Defaults to 1000.\n upper (float, optional): Defaults to 1500.\n\n Returns:\n np.ndarray: The normalized spectrum.\n \"\"\"\n idx = (spectrum[:, 0] > lower) & (spectrum[:, 0] < upper)\n spectrum[:, 1] = spectrum[:, 1] / np.max(np.abs(spectrum[idx, 1]))\n return spectrum[idx]\n\n\ndef load_peaks(paths: List[str], kind_of_spectra: List[int]) -> np.ndarray:\n \"\"\"Loads the deconvoluted peaks and concatenates them\n\n Args:\n paths (List[str]): Paths to peaks files.\n kind_of_spectra (List[int]): kind of spectrum (in the order of paths) to be loaded.\n\n Returns:\n np.ndarray: The peaks of the spectrum\n \"\"\"\n concatenate = None\n if len(paths) != len(kind_of_spectra):\n raise ValueError(f\"{len(paths)} must be equal to {len(kind_of_spectra)}\")\n for idx, path in enumerate(paths):\n if os.path.exists(path):\n peaks = np.loadtxt(path)\n peaks[:, 0] /= np.max(np.abs(peaks[:, 0]))\n peaks[:, 3] = kind_of_spectra[idx]\n if idx == 0:\n concatenate = peaks\n else:\n concatenate = np.concatenate([concatenate, peaks], axis=0)\n else:\n raise FileNotFoundError(f\"The file {path} does not exist\")\n if concatenate is None:\n raise ValueError(\"No peaks loaded\")\n return concatenate\n","sub_path":"irsa/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"548137119","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = [\n [16, 48, 100],\n [14, 47, 92],\n [16, 45, 88],\n [12, 45, 95],\n [18, 46, 98],\n [18, 46, 101],\n [13, 47, 97],\n [16, 48, 98],\n [18, 49, 110],\n [22, 49, 124],\n [18, 50, 102],\n [19, 51, 115],\n [16, 52, 92],\n [16, 52, 102],\n [22, 50, 104],\n [12, 51, 85],\n [20, 54, 118],\n [14, 53, 105],\n [21, 52, 111],\n [17, 53, 122],\n]\n\ndf = pd.DataFrame(data, columns=['X', 'W', 'Y'])\n\n\nb = df.cov().loc['X', 'Y'] / df.cov().loc['X', 'X']\na = df['Y'].mean() - b * df['X'].mean()\nprint('1: a={0}, b={1}'.format(a, b))\n
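\n# Cross-check (illustrative, not part of the original exercise): numpy's\n# least-squares line fit should agree with the covariance formula above:\n#   b_hat, a_hat = np.polyfit(df['X'], df['Y'], 1)  # returns [slope, intercept]\n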
\n\nprint('\\n')\ndef c(df, cols):\n \"\"\"centering specified columns\"\"\"\n for col in cols:\n df[col] = df[col] - df[col].mean()\n return df\n\ndf_c = df.copy().pipe(c, ['X'])\nb_c = df_c.cov().loc['X', 'Y'] / df_c.cov().loc['X', 'X']\na_c = df_c['Y'].mean() - b_c * df_c['X'].mean()\nprint('2: a_c={0}, b_c={1}'.format(a_c, b_c))\n\n\nprint('\\n')\ndf['Y_pred'] = df['X'] * b + a\ndf['Y_resid'] = df['Y'] - df['Y_pred']\nprint('3a: Is the variance of Y equal to the variance of Y hat plus the variance of the residuals? {}'.format(round(df.cov().loc['Y', 'Y'], 5) == round(df.cov().loc['Y_pred', 'Y_pred'] + df.cov().loc['Y_resid', 'Y_resid'], 5)))\nprint('3b: Is the Pearson correlation coefficient for X and Y equal to the ratio of variances of Y hat and Y? {}'.format(round(df.corr().loc['X', 'Y'] ** 2, 5) == round(df.cov().loc['Y_pred', 'Y_pred'] / df.cov().loc['Y', 'Y'], 5)))\n\n\n\nprint('\\n')\ndef beta_weight(df, on_col, off_col):\n return (df.corr().loc[on_col, 'Y'] - df.corr().loc[off_col, 'Y'] * df.corr().loc[on_col, off_col]) / (1 - df.corr().loc[on_col, off_col] ** 2)\n\nbeta_x = beta_weight(df, 'X', 'W')\nbeta_w = beta_weight(df, 'W', 'X')\nb_x = beta_x * (df['Y'].std() / df['X'].std())\nb_w = beta_w * (df['Y'].std() / df['W'].std())\na = df['Y'].mean() - b_x * df['X'].mean() - b_w * df['W'].mean()\nprint('4a: a={0}, b_x={1}, b_w={2}'.format(a, b_x, b_w))\n\ndef std(df):\n return (df - df.mean().values) / df.std().values\n # print(df.std().values)\n\ndf_std = df.pipe(std)\nbeta_std_x = beta_weight(df_std, 'X', 'W')\nbeta_std_w = beta_weight(df_std, 'W', 'X')\nb_std_x = beta_std_x * (df_std['Y'].std() / df_std['X'].std())\nb_std_w = beta_std_w * (df_std['Y'].std() / df_std['W'].std())\na_std = df_std['Y'].mean() - b_std_x * df_std['X'].mean() - b_std_w * df_std['W'].mean()\nprint('4b: a_std={0}, b_std_x={1}, b_std_w={2}'.format(a_std, b_std_x, b_std_w))\n\ndf['Y_pred'] = a + b_x * df['X'] + b_w * df['W']\nR2 = df.corr().loc['Y_pred', 'Y'] ** 2\nprint('4c: The overall multiple correlation: {}'.format(R2))\nprint('\\t...ratio of ESS and TSS gives the same answer: {}'.format(((df['Y_pred'] - df['Y'].mean())**2).sum() / ((df['Y'] - df['Y'].mean())**2).sum()))\n\n\nprint('\\n')\nprint('5: Adjusted R2: {}'.format(1 - (1 - R2) * ((19 / 17))))\n\n\nprint('\\n')\nprint('6: histogram of residuals...')\ndf['Y_resid'] = df['Y'] - df['Y_pred']\ndf['Y_resid'].hist(bins=10)\n# plt.show()\n\n\nprint('\\n')\ndef first_order_partial_corr(df, on_col, off_col):\n return (df.corr().loc[on_col, 'Y'] - df.corr().loc[on_col, off_col] * df.corr().loc[off_col, 'Y']) / np.sqrt((1 - df.corr().loc[on_col, off_col] ** 2) * (1 - df.corr().loc[off_col, 'Y'] ** 2))\nprint('7a: first-order partial correlation between W and Y, removing X: {}'.format(first_order_partial_corr(df, 'W', 'X') ** 2))\ndef first_order_part_corr(df, on_col, off_col):\n return (df.corr().loc[on_col, 'Y'] - df.corr().loc[on_col, off_col] * df.corr().loc[off_col, 'Y']) / np.sqrt((1 - df.corr().loc[on_col, off_col] ** 2))\nprint('7a: first-order part correlation between W and Y, removing X: {}'.format(first_order_part_corr(df, 'W', 'X') ** 2))\n# The former is correlation between W and Y after we take X's influence on both of these into account.\n# The latter is correlation between W and Y after we take into account X's influence only on W.\n# From the book: \"Variable W uniquely explains about 10.5% of the total variance in Y, and of the variance in Y not already\n# explained by X, predictor W accounts for about 19.9% of the rest.\"\n# So the former looks at how much of the leftover variation in Y after accounting for X is uniquely explained by W, and the latter\n# looks at how much of the variation in Y is uniquely explained by W. So the latter should be smaller than the former since\n# it is relative to all variation in Y whereas the former is relative to that which is left over after accounting for X.
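\n# (Added note, illustrative) In symbols, with r_wy, r_xy, r_wx the pairwise correlations:\n#   partial:     r_wy.x   = (r_wy - r_wx*r_xy) / sqrt((1 - r_wx^2) * (1 - r_xy^2))\n#   semipartial: r_y(w.x) = (r_wy - r_wx*r_xy) / sqrt(1 - r_wx^2)\n# which is exactly what first_order_partial_corr and first_order_part_corr compute above.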
\n","sub_path":"estimation/causal_inference/Kline_structural_equation_modeling/chapter_2_exercises.py","file_name":"chapter_2_exercises.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"23169728","text":"import urllib.request\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\n\n\nclass MelonMusic:\n def __init__(self, url):\n self.url = url\n\n def scrap(self):\n soup = BeautifulSoup(urlopen(urllib.request.Request(self.url, headers={'User-Agent' : 'Mozilla/5.0'})).read(), 'lxml')\n titles = soup.find_all(name='div', attrs={'class': 'ellipsis rank01'})\n artists = soup.find_all(name='div', attrs={'class': 'ellipsis rank02'})\n _ = 0\n for title, artist in zip(titles, artists):\n _ += 1\n print(f'{\"*\" * 50}\\n{_} Rank\\nTitle: {title.find(\"a\").text}\\nArtist: {artist.find(\"a\").text}')\n\n # for i, title in enumerate(soup.find_all(name='div', attrs={'class': 'ellipsis rank01'})):\n # print(f'{\"*\" * 50}\\n{str(i+1)} Rank\\nTitle: {title.find(\"a\").text}\\nArtist: {artists[i].find(\"a\").text}')\n\n\n\n '''\n def scrap(self):\n header = {'User-Agent' : 'Mozilla/5.0'}\n modi = urllib.request.Request(self.url, headers=header)\n soup = BeautifulSoup(urlopen(modi).read(), 'html.parser')\n ls_artist = soup.find_all(name='div', attrs={'class': 'ellipsis rank02'})\n for i, val in enumerate(soup.find_all(name='div', attrs={'class': 'ellipsis rank01'})):\n print(f'{str(i+1)} Rank\\nTitle: {val.find(\"a\").text}\\nArtist: {ls_artist[i].find(\"a\").text}')\n\n # for i, val in enumerate(soup.find_all(name='div', attrs={'class': 'ellipsis rank02'})):\n # print(str(i+1) + ' Rank')\n # print(f'Artist: {val.find(\"a\").text}')\n\n '''\n","sub_path":"lecture/scraping/melon/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"42503139","text":"#!/usr/bin/python\nimport logging\nimport pickle\nimport time\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nimport pandas\nimport requests\n\n\nclass OpenPageRank:\n def __init__(self, key: str, wd: Path = Path.cwd()):\n self.wd = wd / \"data/PageRank\"\n Path.mkdir(self.wd, parents=True, exist_ok=True)\n self.key = key\n self.url = \"https://openpagerank.com/api/v1.0/getPageRank\"\n\n def request_page_rank(self, website: List[str]) -> List[Tuple[str, int]]:\n\n request_data = {\"domains[]\": set(website)}\n\n headers = {\"API-OPR\": self.key}\n\n output = []\n seconds = 10\n\n for x in range(10):\n\n success = False\n try:\n output = requests.get(self.url, params=request_data, headers=headers)\n output = output.json()[\"response\"]\n success = True\n except Exception as str_error:\n print(\n \"[ERROR] Cannot reach OpenPageRank. Retrying in %s seconds\"\n % seconds\n )\n print(\"[ERROR] Code: %s\" % str_error)\n time.sleep(seconds)\n seconds += seconds\n if x == 9:\n print(\"[ERROR] Failed 10 times. 
Exiting ...\")\n exit(1)\n\n if success:\n break\n\n result = []\n for i in output:\n result.append([i[\"domain\"], i[\"page_rank_decimal\"]])\n\n return result\n\n def get_page_rank(self, websites: pandas.DataFrame) -> pandas.DataFrame:\n save_path = self.wd\n\n missing = websites[\"target_hostname\"].tolist()\n save = None\n\n if Path(save_path / \"websites.pickle\").is_file():\n logging.info(\"Loading PageRank save\")\n save = pandas.read_csv(save_path / \"data.csv\")\n\n with open(save_path / \"websites.pickle\", \"rb\") as filehandle:\n saved = pickle.load(filehandle)\n\n missing = [x for x in missing if x not in saved]\n if len(missing) > 0:\n logging.info(\"Requesting missing PageRank scores...\")\n\n frames = []\n\n for index in range(0, len(missing), 100):\n request = missing[index : index + 100]\n frames = [*frames, *self.request_page_rank(request)]\n\n result = pandas.DataFrame(frames, columns=[\"target_hostname\", \"Score_PageRank\"])\n\n if save is not None:\n result = pandas.concat([save, result]).reset_index(drop=True)\n\n if len(missing) > 0:\n result.to_csv(path_or_buf=save_path / \"data.csv\", index=False)\n\n with open(save_path / \"websites.pickle\", \"wb\") as filehandle:\n pickle.dump(websites[\"target_hostname\"].tolist(), filehandle)\n\n return result\n\n def df_add_score(self, df: pandas.DataFrame):\n\n \"\"\"\n Returns a DataFrame extended by the 'Score_PageRank' column. This column contains the OpenPageRank. As input\n a DataFrame with the column 'target_hostname' is expected, which must contain the corresponding domain name.\n\n Parameters:\n df (DataFrame): DataFrame with the domains in the 'target_hostname' column.\n\n Returns:\n extended DataFrame (DataFrame): DataFrame that has been extended by the 'Score_PageRank' column.\n \"\"\"\n websites = pandas.DataFrame(\n df[\"target_hostname\"].unique(), columns=[\"target_hostname\"]\n )\n\n result = self.get_page_rank(websites)\n\n result = pandas.merge(df, result, on=\"target_hostname\", how=\"left\")\n return result\n","sub_path":"src/scores/PageRank/OpenPageRank.py","file_name":"OpenPageRank.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"317077307","text":"from django.shortcuts import get_object_or_404\r\nfrom rest_framework.decorators import action\r\nfrom rest_framework import permissions, viewsets\r\nfrom rest_framework.response import Response\r\nfrom rest_framework_extensions.mixins import NestedViewSetMixin\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.pagination import PageNumberPagination, LimitOffsetPagination\r\n\r\nfrom channels.layers import get_channel_layer\r\nfrom asgiref.sync import async_to_sync\r\n\r\nimport copy\r\nfrom requests_oauthlib import OAuth1\r\nfrom urllib.parse import urlencode\r\nfrom django.conf import settings\r\nimport requests\r\nfrom datetime import datetime\r\n\r\nfrom django.contrib.auth.models import Group\r\nfrom contest.decorators import paginate\r\nfrom contest.models import (\r\n Event,\r\n Bout,\r\n Fighter,\r\n Selection,\r\n Entry,\r\n CustomUser,\r\n ChatRoom,\r\n ChatFile, \r\n ChatMessage\r\n)\r\nfrom contest.serializers import (\r\n\tUserSerializer,\r\n\tGroupSerializer,\r\n\tEventSerializer,\r\n\tBoutSerializer,\r\n\tFighterSerializer,\r\n SelectionSerializer,\r\n EntrySerializer,\r\n ChatRoomSerializer,\r\n ChatFileSerializer,\r\n ChatMessageSerializer\r\n)\r\n\r\nfrom contest.views.common_views import 
StandardResultsSetPagination\r\n\r\nclass ChatRoomViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\r\n \"\"\"\r\n API endpoint that allows chat rooms to be viewed or edited.\r\n \"\"\"\r\n queryset = ChatRoom.objects.all()\r\n serializer_class = ChatRoomSerializer\r\n permission_classes = [permissions.AllowAny]\r\n # permission_classes = [permissions.IsAuthenticated]\r\n\r\n @action(methods=['get'], detail=False)\r\n def get_all(self, request, **kwarg):\r\n # limit by roomPerPage\r\n idx = request.query_params.get('idx', 0)\r\n res = ChatRoom.objects.all().order_by('-last_updated').filter(id__gt=idx)\r\n rooms = ChatRoomSerializer(res, many=True).data\r\n users = CustomUser.objects.all()  # settings.AUTH_USER_MODEL is a dotted string, not a model class\r\n for _ in rooms:\r\n _['_id'] = _['id']\r\n for user in users:\r\n if user.id not in _['users']:\r\n _['users'].append(user.id)\r\n\r\n return Response(dict(rooms=rooms))\r\n\r\nclass ChatFileViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\r\n \"\"\"\r\n API endpoint that allows chat file to be viewed or edited.\r\n \"\"\"\r\n queryset = ChatFile.objects.all()\r\n serializer_class = ChatFileSerializer\r\n permission_classes = [permissions.AllowAny]\r\n # permission_classes = [permissions.IsAuthenticated]\r\n\r\nclass ChatMessageViewSet(NestedViewSetMixin, viewsets.ModelViewSet):\r\n \"\"\"\r\n API endpoint that allows entries to be viewed or edited.\r\n \"\"\"\r\n queryset = ChatMessage.objects.all()\r\n serializer_class = ChatMessageSerializer\r\n permission_classes = [permissions.AllowAny]\r\n pagination_class = StandardResultsSetPagination\r\n # permission_classes = [permissions.IsAuthenticated]\r\n\r\n def format_message(self, data):\r\n messages = []\r\n for _ in data:\r\n _['_id'] = _['id']\r\n _['sender_id'] = _['sender']\r\n messages.append(_)\r\n return messages\r\n\r\n @action(methods=['get'], detail=False)\r\n def get_by_room(self, request, **kwarg):\r\n messages = []\r\n return Response(dict(messages=messages))\r\n\r\n @action(methods=['get'], detail=False)\r\n def get_latest(self, request, **kwarg):\r\n room_id = request.query_params.get('room_id')\r\n message = ChatMessage.objects.all().filter(room_id=room_id)\r\n messages = []\r\n if message:\r\n messages = [self.get_serializer(message.latest('timestamp')).data]\r\n messages = self.format_message(messages)\r\n\r\n return Response(dict(messages=messages))\r\n\r\n\r\n @action(methods=['get'], detail=False)\r\n def get_all(self, request, **kwarg):\r\n # limit by messagePerPage\r\n room_id = request.query_params.get('room_id')\r\n idx = int(request.query_params.get('idx', '0'))\r\n message = ChatMessage.objects.all().filter(room_id=room_id).order_by('-timestamp', 'id')\r\n page = self.paginate_queryset(message)\r\n if page is not None:\r\n serializer = self.get_serializer(page, many=True)\r\n messages = self.format_message(serializer.data)\r\n return self.get_paginated_response(messages)\r\n \r\n serializer = self.get_serializer(message, many=True)\r\n messages = self.format_message(serializer.data)\r\n return Response(dict(results=messages))","sub_path":"fighter/contest/views/chat_views.py","file_name":"chat_views.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"273930838","text":"\nimport unittest\n\nimport ujson\n\nfrom nanohttp import Controller, json\nfrom nanohttp.tests.helpers import WsgiAppTestCase\n\n\nclass JsonTestCase(WsgiAppTestCase):\n\n class Root(Controller):\n\n @json\n def index(self):\n return {\n 'a': 
1,\n 'b': '2'\n }\n\n @json\n def via_to_dict(self):\n class Model:\n @staticmethod\n def to_dict():\n return dict(\n a=1,\n b='2'\n )\n return Model()\n\n @json\n def error(self):\n class Bad:\n pass\n return Bad()\n\n def test_json(self):\n resp, content = self.assert_get(\n '/',\n expected_headers={\n 'content-type': 'application/json; charset=utf-8'\n },\n )\n self.assertDictEqual(ujson.loads(content), {'b': '2', 'a': 1})\n\n resp, content = self.assert_get('/via_to_dict',)\n self.assertDictEqual(ujson.loads(content), {'b': '2', 'a': 1})\n\n self.assert_get('/error', status=500)\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()\n","sub_path":"nanohttp/tests/test_json.py","file_name":"test_json.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176386261","text":"import acm\nfrom datetime import datetime\nfrom xml.etree import ElementTree\n\nimport FReportAPI\nimport NamespaceTimeFunctions\nfrom PS_Functions import get_pb_fund_shortname\nimport PS_Functions\nimport PS_XMLReportingTools\nimport ShortEndDelta\nfrom at_logging import getLogger, bp_start\n\n\nLOGGER = getLogger()\n\n\n\nael_variables = []\nael_variables.append(['clientName', 'Client Name', 'FCounterParty', acm.FCounterParty.Instances(), None, 1, 0, 'Client Name that will be passed to all reports', None, 1])\nael_variables.append(['reportTitle', 'Report Title', 'string', None, 'Risk Swap Attribution', 1, 0, 'PreProcessor Parameter', None, 1]) \nael_variables.append(['TrdFilter', 'Trade Filter:', 'FTradeSelection', acm.FTradeSelection.Instances(), None, 1, 0, 'Name of Trade Filter\\nUsed in both ShortEndDelta and BenchmarkDelta reports'])\nael_variables.append(['Currency', 'Currency:', 'FCurrency', acm.FCurrency.Instances(), 'ZAR', 1, 0, 'Currency'])\nael_variables.append(['Curve', 'Yield Curve:', 'FYieldCurve', acm.FYieldCurve.Instances(), 'ZAR-SWAP', 1, 0, 'Yield Curve']) \nael_variables.append(['filename', 'Filename', 'string', None, 'File_RiskSwapAttribution', 1, 0, 'Filename', None, 1])\nael_variables.append(['filepath', 'File Path', 'string', None, 'F:\\\\', 0, 0, 'File path where report will be saved', None, 1])\n\nclass ReportXML():\n def __init__(self, riskSwapAttrDict, paramDict):\n self.root = ElementTree.XML('')\n \n reportElement = ElementTree.SubElement(self.root, 'ReportDetail')\n totals = {'Non Over': 0.0,\n 'Over': 0.0,\n 'PnL Expect': 0.0,\n 'Total': 0.0}\n\n for bmark in riskSwapAttrDict:\n totals['Non Over'] += riskSwapAttrDict[bmark]['Non Over']\n totals['Over'] += riskSwapAttrDict[bmark]['Over']\n totals['PnL Expect'] += riskSwapAttrDict[bmark]['PnL Expect']\n totals['Total'] += riskSwapAttrDict[bmark]['Total']\n\n rowElement = ElementTree.SubElement(reportElement, 'ReportRow', attrib={'Label':'Total'})\n ElementTree.SubElement(rowElement, 'NonOver').text = str(totals['Non Over'])\n ElementTree.SubElement(rowElement, 'Over').text = str(totals['Over'])\n ElementTree.SubElement(rowElement, 'PnLExpect').text = str(totals['PnL Expect'])\n ElementTree.SubElement(rowElement, 'Total').text = str(totals['Total'])\n ElementTree.SubElement(rowElement, 'Change').text = ' '\n ElementTree.SubElement(rowElement, 'CurveT').text = ' '\n ElementTree.SubElement(rowElement, 'CurveT1').text = ' '\n\n for bmark in sorted(riskSwapAttrDict.keys(), key=lambda x: acm.FInstrument[x].ExpiryDate(), reverse=False):\n rowElement = ElementTree.SubElement(reportElement, 'ReportRow', attrib={'Label':bmark})\n 
ElementTree.SubElement(rowElement, 'NonOver').text = str(riskSwapAttrDict[bmark]['Non Over'])\n ElementTree.SubElement(rowElement, 'Over').text = str(riskSwapAttrDict[bmark]['Over'])\n ElementTree.SubElement(rowElement, 'PnLExpect').text = str(riskSwapAttrDict[bmark]['PnL Expect'])\n ElementTree.SubElement(rowElement, 'Total').text = str(riskSwapAttrDict[bmark]['Total'])\n ElementTree.SubElement(rowElement, 'Change').text = str(riskSwapAttrDict[bmark]['Change'])\n ElementTree.SubElement(rowElement, 'CurveT').text = str(riskSwapAttrDict[bmark]['Curve_T'])\n ElementTree.SubElement(rowElement, 'CurveT1').text = str(riskSwapAttrDict[bmark]['Curve_T-1'])\n \n parameterElement = ElementTree.SubElement(self.root, 'ReportParameters')\n generated_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n UTC_offset = (datetime.now() - datetime.utcnow()).seconds / 3600\n generated_time += ' (UTC+0%i:00)' % UTC_offset\n \n ElementTree.SubElement(parameterElement, 'GeneratedTime').text = generated_time\n ElementTree.SubElement(parameterElement, 'ReportDate').text = acm.Time.DateToday()\n ElementTree.SubElement(parameterElement, 'FrameworkVersion').text = paramDict['FrameworkVersion']\n ElementTree.SubElement(parameterElement, 'TradeFilter').text = paramDict['TrdFilter']\n \n def __str__(self):\n return ElementTree.tostring(self.root)\n\nclass RiskSwapAttributionReport:\n def __init__(self, endDate, ParamDict):\n self.reportEndDate = endDate\n self.shortEndDeltaParamDict = ParamDict\n self.RiskResults = {}\n self.shortEndDeltaResults = {}\n self.benchmarkDeltaResults = {}\n self.tradeFilter = ParamDict['TrdFilter']\n self.yieldCurve = ParamDict['Curve']\n self.RiskResults = {}\n if 'frameworkVersion' in ParamDict.keys():\n self.frameworkVersion = ParamDict['frameworkVersion']\n else:\n self.frameworkVersion = 'N/A'\n \n def getReportValues(self):\n # Populate RiskResults dictionary\n self.getShortEndDeltaResults()\n self.getBenchmarkDeltaResults()\n \n myKeys = self.shortEndDeltaResults.keys()\n myKeys += self.benchmarkDeltaResults.keys()\n myKeys = set(myKeys)\n \n for k in myKeys:\n self.RiskResults[k] = {}\n \n self.RiskResults[k]['Over'] = self.shortEndDeltaResults.get(k, 0)\n self.RiskResults[k]['Non Over'] = self.benchmarkDeltaResults.get(k, 0)\n \n self.RiskResults[k]['Total'] = self.RiskResults[k]['Over'] + self.RiskResults[k]['Non Over']\n\n # Create Curve Movement\n self.RiskResults[k]['Curve_T-1'], self.RiskResults[k]['Curve_T'] = PS_Functions.get_latest_price_movement(k, self.reportEndDate)\n self.RiskResults[k]['Change'] = self.RiskResults[k]['Curve_T'] - self.RiskResults[k]['Curve_T-1']\n\n # Calc PnL Expect\n self.RiskResults[k]['PnL Expect'] = self.RiskResults[k]['Change'] * self.RiskResults[k]['Total'] * 100\n \n def toXML(self):\n # Used for PDF generation\n pDict = {'FrameworkVersion':self.frameworkVersion,\n 'TrdFilter': self.tradeFilter.Name()}\n reportXML = ReportXML(self.RiskResults, pDict) \n return str(reportXML)\n \n def getShortEndDeltaResults(self):\n pDict = dict(self.shortEndDeltaParamDict) # This is required because of the \"Multiple Inputs\" behaviour of the ShortEndDelta script\n pDict['TrdFilter'] = [self.shortEndDeltaParamDict['TrdFilter']]\n pDict['Currency'] = [self.shortEndDeltaParamDict['Currency']]\n pDict['Curve'] = [self.shortEndDeltaParamDict['Curve']]\n temp = ShortEndDelta.ael_main(pDict, for_report_controller=True)\n for i in range(len(temp) - 1):\n self.shortEndDeltaResults[temp[i][0]] = temp[i][2]\n\n def getMappedYCs(self):\n ycSet = set()\n calc_space = 
acm.Calculations().CreateStandardCalculationsSpaceCollection() \n for ins in self.tradeFilter.Instruments():\n ins_calc = ins.Calculation()\n ycSet.add(ins_calc.MappedCreditCurve(calc_space))\n ycSet.add(ins_calc.MappedDiscountCurve(calc_space))\n ycSet.add(ins_calc.MappedRepoCurve(calc_space))\n ycSet.add(ins_calc.MappedRiskFreeDiscountCurve(calc_space))\n\n for l in ins.Legs():\n l_calc = l.Calculation()\n ycSet.add(l_calc.MappedCreditCurve(calc_space))\n ycSet.add(l_calc.MappedDiscountCurve(calc_space))\n ycSet.add(l_calc.MappedRepoCurve(calc_space))\n ycSet.add(l_calc.MappedRiskFreeDiscountCurve(calc_space))\n\n idxRef = ins.IndexReference()\n if idxRef:\n idxRef_calc = idxRef.Calculation()\n ycSet.add(idxRef_calc.MappedCreditCurve(calc_space))\n ycSet.add(idxRef_calc.MappedDiscountCurve(calc_space))\n ycSet.add(idxRef_calc.MappedRepoCurve(calc_space))\n ycSet.add(idxRef_calc.MappedRiskFreeDiscountCurve(calc_space))\n return set([acm.FYieldCurve[ycInfo.Name()] for ycInfo in ycSet if ycInfo is not None])\n\n def getBenchmarkDeltaResults(self):\n context = acm.GetDefaultContext()\n sheet_type = 'FPortfolioSheet'\n calc_space = acm.Calculations().CreateCalculationSpace(context, sheet_type)\n top_node = calc_space.InsertItem(self.tradeFilter)\n calc_space.Refresh()\n column_id = 'Benchmark Delta Instruments'\n vector = acm.FArray()\n curves = self.getMappedYCs()\n curves.add(self.yieldCurve)\n benchmarks = set()\n for yc in curves:\n for b in yc.Benchmarks():\n benchmarks.add(b)\n benchmarks = sorted(benchmarks, key=lambda x: x.Instrument().Name())\n for b in benchmarks:\n param = acm.FNamedParameters();\n param.AddParameter('instrument', b.Instrument())\n vector.Add(param) \n column_config = acm.Sheet.Column().ConfigurationFromVector(vector)\n ins_node = top_node.Iterator().Find(self.tradeFilter.Name()).Tree()\n calculation = calc_space.CreateCalculation(ins_node, column_id, column_config) \n count = 0\n for cv in calculation.Value():\n self.benchmarkDeltaResults[benchmarks[count].Instrument().Name()] = cv.Number()\n count += 1\n \n def createReport(self, filepath, filename, xslt, clientName, reportName):\n report = FReportAPI.FWorksheetReportApiParameters()\n self._setReportAPIParameters(report, filepath, filename, xslt)\n \n root = ElementTree.XML(self.toXML())\n reportParameters = root.find(\"ReportParameters\")\n \n if not reportParameters:\n reportParameters = ElementTree.SubElement(root, \"ReportParameters\")\n else:\n reportParameters = reportParameters[0]\n\n reportXml = ElementTree.tostring(root)\n reportXml = PS_XMLReportingTools._addAddress(reportXml, clientName.Name())\n reportXml = PS_XMLReportingTools._addReportParameter(reportXml, 'ReportName', reportName)\n reportXml = PS_XMLReportingTools._addRunLocation(reportXml)\n report.CreateReportByXml(reportXml)\n\n def _setReportAPIParameters(self, report, filepath, filename, xslTemplate):\n report.ambAddress = ''\n report.ambSender = ''\n report.ambSubject = ''\n report.ambXmlMessage = False\n report.clearSheetContent = False\n report.compressXmlOutput = False\n report.createDirectoryWithDate = False\n report.dateFormat = '%d%m%y'\n report.expiredPositions = False\n report.fileDateFormat = ''\n report.fileDateBeginning = False\n report.fileName = filename\n report.filePath = filepath\n report.function = None\n report.gcInterval = 5000\n report.gridOutput = False\n report.gridUseLoopbackGridClient = False\n report.gridRowPartitionCbArg = None\n report.gridRowPartitionCbClass = None\n report.gridExcludeRowCbClass = 
\"FReportGridCallbacks.ExcludeRowManager\"\n report.gridAggregateXmlCbClass = None\n report.gridTimeout = None\n report.gridRowSet = None\n report.grouping = 'Default'\n report.htmlToFile = False\n report.htmlToPrinter = False\n report.htmlToScreen = False\n report.includeDefaultData = False\n report.includeFormattedData = False\n report.includeFullData = False\n report.includeRawData = False\n report.instrumentParts = False\n report.instrumentRows = False\n report.maxNrOfFilesInDir = 1000\n report.multiThread = False\n report.numberOfReports = 1\n report.orders = None\n report.overridePortfolioSheetSettings = False\n report.overrideTimeSheetSettings = False\n report.overrideTradeSheetSettings = False\n report.overwriteIfFileExists = True\n report.param = None\n report.performanceStrategy = 'Periodic full GC to save memory'\n report.portfolioReportName = ''\n report.portfolioRowOnly = False\n report.portfolios = None\n report.preProcessXml = None\n report.printStyleSheet = 'FStandardCSS'\n report.printTemplate = 'FStandardTemplateClickable'\n report.reportName = ''\n report.secondaryFileExtension = '.csv'\n report.secondaryOutput = True\n report.secondaryTemplate = xslTemplate\n report.sheetSettings = {}\n report.snapshot = True\n report.storedASQLQueries = None\n report.template = None\n report.tradeFilters = None\n report.tradeRowsOnly = False\n report.trades = None\n report.updateInterval = 60\n report.workbook = None\n report.xmlToAmb = False\n report.xmlToFile = False\n report.zeroPositions = False\n report.guiParams = None\n report.reportApiObject = None\n\ndef ael_main(param):\n process_name = \"ps.risk_swap_attribution.{0}\".format(get_pb_fund_shortname(param[\"clientName\"]))\n with bp_start(process_name): \n \n param['InputType'] = 'Filter'\n param['Portfolio'] = None\n param['ReportType'] = 'Short End Delta'\n param['Outpath'] = 'NotApplicable'\n riskReport = RiskSwapAttributionReport(acm.Time().DateToday(), param)\n riskReport.getReportValues()\n if 'fileID_SoftBroker' in param.keys():\n riskReport.createReport(param['filepath'], '_'.join([param['fileID_SoftBroker'], param['filename'],\n acm.Time.DateToday().replace('-', '')]), 'ps_riskSwapAttr_csv', param['clientName'], param['reportTitle'])\n else:\n riskReport.createReport(param['filepath'], '_'.join([param['filename'], acm.Time.DateToday().replace('-', '')]),\n 'ps_riskSwapAttr_csv', param['clientName'], param['reportTitle'])\n LOGGER.info('Risk Swap Attribution Report - Completed Successfully')\n\n \ndef _convertToParamDictionary(configuration, report_name):\n riskdict = {}\n riskdict['TrdFilter'] = configuration['TrdFilter_' + report_name]\n riskdict['Currency'] = configuration['Currency_' + report_name]\n riskdict['Curve'] = configuration['Curve_' + report_name]\n riskdict['reportTitle'] = configuration['reportTitle_' + report_name]\n riskdict['clientName'] = acm.FCounterParty[configuration['clientName']]\n riskdict['filename'] = configuration['Filename_' + report_name]\n riskdict['filepath'] = configuration['OutputPath']\n riskdict['fileID_SoftBroker'] = configuration['fileID_SoftBroker']\n if riskdict['TrdFilter'] is None:\n raise ValueError(report_name + ' Tradefilter is mandatory')\n return riskdict\n","sub_path":"Python modules/PS_RiskSwapAttribution_Report.py","file_name":"PS_RiskSwapAttribution_Report.py","file_ext":"py","file_size_in_byte":14724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"75412697","text":"#!/usr/bin/env python\n#\n# Created Feb 2017 by 
R.Kessler\n#\n# Revived Nov 2020 with python 3\n# + fetch survey name and write SURVEY key to kcor-input\n#\n#\n# @!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!\n#\n# WARNING: Bug in SUBSURVEY_LIST key in output SIMLIB file \n#\n# @!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!@!\n#\n#\n# Combine multiple data versions into a single data version.\n# Also produced combined kcor file and simLib file, and convert\n# text data into FITS format.\n#\n# Since the same filter char can appear in different data versions,\n# replace filters with sequential 'abcdef ... ABCDEF ... 0123456789'\n# Intended use is for low-z, but will work with any data samples\n# in TEXT format.\n#\n# Usage:\n# combine_dataVersions.py \n#\n# where contains\n# PRIVATE_DATA_PATH: # this is optional\n# VERSION: \n# VERSION: \n# etc ...\n# SURVEY_OUT: \n# VPEC_FILE: \n#\n# Outputs:\n# _TEXT/ ! combined data directory, TEXT format\n# _FITS/ ! combined data directory, FITS format\n# kcor_.fits ! combined kcor file\n# .SIMLIB ! combined SIMLIB file\n#\n# History\n# Apr 12 2017: D.Scolnic added key SIMLIB_ZPERR_LIST\n#\n# Oct 25 2017: RK comment out all SIMLIB_ZPERR_LIST code since it\n# causes code to crash.\n#\n# Dec 8 2017: S.Hinton - merge duplicates\n# Jan 8 2018: RK - add VPEC_FILE option\n#\n# Nov 16 2019 RK - bug fix writing AB_SED and BD17_SED\n#\n# Dec 08 2020 RK - fix to properly handle multiple surveys using\n# same filters\n#\n# Dec 17 2020 RK - write full filter names to text files (e.g., CFA3K/l)\n# for visual convenience. snana.car was modified to strip last\n# char of filter column. FITS format still single char.\n#\n# ====================================\n\nimport os, sys\nimport numpy as np\nimport time, string, getpass\nimport subprocess, shutil, logging\n\n\n# globals\n\nSNDATA_ROOT = os.environ['SNDATA_ROOT']\nHOSTNAME = os.environ['HOSTNAME']\nNOW = time.strftime(\"%c\")\nCWD = os.getcwd() \nMXFILTERS = 62\nTOPDIR_DATA = SNDATA_ROOT + '/lcmerge'\nTOPDIR_KCOR = SNDATA_ROOT + '/kcor'\n\nFILTER_CHARLIST = 'abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '0123456789'\n\n\nKEYNAME_VERSION = \"VERSION:\"\nKEYNAME_SURVEY_OUT = \"SURVEY_OUT:\"\nKEYNAME_VPEC_FILE = \"VPEC_FILE:\"\nKEYNAME_SIMLIB_ZPERR_LIST = \"SIMLIB_ZPERR_LIST:\"\n\n# ========== BEGIN ===========\n\ndef parseLines(Lines,key,narg,vbose):\n # Lines is input array of lines in file\n # key is key to search\n # narg is number of args to return after key\n\n arg = []\n rowList = Lines[np.char.startswith(Lines,key)]\n nrow = len(rowList)\n\n if ( nrow == 1 ):\n if ( narg==1 ):\n arg = rowList[0].split()[1]\n else:\n arg = rowList[0].split()[1:narg+1]\n elif ( nrow > 1 ):\n for row in rowList:\n arg.append(row.split()[1:narg+1])\n\n# if ( vbose > 0 ): print '\\t ', key, arg\n \n return(arg)\n\n\ndef change_filterChar(versionInfo,kcorInfo):\n\n print(f\"\\n Change filter characters:\")\n\n nver = len(kcorInfo)\n for iver in range(0,nver):\n\n kcor = kcorInfo[iver]\n\n # check of this filter/system is new, or a repeat\n iver_use = kcor.ikcor_use\n new = (iver_use == iver)\n kcor_use = kcorInfo[iver_use] \n\n #print(f\" xxx iver={iver} iver_use={iver_use} new={new}\")\n\n ifilt = 0\n for line in kcor_use.FILTER :\n filter_name = line[0] # full name of filter\n\n N = versionInfo.NFILTER_TOT\n filter_oldChar = filter_name[-1] # current filter char\n\n if new :\n filter_newChar = FILTER_CHARLIST[N] # new new char for filter\n versionInfo.FILTER_CHARLIST_OLD += filter_oldChar\n versionInfo.FILTER_CHARLIST_NEW += 
filter_newChar\n versionInfo.NFILTER_TOT += 1\n else:\n filter_newChar = kcor_use.FILTER_CHARLIST_NEW[ifilt]\n\n # append new char to end of filter name to ensure\n # unique filter-char for each band\n filter_name_new = filter_name + '/' + filter_newChar\n kcorInfo[iver].FILTER_NEWNAME.append(filter_name_new) # full name\n kcorInfo[iver].FILTER_CHARLIST_OLD += filter_oldChar # 1-char\n kcorInfo[iver].FILTER_CHARLIST_NEW += filter_newChar\n ifilt += 1\n\n # print band replacement for each version\n OLD = kcorInfo[iver].FILTER_CHARLIST_OLD\n NEW = kcorInfo[iver].FILTER_CHARLIST_NEW\n tmp = OLD + ' --> ' + NEW\n \n V = versionInfo.NAME[iver]\n print(f\" {V:<28.28} bands: {tmp} (new={new})\") \n\n #sys.exit(\"\\n xxx DEBUG EXIT xxx \\n\")\n\n # end change_filterChar\n\ndef combine_duplicate_filtpath(versionInfo,kcorInfo):\n\n # created Dec 2020\n # Combine multiple SURVEY_INP that have same filters.\n # First SURVEY_INP -> comma-sep list,\n # Other SURVEY_INP -> IGNORE (flag to NOT write it out)\n\n nkcor = len(kcorInfo)\n KEY_IGNORE = \"IGNORE\"\n n_match = 0\n\n for i0 in range(0,nkcor): \n kcorInfo[i0].ikcor_use = i0\n kcorInfo[i0].new = True\n\n for i0 in range(0,nkcor-1):\n for i1 in range(i0+1,nkcor):\n match = same_filters(kcorInfo[i0],kcorInfo[i1]) \n survey0 = versionInfo.SURVEY_INP[i0]\n survey1 = versionInfo.SURVEY_INP[i1]\n if match and survey0 != KEY_IGNORE:\n survey = f\"{survey0},{survey1}\"\n versionInfo.SURVEY_INP[i0] = survey\n versionInfo.SURVEY_INP[i1] = KEY_IGNORE\n kcorInfo[i1].ikcor_use = i0 \n kcorInfo[i0].new = False\n n_match += 1\n # - - - - -\n print(f\" Found {n_match} filter matches \")\n\n #sys.exit(\"\\n xxx DEBUG EXIT xxx \\n\") \n\n\ndef same_filters(kcorInfo_0,kcorInfo_1):\n # Created Dec 8 2020\n # Return true if filters are the same for each kcorInfo \n \n if kcorInfo_0.MAGSYSTEM != kcorInfo_1.MAGSYSTEM : return False\n if kcorInfo_0.FILTSYSTEM != kcorInfo_1.FILTSYSTEM : return False\n if kcorInfo_0.FILTPATH != kcorInfo_1.FILTPATH : return False\n\n for line_0,line_1 in zip(kcorInfo_0.FILTER,kcorInfo_1.FILTER) :\n file_0 = line_0[1]\n file_1 = line_1[1]\n zpoff_0 = line_0[2]\n zpoff_1 = line_1[2]\n if file_0 != file_1 : return False\n if zpoff_0 != zpoff_1 : return False\n\n return True\n\ndef write_kcor_inputFile(versionInfo,kcorInfo):\n\n fname = versionInfo.kcor_inFile\n PRIVATE_DATA_PATH = versionInfo.PRIVATE_DATA_PATH\n\n print(f\"\\n Create {fname}\")\n f = open(fname,\"wt\")\n\n f.write(f\"# Combined kcor file created by\\n# {sys.argv}\\n\" )\n f.write(f\"# User = {getpass.getuser()} \\n\")\n f.write(f\"# Host = {HOSTNAME}\\n\" )\n f.write(f\"# {NOW}\\n\" )\n f.write(f\"\\n\")\n \n f.write(f\"SN_SED: {kcorInfo[0].SN_SED} \\n\")\n\n # check all kcor files for primary(s)\n nkcor=0\n USE_BD17 = 0 ; USE_AB = 0\n for kcor in kcorInfo :\n if ( len(kcorInfo[nkcor].BD17_SED) > 0 and USE_BD17==0 ) :\n f.write(f\"BD17_SED: {kcorInfo[nkcor].BD17_SED}\\n\")\n USE_BD17 = 1\n\n if ( len(kcorInfo[nkcor].AB_SED) > 0 and USE_AB==0) :\n f.write(f\"AB_SED: {kcorInfo[nkcor].AB_SED} \\n\")\n USE_AB = 1\n nkcor += 1\n\n\n L0 = kcorInfo[0].LAMRANGE[0]\n L1 = kcorInfo[0].LAMRANGE[1] \n f.write(f\"LAMBDA_RANGE: {L0} {L1} \\n\" )\n\n # - - - - -\n # check for spectrograph (Jan 2021)\n # (warning: need to abort if spectrograph file names are different)\n n_spectro = 0\n for kcor in kcorInfo :\n spectro_file = kcor.SPECTROGRAPH\n if spectro_file is not None and n_spectro == 0 :\n f.write(f\"SPECTROGRAPH: {spectro_file} \\n\")\n n_spectro += 1\n\n # - - - - \n
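 # The emitted kcor-input file looks like (illustrative sketch based only on\n # the writes in this function; actual values depend on the inputs):\n #   SN_SED: <sed file>\n #   BD17_SED / AB_SED: <primary refs, first occurrence only>\n #   LAMBDA_RANGE: <min> <max>\n #   OUTFILE: kcor_<SURVEY_OUT>.fits\n #   then one MAGSYSTEM/FILTSYSTEM/FILTPATH/SURVEY/FILTER block per survey\n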
 f.write(f\"OUTFILE: {versionInfo.kcor_outFile} \\n\")\n \n # - - - - - - - - - - - - - - -\n # loop over filter sets\n nkcor = 0\n for kcor in kcorInfo :\n V = versionInfo.NAME[nkcor]\n survey_name = versionInfo.SURVEY_INP[nkcor]\n nkcor += 1\n\n if \"IGNORE\" in survey_name : continue \n \n f.write(f\"\\n\" )\n f.write(f\"# Start filters for VERSION = {V}\\n\" )\n f.write(f\"MAGSYSTEM: {kcor.MAGSYSTEM} \\n\")\n f.write(f\"FILTSYSTEM: {kcor.FILTSYSTEM} \\n\")\n f.write(f\"FILTPATH: {kcor.FILTPATH} \\n\")\n f.write(f\"SURVEY: {survey_name} \\n\")\n \n nfilt=0\n for line in kcor.FILTER :\n filter_name = line[0]\n filter_file = line[1]\n filter_zpoff = line[2]\n filter_name_new = kcor.FILTER_NEWNAME[nfilt]\n f.write(\"FILTER: %-20.20s %s %s \\n\"\n % (filter_name_new,filter_file,filter_zpoff) )\n nfilt += 1\n f.close()\n\ndef get_survey(PRIVATE_DATA_PATH,VERSION):\n\n # run snana.exe on version, then read yaml file to get SURVEY name.\n\n survey = None\n prefix = (f\"OUT_{VERSION}\")\n cmd = \"snana.exe NOFILE \"\n cmd += (f\"VERSION_PHOTOMETRY={VERSION} \")\n cmd += (f\"SNTABLE_LIST '' \")\n cmd += (f\"OPT_YAML=1 \" )\n cmd += (f\"READ_SPECTRA=F \" )\n cmd += (f\"TEXTFILE_PREFIX={prefix} \" )\n if len(PRIVATE_DATA_PATH) > 2 :\n cmd += (f\"PRIVATE_DATA_PATH={PRIVATE_DATA_PATH} \")\n cmd += (f\" > {prefix}.LOG\")\n #print(f\" xxx cmd = {cmd}\\n\")\n os.system(cmd)\n\n # read from YAML file\n yaml_file = (f\"{prefix}.YAML\")\n with open(yaml_file,\"rt\") as y :\n for line in y:\n word_list = line.split()\n if word_list[0] == \"SURVEY:\" : survey = word_list[1]\n\n # remove junk files, and be careful not to rm .* by accident\n if len(prefix) > 2 :\n cmd_rm = (f\"rm {prefix}.*\")\n os.system(cmd_rm)\n\n #sys.exit(f\"\\n xxx {cmd_rm}\\n\")\n\n return survey \n\n # end get_survey\n\ndef run_kcor(versionInfo):\n inFile = versionInfo.kcor_inFile\n logFile = versionInfo.kcor_logFile\n cmd = 'kcor.exe ' + inFile + ' > ' + logFile\n print(f\" Run kcor program ... \")\n os.system(cmd)\n\n # check for fatal error in log file\n f = open(logFile,\"rt\")\n Lines = f.readlines()\n f.close()\n if any(\" ABORT \" in s for s in Lines):\n msg = \"\\nFATAL error running kcor program:\\n Check %s\\n\" % inFile\n sys.exit(msg)\n \nclass VERSION_INFO:\n def __init__(self,filename):\n f = open(filename,\"rt\")\n Lines = f.readlines()\n\n # read name output version\n reader = np.array([x.split() for x in Lines\n if x.startswith(KEYNAME_SURVEY_OUT)])\n \n SOUT = reader[0:,1][0]\n self.SURVEY_OUT = SOUT\n self.VERSION_OUT_TEXT = SOUT + '_TEXT'\n self.VERSION_OUT_FITS = SOUT + '_FITS'\n self.AUXFILE_README = SOUT + '_TEXT.README'\n self.AUXFILE_IGNORE = SOUT + '_TEXT.IGNORE'\n self.AUXFILE_LIST = SOUT + '_TEXT.LIST'\n\n \n self.kcor_inFile = 'kcor_' + SOUT + '.input'\n self.kcor_outFile = 'kcor_' + SOUT + '.fits'\n self.kcor_logFile = 'kcor_' + SOUT + '.log'\n self.simlibFile = SOUT + '.SIMLIB'\n \n reader = np.array([x.split() for x in Lines\n if x.startswith('PRIVATE_DATA_PATH:')])\n if ( len(reader) > 0 ):\n tmpDir = reader[0:,1][0]\n self.PRIVATE_DATA_PATH = os.path.expandvars(tmpDir)\n else:\n self.PRIVATE_DATA_PATH = \"\"\n\n\n # read optional name VPEC_FILE\n reader = np.array([x.split() for x in Lines\n if x.startswith(KEYNAME_VPEC_FILE)])\n if ( len(reader) > 0 ):\n tmpFile = reader[0:,1][0]\n self.VPEC_FILE = tmpFile\n else:\n self.VPEC_FILE = \"\"\n\n \n # define strings of old and new filter char;\n # to be filled later.\n self.FILTER_CHARLIST_OLD = \"\"\n self.FILTER_CHARLIST_NEW = \"\"\n self.NFILTER_TOT = 0\n self.NFILE = [] # to be filled later\n \n # read input versions\n reader = np.array([x.split() for x in Lines\n if x.startswith(KEYNAME_VERSION)])\n self.NAME = reader[:,1]\n self.INFILE_KCOR = reader[:,2]\n \n # Dec 8 2020 : get survey name with snana job\n print(f\"\")\n self.SURVEY_INP = []\n for V in self.NAME :\n survey_name = get_survey(self.PRIVATE_DATA_PATH,V)\n print(f\" VERSION {V:<28.28} -> {survey_name} \")\n self.SURVEY_INP.append(survey_name)\n\n print(f\"\")\n\n f.close()\n\n \nclass KCOR_INFO:\n def __init__(self,filename):\n print(f\" Parse kcor input file: {filename}\")\n\n filename_expandvars = os.path.expandvars(filename)\n # check local dir first; then check $SNDATA_ROOT/kcor\n if os.path.isfile(filename_expandvars):\n fname_local = filename_expandvars\n else:\n fname_local = TOPDIR_KCOR + '/' + filename_expandvars\n\n # open file and read all lines into Lines\n f = open(fname_local,\"rt\")\n Lines = np.array(f.readlines())\n\n # parse SED stuff\n self.SN_SED = parseLines(Lines,'SN_SED:', 1, 1) \n self.BD17_SED = parseLines(Lines,'BD17_SED:',1, 1)\n self.AB_SED = parseLines(Lines,'AB_SED:', 1, 1)\n \n # wavelength range\n self.LAMRANGE = parseLines(Lines,'LAMBDA_RANGE:',2, 1)\n \n # filter stuff\n self.MAGSYSTEM = parseLines(Lines,'MAGSYSTEM:', 1, 1)\n self.FILTSYSTEM = parseLines(Lines,'FILTSYSTEM:',1, 1)\n self.FILTPATH = parseLines(Lines,'FILTPATH:', 1, 1)\n self.FILTER = parseLines(Lines,'FILTER:', 3, 1)\n self.NFILTER = len(self.FILTER)\n\n # optional spectrograph (Jan 2021)\n self.SPECTROGRAPH = parseLines(Lines,'SPECTROGRAPH:',1, 1)\n if len(self.SPECTROGRAPH) == 0 : self.SPECTROGRAPH = None\n #print(f\"\\t xxx SPECTROGRAPH = {self.SPECTROGRAPH} \")\n\n if ( self.NFILTER > MXFILTERS ):\n errMsg = (f\"{self.NFILTER} filters exceeds bound of \" \\\n f\"{MXFILTERS}\" )\n sys.exit(errMsg)\n \n # define things to change later\n self.FILTER_NEWNAME = [] # to be changed later\n self.new = True # assume new filter/mag 
system\n self.FILTER_CHARLIST_OLD = \"\"\n self.FILTER_CHARLIST_NEW = \"\"\n self.ikcor_use = -9\n f.close()\n\n\ndef create_newVersion(versionInfo):\n\n VOUT = versionInfo.VERSION_OUT_TEXT # name of output version\n\n # create new version-subDir\n if (len(VOUT) == 0 ):\n sys.exit(\"Output directory not defined\\n Check SURVEY_OUT key\")\n\n if ( os.path.exists(VOUT) ):\n print(f\" Remove pre-existing {VOUT}\")\n shutil.rmtree(VOUT)\n \n os.mkdir(VOUT)\n\n # create auxiliary files\n cdV = 'cd ' + VOUT + ' ; '\n\n cmd = cdV + ' touch ' + versionInfo.AUXFILE_README\n os.system(cmd)\n cmd = cdV + ' touch ' + versionInfo.AUXFILE_LIST\n os.system(cmd)\n cmd = cdV + ' touch ' + versionInfo.AUXFILE_IGNORE\n os.system(cmd)\n\n # write stuff in README\n README_OUTFILE = VOUT + '/' + versionInfo.AUXFILE_README\n f = open(README_OUTFILE,\"wt\")\n f.write(\"# Combined data files with command:\\n\")\n f.write(\"# %s %s \\n\" % (sys.argv[0], sys.argv[1]) )\n f.write(\"# Host = %s\\n\" % HOSTNAME )\n f.write(\"# User = %s\\n\" % getpass.getuser() )\n f.write(\"# Directory = %s\\n\" % CWD )\n f.write(\"# Time = %s\\n\" % NOW )\n f.write(\"\\n\")\n f.close\n\n \ndef add_newVersion(VIN,versionInfo,kcorInfo):\n\n # add new version to combined version;\n # copy all data files and use 'sed' utility\n # to make changes to filters and SURVEY name.\n \n dataDir = TOPDIR_DATA + '/' + VIN\n SOUT = versionInfo.SURVEY_OUT\n VOUT_TEXT = versionInfo.VERSION_OUT_TEXT\n \n # read list file\n LISTFILE_IN = dataDir + '/' + VIN + '.LIST'\n LISTFILE_OUT = VOUT_TEXT + '/' + versionInfo.AUXFILE_LIST\n PTR_L = open(LISTFILE_IN,\"rt\")\n fileList = PTR_L.readlines()\n PTR_L.close\n\n # read contents of first file\n first_fileName = dataDir + '/' + fileList[0]\n first_fileName = first_fileName.replace(\"\\n\", \"\")\n f0 = open(first_fileName,\"rt\")\n fileContents = np.array(f0.readlines())\n f0.close\n \n # read FILTER string from first data file\n FILTERSTRING_OLD = parseLines(fileContents, 'FILTERS:', 1, 0)\n\n # get full filter lists from kcor file\n FILTERLIST_OLD = kcorInfo.FILTER_CHARLIST_OLD # only this version\n FILTERLIST_NEW = kcorInfo.FILTER_CHARLIST_NEW # only this version\n FILTERLIST_ALL = versionInfo.FILTER_CHARLIST_NEW # all filters\n NFILTER = kcorInfo.NFILTER\n FILTER_NEWNAME = kcorInfo.FILTER_NEWNAME\n\n print(f\"\\t {FILTERLIST_OLD} -> {FILTERLIST_NEW} \" )\n\n # read name of survey from first file\n SURVEY = parseLines(fileContents, 'SURVEY:', 1, 0)\n #print '\\t SURVEY_NAME = ', SURVEY\n \n # open new list file in append mode\n PTR_NEWLIST = open(LISTFILE_OUT,\"at\")\n\n # - - - - - start construction of 'sed' command - - - - - - -\n sedcmd = \"sed \"\n \n # replace SURVEY ... only first occurrence !\n # xxx mark sedAdd = \"-e '0,/SURVEY:/s/%s/ %s(%s)/' \" %(SURVEY,SOUT,SURVEY)\n sedAdd = f\"-e '0,/SURVEY:/s/{SURVEY}/ {SOUT}({SURVEY})/' \"\n sedcmd += sedAdd\n \n # Replace global filter string.\n OLD = FILTERSTRING_OLD # from data file\n OLD2 = FILTERLIST_OLD # from kcor file\n NEW = FILTERLIST_NEW # new kcor list\n ALL = FILTERLIST_ALL # all filters from all files\n # xxx mark sedAdd = \"-e 's/%s/%s # %s -> %s/g' \" % (OLD,ALL,OLD2,NEW) \n sedAdd = f\"-e 's/{OLD}/{ALL} # {OLD2} -> {NEW}/g' \" \n sedcmd += sedAdd\n\n # Replace each single-char band with new full filter name\n # Add '??' before each band to avoid removing new band\n # that matches an old band. Then remove ?? 
separately.\n\n for i in range(NFILTER):\n old = FILTERLIST_OLD[i]\n new = FILTERLIST_NEW[i] # new char name\n filter_newname = FILTER_NEWNAME[i].replace(\"/\",\"\\/\") # new full name\n sedAdd = f\"-e 's/ {old} / ??{filter_newname} /g' \"\n sedcmd += sedAdd\n \n #sys.exit(\"\\n xxx DEBUG STOP xxx \\n\")\n\n # remove the temporary '?s?'\n sedcmd += \"-e 's/??//g' \"\n \n # loop over all files and run 'sedcmd' \n nfile=0\n for fin in fileList:\n fin = fin.replace(\"\\n\", \" \")\n FIN = dataDir + '/' + fin\n FOUT = VOUT_TEXT + '/' + fin\n SEDCMD = sedcmd + FIN + ' > ' + FOUT \n os.system(SEDCMD)\n PTR_NEWLIST.write(\"%s\\n\" % (fin) )\n nfile += 1\n\n PTR_NEWLIST.close\n\n # update README file\n README_OUTFILE = VOUT_TEXT + '/' + versionInfo.AUXFILE_README\n PTR_README = open(README_OUTFILE,\"at\")\n\n # xxx mark delete txt1 = \"%3d data files from %-28.28s\" % (nfile,VIN)\n # xxx mark delete txt2 = \"%s -> %s\" % (FILTERLIST_OLD,FILTERLIST_NEW)\n\n txt1 = f\"{nfile:3d} data files from {VIN:>28}\"\n txt2 = f\"{FILTERLIST_OLD} -> {FILTERLIST_NEW}\"\n\n PTR_README.write(f\"{txt1} {txt2} \\n\")\n PTR_README.close\n\n\ndef merge_duplicates(versionInfo):\n\n VOUT_TEXT = versionInfo.VERSION_OUT_TEXT \n\n # get list of SNID using grep : file SNID\n cmd = (f\"cd {VOUT_TEXT} ; grep SNID: *.* \")\n output = subprocess.check_output(cmd, shell=True)\n logging.basicConfig(level=logging.DEBUG, \n format=\"[%(funcName)20s()] %(message)s\")\n\n output = output.decode('utf-8')\n output = output.split('\\n')[:-1]\n\n d = {}\n for line in output:\n s = line.split()\n key = s[-1]\n val = s[0][:s[0].index(\"SNID:\")-1]\n if key not in d:\n d[key] = []\n d[key].append(val)\n \n duplicates = [k for k in d.keys() if len(d[k]) > 1]\n ndupl = len(duplicates)\n print(f\"\\n Found {ndupl} duplicates:\" )\n for sn in duplicates :\n print(f\" {sn} {d[sn]} \") \n\n dup_dir = VOUT_TEXT + \"/DUPLICATES\"\n logging.info(\"Merging into %s\" % dup_dir)\n if not os.path.exists(dup_dir):\n logging.debug(\"Creating directory %s\" % dup_dir)\n os.makedirs(dup_dir)\n \n to_add = []\n for sn in duplicates:\n files = [\"%s/%s\" % (VOUT_TEXT, f) for f in d[sn]]\n \n output_file = \"%s/merged_%s.dat\" % (VOUT_TEXT, sn)\n to_add.append(os.path.basename(output_file) +\"\\n\")\n buf = [] \n with open(files[0]) as scaffold:\n for line in scaffold:\n if line.startswith(\"END:\"):\n continue\n buf.append(line)\n for f in files[1:]:\n with open(f) as obs:\n for line in obs:\n if not line.startswith(\"OBS:\"):\n continue\n buf.append(line)\n buf.append(\"END:\") \n n_obs = len([None for l in buf if l.startswith(\"OBS\")])\n logging.debug(\"Merging to %s with %d obs\" % (output_file, n_obs))\n for i, l in enumerate(buf):\n if l.startswith(\"NOBS:\"):\n buf[i] = \"NOBS: %d\\n\" % n_obs\n with open(output_file, \"w\") as output:\n for line in buf:\n output.write(line)\n for f in files:\n shutil.move(f, \"%s/%s\" % (dup_dir, os.path.basename(f)))\n\n # Update list\n list_file = \"%s/%s.LIST\" % (VOUT_TEXT, VOUT_TEXT)\n with open(list_file) as f:\n list_sn = [l for l in f]\n to_remove = [os.path.basename(filename) for sn in duplicates for filename in d[sn]]\n final_list = to_add + [f for f in list_sn if \"\".join(f.split()) not in to_remove]\n with open(list_file, \"w\") as f:\n f.writelines(final_list)\n logging.info(\"Updated list file %s\" % list_file) \n\n\n # For each pair of duplicates:\n # Get list of duplicates : filename & SNID\n # Merge \"OBS:\" lines into one file, keeping header of first file.\n # Update NOBS key-value and make sure 
that \"END:\" is at the end\n # Move duplicates into /DUPLICATES subDir\n \n\ndef add_vpec(versionInfo):\n\n # Created Jan 2018 by RK\n # call external script to read peculiar velocity correction \n # (VPEC) and its error (VPEC_ERR) from a fitres-formatted \n # text file, and append each data file header. \n # Use CLOBBER mode so that we don't have to move or copy \n # anything when it's done.\n # First argument is ./VERSION (instead of just VERSION)\n # so that the script will see a slash and use this \n # directory instead of searching under $SNDATA_ROOT/lcmerge.\n\n VPEC_FILE = versionInfo.VPEC_FILE\n\n if ( len(VPEC_FILE) > 1 ):\n print(f\"\\n Add VPEC and VPEC_ERR from {VPEC_FILE}\")\n VOUT_TEXT = versionInfo.VERSION_OUT_TEXT\n LOGFILE = \"ADD_VPEC.LOG\"\n cmd = \"update_data_files.pl ./%s %s 'VPEC VPEC_ERR' CLOBBER > %s \" % (VOUT_TEXT,VPEC_FILE,LOGFILE)\n os.system(cmd)\n # end add_vpec\n\n\ndef make_simlib(versionInfo):\n\n print(f\"\\n Create SIMLIB \")\n \n SOUT = versionInfo.SURVEY_OUT\n VOUT_TEXT = versionInfo.VERSION_OUT_TEXT\n# ZP_TEXT = versionInfo.ZPERR \n nmlFile = 'make_simlib_' + SOUT + '.nml'\n logFile = 'make_simlib_' + SOUT + '.log'\n simlibFile = versionInfo.simlibFile\n\n # open with 1 line buffer so that it is ready to run snana.exe\n PTR_NML = open(nmlFile,\"wt\",1)\n\n PTR_NML.write(f\" &SNLCINP\\n\")\n PTR_NML.write(f\" VERSION_PHOTOMETRY = '{VOUT_TEXT}' \\n\" )\n PTR_NML.write(f\" PRIVATE_DATA_PATH = './' \\n\")\n PTR_NML.write(f\" KCOR_FILE = '{versionInfo.kcor_outFile}' \\n\")\n\n# PTR_NML.write(\" SIMLIB_ZPERR_LIST = '%s' \\n\" % ZP_TEXT)\n \n \n PTR_NML.write(f\" SNTABLE_LIST = '' \\n\")\n PTR_NML.write(f\" SIMLIB_OUT = '{simlibFile}' \\n\" )\n \n PTR_NML.write(f\" \\n\")\n PTR_NML.write(f\" &END\\n\\n\")\n PTR_NML.flush\n PTR_NML.close\n\n cmd = \"snana.exe %s > %s\" % (nmlFile,logFile)\n os.system(cmd)\n\n \ndef convert2FITS(versionInfo):\n\n # convert TEXT format into FITS format\n S = versionInfo.SURVEY_OUT\n V_TEXT = versionInfo.VERSION_OUT_TEXT\n V_FITS = versionInfo.VERSION_OUT_FITS\n\n print(f\"\\n Convert TEXT formatted data files into FITS: \")\n print(f\"\\t {V_TEXT} -> {V_FITS}\\n\")\n \n # create new version directory\n if ( os.path.exists(V_FITS) ):\n shutil.rmtree(V_FITS)\n \n os.mkdir(V_FITS)\n\n # construct snana nmlFile\n nmlFile = \"convert2FITS_%s.nml\" % S\n NMLFILE = \"%s/%s\" % (V_FITS,nmlFile)\n logFile = \"convert2FITS_%s.log\" % S\n\n PTR_NML = open(NMLFILE,\"wt\",1)\n PTR_NML.write(\" &SNLCINP \\n\")\n PTR_NML.write(\" VERSION_PHOTOMETRY = '%s' \\n\" % V_TEXT )\n PTR_NML.write(\" PRIVATE_DATA_PATH = '%s' \\n\" % CWD )\n PTR_NML.write(\" VERSION_REFORMAT_FITS = '%s' \\n\" % V_FITS )\n PTR_NML.write(\" &END \\n\\n\")\n PTR_NML.flush\n PTR_NML.close\n cmd = \"cd %s ; snana.exe %s > %s \" % (V_FITS,nmlFile, logFile)\n os.system(cmd)\n\n # end convert2FITS\n \ndef printSummary(versionInfo):\n print(f\"\\n# =============================================== \")\n print(f\" Summary of outputs: \")\n\n V = versionInfo.VERSION_OUT_TEXT\n print(f\"\\t {V}/ (combined Data version)\" )\n\n V = versionInfo.VERSION_OUT_FITS\n print(f\"\\t {V}/ (combined Data version)\" )\n\n out_file = versionInfo.kcor_outFile\n print(f\"\\t {out_file} (combined kcor file)\" )\n\n simlib_file = versionInfo.simlibFile\n print(f\"\\t {simlib_file} (combined SIMLIB file)\" )\n \n # end printSummary\n\n# =========================\n# ======= MAIN ============\n# =========================\n\nif __name__ == \"__main__\":\n\n# parse input argument(s)\n if ( len(sys.argv) 
< 2 ):\n sys.exit(\"Must give INFILE arguent\\n-->ABORT\")\n else:\n INFILE = sys.argv[1]\n print(f\"Input file: {INFILE}\")\n\n print(f\" SNDATA_ROOT = {SNDATA_ROOT}\")\n\n versionInfo = VERSION_INFO(INFILE)\n\n # ----------------------------------------\n # prepare kcor file with all filters\n # ----------------------------------------\n kcorInfo = []\n nkcor=0\n for kcorFile in versionInfo.INFILE_KCOR:\n nkcor += 1\n kcorInfo.append(KCOR_INFO(kcorFile))\n \n print(f\"\\n Done parsing {nkcor} kcor-input files \")\n\n # combine multuple surveys with same FILTPATH (Dec 8 2020)\n combine_duplicate_filtpath(versionInfo,kcorInfo)\n\n # change filter char\n change_filterChar(versionInfo,kcorInfo)\n \n\n # write new combined kcor-input file\n write_kcor_inputFile(versionInfo,kcorInfo) \n run_kcor(versionInfo)\n \n # ---------------------------------------------------------\n # now re-write data files with different filter strings\n # ---------------------------------------------------------\n\n print(f\"\\n# - - - - - - - - - - - - - - - - - - - - - - \")\n \n # check for private data path\n if ( len(versionInfo.PRIVATE_DATA_PATH) > 0 ):\n TOPDIR_DATA = versionInfo.PRIVATE_DATA_PATH\n print(f\" Use PRIVATE_DATA_PATH = {TOPDIR_DATA}\")\n\n create_newVersion(versionInfo) \n nver=0\n for vname in versionInfo.NAME :\n print(f\" Swap filter strings in DATA-VERSION: {vname}\")\n add_newVersion(vname,versionInfo,kcorInfo[nver])\n nver += 1\n\n # -----------------\n # merge duplicate light curves from different instruments (Dec 2017)\n merge_duplicates(versionInfo)\n\n # --------------------\n # check option to add VPEC & VPEC_ERR (Jan 2018)\n add_vpec(versionInfo)\n \n # -----------------\n make_simlib(versionInfo)\n\n # -----------------\n convert2FITS(versionInfo)\n \n # --------------------\n # print summary of outputs\n printSummary(versionInfo)\n \n \n# ========= END MAIN ================\n\n","sub_path":"util/combine_dataVersions.py","file_name":"combine_dataVersions.py","file_ext":"py","file_size_in_byte":28275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457698193","text":"# # #########################################################################\n# ## This module file deals with content of the param files:\n# ## 1. create_top_matrix and create_left_matrix: based on the solution\n# ## board create instance's top matrix and left matrix;\n# ##\n# ## 2. create_content_of_inst_file: After getting top and left matrices\n# ## create a content of param files; The obtained content will be used\n# ## by the file_manager to create instance file.\n# ##\n# ## 3. transform_left_matrix, transform_top_matrix: moves clues of these\n# ## matrices from the top to down in the top matrix, and from left to\n# ## right of the left matrix. Also it removes the empty rows in\n# ## the top matrix and empty columns in the left matrix. As well as\n# ## it returns the number of all clues in respective matrix.\n# ##\n# ## 4. display_instance: is the function which generates display the any\n# ## instance of Nonogram on the traditional way so that it would be possible\n# ## to print it out and test with participants.\n# ##\n# ## 5. count_total_clues: this will just count the total number of clues of the\n# ## instance from both matrices: top and left. 
from matrix_processor import *\nimport matplotlib.pylab as plt\n\n\nPARAMS_TEMPLATE = \"\"\"\\\nlanguage ESSENCE' 1.0\nletting mainRow be {row_count}\nletting mainCol be {col_count}\nletting topRows be {top_row_count}\nletting leftCols be {left_col_count}\nletting leftMatrix be [\n{left_matrix_content}]\nletting topMatrix be [\n{top_matrix_content}\n]\"\"\"\n\n\ndef create_top_matrix(solution_board):\n # amount of rows (integer division so the result stays a valid range() bound):\n topRows = len(solution_board)\n topRows = topRows // 2 if topRows % 2 == 0 else topRows // 2 + 1\n\n # amount of cols: len(solution_board[0])\n topMatrix = [[0 for x in range(len(solution_board[0]))] for y in range(topRows)]\n\n for i in range(0, len(solution_board[0])): # columns\n counter = 0\n index = 0\n for j in range(0, len(solution_board)):\n if solution_board[j][i] == 1:\n counter += 1\n if topMatrix[index][i] != 0 and j > 0 and solution_board[j - 1][i] == 0 and index < topRows:\n index += 1\n topMatrix[index][i] = counter\n else:\n counter = 0\n if index > 0 and topMatrix[index][i] > 0:\n index += 1\n return topMatrix\n\n\ndef create_left_matrix(solution_board):\n # amount of columns (integer division, as above):\n leftCols = len(solution_board[0])\n leftCols = leftCols // 2 if leftCols % 2 == 0 else leftCols // 2 + 1\n\n # amount of rows: len(solution_board)\n leftMatrix = [[0 for x in range(leftCols)] for y in range(len(solution_board))]\n\n for i in range(0, len(solution_board)):\n counter = 0\n index = 0\n for j in range(0, len(solution_board[0])):\n if solution_board[i][j] == 1:\n counter += 1\n if leftMatrix[i][index] != 0 and j > 0 and solution_board[i][j - 1] == 0 and index < leftCols:\n index += 1\n leftMatrix[i][index] = counter\n else:\n counter = 0\n if index > 0 and leftMatrix[i][index] > 0:\n index += 1\n return leftMatrix\n\n\ndef create_content_of_inst_file(solution_board):\n left_matrix = create_left_matrix(solution_board)\n top_matrix = create_top_matrix(solution_board)\n left_matrix_content = convert_matrix_to_s(left_matrix)\n top_matrix_content = convert_matrix_to_s(top_matrix)\n return PARAMS_TEMPLATE.format(\n left_matrix_content=left_matrix_content,\n top_matrix_content=top_matrix_content,\n row_count=len(solution_board),\n col_count=len(solution_board[0]),\n top_row_count=len(top_matrix),\n left_col_count=len(left_matrix[0]),\n )\n\n\ndef transform_left_matrix(solution_board):\n left_matrix = create_left_matrix(solution_board)\n\n temp_matrix = [[0 for x in range(len(left_matrix[0]))] for y in range(len(left_matrix))]\n max_len = 0\n total_amount_digits = 0\n for row in range(0, len(left_matrix)):\n # list(...) so len() works on the filtered row (filter is lazy in Python 3)\n temp_matrix[row] = list(filter(lambda x: x != 0, left_matrix[row]))\n # track the longest clue row so empty columns can be trimmed\n if max_len < len(temp_matrix[row]):\n max_len = len(temp_matrix[row])\n\n transformed_left_matrix = [[0 for x in range(max_len)] for y in range(len(left_matrix))]\n for row in range(0, len(temp_matrix)):\n for col in range(0, len(temp_matrix[row])):\n transformed_left_matrix[row][len(transformed_left_matrix[row]) - (1 + col)] = temp_matrix[row][len(temp_matrix[row]) - (1 + col)]\n total_amount_digits += 1\n return transformed_left_matrix, total_amount_digits\n\n\n
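# Worked example (hypothetical 3x3 board; editor's illustration):\n# for solution_board = [[1, 1, 0], [0, 0, 1], [1, 1, 1]],\n# create_left_matrix() returns [[2, 0], [1, 0], [3, 0]], and\n# transform_left_matrix() right-aligns the clues, trims the all-zero\n# column, and also returns the clue count: ([[2], [1], [3]], 3).\n\n\n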
def transform_top_matrix(solution_board):\n top_matrix = create_top_matrix(solution_board)\n\n temp_converted_1 = [[0 for x in range(len(top_matrix))] for y in range(len(top_matrix[0]))]\n temp_converted_2 = [[0 for x in range(len(top_matrix))] for y in range(len(top_matrix[0]))]\n max_len = 0\n total_amount_digits = 0\n\n # transpose so that each clue column becomes a row\n for col in range(0, len(top_matrix[0])):\n for row in range(0, len(top_matrix)):\n temp_converted_1[col][row] = top_matrix[row][col]\n\n for row in range(0, len(temp_converted_1)):\n # list(...) so len() works on the filtered row (filter is lazy in Python 3);\n # the original inner loop re-filtered the same row once per column, which was redundant\n temp_converted_2[row] = list(filter(lambda x: x != 0, temp_converted_1[row]))\n # track the longest clue column so completely empty rows can be trimmed\n if max_len < len(temp_converted_2[row]):\n max_len = len(temp_converted_2[row])\n\n transformed_top_matrix = [[0 for x in range(len(top_matrix[0]))] for y in range(max_len)]\n for col in range(0, len(temp_converted_2)):\n for row in range(0, len(temp_converted_2[col])):\n transformed_top_matrix[len(transformed_top_matrix) - (1 + row)][col] = temp_converted_2[col][len(temp_converted_2[col]) - (1 + row)]\n total_amount_digits += 1\n return transformed_top_matrix, total_amount_digits\n\n\ndef display_instance(solution_board, title):\n left_matrix, num_clues_cols = transform_left_matrix(solution_board)\n top_matrix, num_clues_rows = transform_top_matrix(solution_board)\n # print(num_clues_cols)\n # print(num_clues_rows)\n\n # replace all zeros with empty strings so the tables render blank cells\n # print_matrix(left_matrix)\n # print_matrix(top_matrix)\n # print_matrix(solution_board)\n for i in range(0, len(left_matrix)):\n for j in range(0, len(left_matrix[0])):\n if left_matrix[i][j] == 0:\n left_matrix[i][j] = ''\n\n for i in range(0, len(top_matrix)):\n for j in range(0, len(top_matrix[0])):\n if top_matrix[i][j] == 0:\n top_matrix[i][j] = ''\n empty_sol_board = [[''] * len(solution_board[0])] * len(solution_board)\n w = 10\n h = 10\n fig = plt.figure(figsize=(w, h))\n plt.axis('off')\n # for bbox = [x, y, width, height]\n # left matrix's:\n x0 = 0.1\n y0 = 0.1\n width0 = 0.025 * len(left_matrix[0])\n height0 = 0.025 * len(left_matrix)\n # top matrix's:\n x1 = 0.1 + 0.025 * len(left_matrix[0])\n y1 = 0.1 + 0.025 * len(left_matrix)\n width1 = 0.025 * len(top_matrix[0])\n height1 = 0.025 * len(top_matrix)\n # solution board matrix's:\n x3 = 0.1 + 0.025 * len(left_matrix[0])\n y3 = 0.1\n width3 = 0.025 * len(empty_sol_board[0])\n height3 = 0.025 * len(empty_sol_board)\n\n plt.table(cellText=left_matrix, cellLoc='center', bbox=[x0, y0, width0, height0])\n plt.table(cellText=top_matrix, cellLoc='center', bbox=[x1, y1, width1, height1])\n plt.table(cellText=empty_sol_board, bbox=[x3, y3, width3, height3])\n\n plt.table(cellText=[['']], bbox=[x1, y1, 0.025 * len(top_matrix[0]), 0.001])\n plt.table(cellText=[['']], bbox=[x3, y3, 0.002, 0.025 * len(left_matrix)])\n plt.title(title)\n plt.show()\n\n\ndef count_total_clues(solution_board):\n left_matrix, num_clues_cols = transform_left_matrix(solution_board)\n top_matrix, num_clues_rows = transform_top_matrix(solution_board)\n total_num_clues = num_clues_cols + num_clues_rows\n return total_num_clues\n","sub_path":"software_picross_solver/PicrossGeneratorSolver/instance_generator.py","file_name":"instance_generator.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"495468559","text":"import os\nimport textwrap\nimport config\nfrom newspaper import Article\n\n\nclass NewsGrabber:\n def parse(self, url):\n article = Article(url)\n article.download()\n article.parse()\n\n self.save(url, article.title, article.top_image, article.text)\n print('Result: {}'.format(article.title))\n\n
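 # Usage sketch (editor's illustration; hypothetical URL, assumes a\n # config.py defining text_width is importable):\n # NewsGrabber().parse('https://example.com/news/story.html')\n # downloads the article, wraps its text, and writes it to\n # data/example.com/news/story.txt\n\n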
 def save(self, url, title, image, text):\n paragraphs = []\n\n for p in text.split('\\n'):\n paragraphs.append(textwrap.fill(p, width=config.text_width))\n\n file_path = self.get_file_path(url)\n file_title = textwrap.fill(title, width=config.text_width)\n file_content = '{}\\n\\n[{}]\\n\\n{}'.format(file_title, image, '\\n'.join(paragraphs))\n\n with open(file_path, 'wt') as out_file:\n out_file.write(file_content)\n\n def get_file_path(self, url):\n full_url = url.split('//')[1]\n path_elements = full_url.split('/')\n path_folders = path_elements[:-1]\n path_file = path_elements[-1]\n\n folders = os.path.join('data', *path_folders)\n os.makedirs(folders, exist_ok=True)\n\n full_file_path = os.path.join(folders, self.get_file_name(path_file))\n\n return full_file_path\n\n def get_file_name(self, file_name):\n if '=' in file_name:\n path_elements = file_name.split('=')\n file = '{}.txt'.format(path_elements[1])\n elif '.' in file_name:\n path_elements = file_name.split('.')\n file = '{}.txt'.format(path_elements[0])\n else:\n file = '{}.txt'.format(file_name)\n\n return file\n","sub_path":"ngrab.py","file_name":"ngrab.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623527772","text":"#------------------------------------------------------------------------------\n# Copyright (c) 2013, Nucleic Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#------------------------------------------------------------------------------\nfrom PyQt4.QtCore import Qt, QPoint\n\n\nclass DockWindowResizer(object):\n \"\"\" A class to assist resizing top-level dock windows.\n\n \"\"\"\n #: Do not resize the window.\n NoResize = 0\n\n #: Resize the window vertically from the north edge.\n North = 1\n\n #: Resize the window horizontally from the east edge.\n East = 2\n\n #: Resize the window vertically from the south edge.\n South = 3\n\n #: Resize the window horizontally from the west edge.\n West = 4\n\n #: Resize the window diagonally from the northeast corner.\n NorthEast = 5\n\n #: Resize the window diagonally from the northwest corner.\n NorthWest = 6\n\n #: Resize the window diagonally from the southeast corner.\n SouthEast = 7\n\n #: Resize the window diagonally from the southwest corner.\n SouthWest = 8\n\n #: The cursors to use for a given resize mode.\n Cursors = {\n North: Qt.SizeVerCursor,\n South: Qt.SizeVerCursor,\n East: Qt.SizeHorCursor,\n West: Qt.SizeHorCursor,\n NorthEast: Qt.SizeBDiagCursor,\n SouthWest: Qt.SizeBDiagCursor,\n NorthWest: Qt.SizeFDiagCursor,\n SouthEast: Qt.SizeFDiagCursor,\n }\n\n @classmethod\n def cursor(cls, mode):\n \"\"\" Get the cursor style to show for a given mode.\n\n Parameters\n ----------\n mode : int\n One of the resize mode enum values.\n\n Returns\n -------\n result : Qt.CursorShape or None\n The cursor shape to use for the mode, or None if the\n cursor should be unset.\n\n \"\"\"\n return cls.Cursors.get(mode)\n\n @classmethod\n def hit_test(cls, window, pos, extra=0):\n \"\"\" Hit test a window for the resize mode.\n\n Hit testing is confined to the contents margins of the window.\n\n Parameters\n ----------\n window : QWidget\n The top-level dock window to test for resize area.\n\n pos : QPoint\n The point of interest, expressed in local coordinates.\n\n extra : int\n Extra space to add to the hit test for corners. 
The\n default is 0.\n\n Returns\n -------\n result : tuple\n A 2-tuple of (int, QPoint) representing the resize mode\n and offset for the mode.\n\n \"\"\"\n x = pos.x()\n y = pos.y()\n width = window.width()\n height = window.height()\n m = window.contentsMargins()\n if x < m.left():\n if y < m.top() + extra:\n mode = cls.NorthWest\n offset = QPoint(x, y)\n elif y > height - (m.bottom() + extra):\n mode = cls.SouthWest\n offset = QPoint(x, height - y)\n else:\n mode = cls.West\n offset = QPoint(x, 0)\n elif y < m.top():\n if x < m.left() + extra:\n mode = cls.NorthWest\n offset = QPoint(x, y)\n elif x > width - (m.right() + extra):\n mode = cls.NorthEast\n offset = QPoint(width - x, y)\n else:\n mode = cls.North\n offset = QPoint(0, y)\n elif x > width - m.right():\n if y < m.top() + extra:\n mode = cls.NorthEast\n offset = QPoint(width - x, y)\n elif y > height - (m.bottom() + extra):\n mode = cls.SouthEast\n offset = QPoint(width - x, height - y)\n else:\n mode = cls.East\n offset = QPoint(width - x, 0)\n elif y > height - m.bottom():\n if x < m.left() + extra:\n mode = cls.SouthWest\n offset = QPoint(x, height - y)\n elif x > width - (m.right() + extra):\n mode = cls.SouthEast\n offset = QPoint(width - x, height - y)\n else:\n mode = cls.South\n offset = QPoint(0, height - y)\n else:\n mode = cls.NoResize\n offset = QPoint()\n return mode, offset\n\n @staticmethod\n def resize(window, pos, mode, offset):\n \"\"\" Resize the window for the given state.\n\n Parameters\n ----------\n window : QWidget\n The top-level dock window to resize.\n\n pos : QPoint\n The position of the mouse, in local coordinates.\n\n mode : int\n The resize mode to apply during the resize. This should be\n the first value returned from the hit_test() method.\n\n offset : QPoint\n The offset of the mouse press at the border. This should be\n the second value returned from the hit_test() method.\n\n \"\"\"\n handler = _RESIZE_HANDLERS.get(mode)\n if handler is not None:\n handler(window, pos, offset)\n\n\n
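# Minimal wiring sketch (editor's illustration; hypothetical subclass, not\n# part of this module). A frameless window would typically call hit_test()\n# on mouse press and resize() on mouse move:\n#\n# class ResizableWindow(QWidget):\n#   def mousePressEvent(self, event):\n#     self._mode, self._offset = DockWindowResizer.hit_test(self, event.pos(), extra=4)\n#   def mouseMoveEvent(self, event):\n#     DockWindowResizer.resize(self, event.pos(), self._mode, self._offset)\n\n\n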
def _resize_north(widget, pos, offset):\n \"\"\" A resize handler for north resizing.\n\n \"\"\"\n dh = pos.y() - offset.y()\n height = widget.height()\n min_height = widget.minimumSizeHint().height()\n if height - dh < min_height:\n dh = height - min_height\n rect = widget.geometry()\n rect.setY(rect.y() + dh)\n widget.setGeometry(rect)\n\n\ndef _resize_south(widget, pos, offset):\n \"\"\" A resize handler for south resizing.\n\n \"\"\"\n dh = pos.y() - widget.height() + offset.y()\n size = widget.size()\n size.setHeight(size.height() + dh)\n widget.resize(size)\n\n\ndef _resize_east(widget, pos, offset):\n \"\"\" A resize handler for east resizing.\n\n \"\"\"\n dw = pos.x() - widget.width() + offset.x()\n size = widget.size()\n size.setWidth(size.width() + dw)\n widget.resize(size)\n\n\ndef _resize_west(widget, pos, offset):\n \"\"\" A resize handler for west resizing.\n\n \"\"\"\n dw = pos.x() - offset.x()\n width = widget.width()\n min_width = widget.minimumSizeHint().width()\n if width - dw < min_width:\n dw = width - min_width\n rect = widget.geometry()\n rect.setX(rect.x() + dw)\n widget.setGeometry(rect)\n\n\ndef _resize_northeast(widget, pos, offset):\n \"\"\" A resize handler for northeast resizing.\n\n \"\"\"\n dw = pos.x() - widget.width() + offset.x()\n dh = pos.y() - offset.y()\n size = widget.size()\n min_size = widget.minimumSizeHint()\n if size.height() - dh < min_size.height():\n dh = size.height() - min_size.height()\n rect = widget.geometry()\n rect.setWidth(rect.width() + dw)\n rect.setY(rect.y() + dh)\n widget.setGeometry(rect)\n\n\ndef _resize_northwest(widget, pos, offset):\n \"\"\" A resize handler for northwest resizing.\n\n \"\"\"\n dw = pos.x() - offset.x()\n dh = pos.y() - offset.y()\n size = widget.size()\n min_size = widget.minimumSizeHint()\n if size.width() - dw < min_size.width():\n dw = size.width() - min_size.width()\n if size.height() - dh < min_size.height():\n dh = size.height() - min_size.height()\n rect = widget.geometry()\n rect.setX(rect.x() + dw)\n rect.setY(rect.y() + dh)\n widget.setGeometry(rect)\n\n\ndef _resize_southwest(widget, pos, offset):\n \"\"\" A resize handler for southwest resizing.\n\n \"\"\"\n dw = pos.x() - offset.x()\n dh = pos.y() - widget.height() + offset.y()\n size = widget.size()\n min_size = widget.minimumSizeHint()\n if size.width() - dw < min_size.width():\n dw = size.width() - min_size.width()\n rect = widget.geometry()\n rect.setX(rect.x() + dw)\n rect.setHeight(rect.height() + dh)\n widget.setGeometry(rect)\n\n\ndef _resize_southeast(widget, pos, offset):\n \"\"\" A resize handler for southeast resizing.\n\n \"\"\"\n dw = pos.x() - widget.width() + offset.x()\n dh = pos.y() - widget.height() + offset.y()\n size = widget.size()\n size.setWidth(size.width() + dw)\n size.setHeight(size.height() + dh)\n widget.resize(size)\n\n\n
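# Editor's note on the arithmetic above: each handler turns the mouse\n# position plus the stored press offset into a geometry delta. For the\n# south edge, for example, dh = pos.y() - widget.height() + offset.y(),\n# i.e. how far the cursor sits past the current bottom edge, corrected by\n# where inside the margin the press originally landed.\n\n\n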
_RESIZE_HANDLERS = {\n DockWindowResizer.North: _resize_north,\n DockWindowResizer.South: _resize_south,\n DockWindowResizer.East: _resize_east,\n DockWindowResizer.West: _resize_west,\n DockWindowResizer.NorthEast: _resize_northeast,\n DockWindowResizer.SouthWest: _resize_southwest,\n DockWindowResizer.NorthWest: _resize_northwest,\n DockWindowResizer.SouthEast: _resize_southeast,\n}\n","sub_path":"enaml/qt/docking/dock_window_resizer.py","file_name":"dock_window_resizer.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203751765","text":"from flask import Blueprint, render_template, redirect, url_for, request\nfrom util import db_write, get_posts\nfrom localStoragePy import localStoragePy\n\nuser_post = Blueprint(\"user_post\", __name__)\n\nlocalStorage = localStoragePy('flask_arch', 'json')\n\n@user_post.route(\"/upload_post\", methods=[\"GET\", \"POST\"])\ndef upload():\n user_email = localStorage.getItem(\"token\")\n if user_email is None:\n return redirect(url_for('authentication.login_user'))\n if request.method == 'POST':\n post = request.form[\"post\"]\n if db_write([user_email, post], \"posts\"):\n return redirect(url_for('user_post.view'))\n else:\n return render_template('post/upload_post.html', msg = \"Upload Failed!\", user = user_email)\n return render_template('post/upload_post.html', user = user_email)\n\n@user_post.route(\"/view_post\", methods=[\"GET\"])\ndef view():\n user_email = localStorage.getItem(\"token\")\n print(user_email)\n if user_email is None:\n return redirect(url_for('authentication.login_user'))\n data = get_posts()\n return render_template('post/view_post.html', data = data, user = user_email)\n","sub_path":"Second Approach/app/controllers/user_post.py","file_name":"user_post.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376303231","text":"# pylint: disable=C0103\nfrom lxml import etree\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.template import Context\nfrom django.utils.safestring import mark_safe\nimport os\nimport logging\n\nlogger = logging.getLogger('translate') # pylint: disable=C0103\n\n# pylint: disable=F0401\nfrom apps.present.models import Tmplt, Site, Layout, Page, Block_Type, \\\n BlockMap, Content\nfrom apps.translate.image import parse_images\nfrom apps.translate.translators import Translator\nfrom apps.translate.form import Form\nfrom apps.translate.rich_text_format import Rtf\nfrom apps.translate.utils import get_template\n\n#\n# Too many public methods. This is likely a good catch on the part of\n# pylint. XXX\nclass Parser: # pylint: disable=R0904\n \"\"\"\n Parses a microsite definition.\n\n Given a microsite XML tree, the document is parsed and a new site is\n added to the database. If there is an old site, it will be deactivated.\n \"\"\"\n
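 # Usage sketch (editor's illustration; element names follow the do_*\n # handlers below, wiring is hypothetical):\n # parser = Parser()\n # parser.do_site(site_elem) # <site name=\"...\" version=\"...\">\n # parser.do_name(name_elem) # <name>Company Name</name>\n\n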
 site = None # Object\n main_page = None # Object representing **ALL*\n old_site = None # Object\n menu = None # dictionary structure holding the main menu of a site.\n images = None # dictionary of image objects\n trans = None # optimization, get only one instance of the Translator\n\n def __init__(self):\n self.trans = Translator()\n\n def get_template(self, name):\n \"\"\"\n Caching template loader wrapper.\n\n Args:\n name: The name of the template we want\n\n Returns:\n The template, ready to be used; a loader error is raised otherwise.\n \"\"\"\n return get_template('piece', name)\n\n def do_site(self, e):\n \"\"\"\n Create a site object.\n\n <site name=\"...\" version=\"...\">\n version -- required, uniquely identifies which version of the site this is.\n name -- required, the domain name is used in the presentation layer to\n determine which site to present.\n\n return -- site and old_site are populated and ready to use.\n \"\"\"\n try:\n self.old_site = Site.objects.get(site=e.attrib['name'],\n version=e.attrib['version'])\n except Site.DoesNotExist:\n pass\n\n self.site = Site(site=e.attrib['name'],\n version = e.attrib['version'],\n is_active=False,\n )\n #\n # When the site is fully populated this site object should be made \n # active and the old site object is deactivated.\n #\n\n def do_name(self, e):\n \"\"\"\n Set the name of the site.\n\n <name>Site Name</name>\n The element text is used to set the company name.\n \"\"\"\n if not self.site:\n raise ValueError('found name element before site element')\n self.site.company = e.text\n\n def do_template(self, e):\n \"\"\"\n Set the site template\n \n