diff --git "a/6436.jsonl" "b/6436.jsonl" new file mode 100644--- /dev/null +++ "b/6436.jsonl" @@ -0,0 +1,648 @@ +{"seq_id":"86051712","text":"from django.conf.urls import url, include\nfrom django.views.generic import TemplateView\nfrom views import add_group, get_group\nfrom django.contrib import admin\n\nfrom views import add_group, get_group, get_tasks, add_tasks, add_collect, get_attence, get_persons, add_attence, \\\n get_collectable_group,get_group_collect_start_date,get_group_attence_date,get_attenceable_group\n\nurlpatterns = [\n url(r\"^$\", TemplateView.as_view(template_name=\"homepage.html\"), name=\"home\"),\n url(r\"^addgroup$\", add_group),\n url(r\"^getgroups$\", get_group),\n url(r'^admin/', include(admin.site.urls)),\n url(r\"^getgroups$\", get_group),\n url(r\"^addtasks$\", add_tasks),\n url(r\"^gettasks$\", get_tasks),\n url(r\"^addcollect$\", add_collect),\n url(r\"^addattence$\", add_attence),\n url(r\"^getattence$\", get_attence),\n url(r\"^getpersons$\", get_persons),\n url(r\"^get_collectable_group$\", get_collectable_group),\n url(r\"^get_group_collect_start_date$\",get_group_collect_start_date),\n url(r\"^get_group_attence_date$\",get_group_attence_date),\n url(r\"^get_attenceable_group$\",get_attenceable_group),\n\n]\n","sub_path":"collect_attence/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"90548214","text":"# Copyright (C) 2008 Distance and e-Learning Centre, \n# University of Southern Queensland\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\n\nclass QueryTokenizer(object):\n def __init__(self, queryText):\n self.__tokens = []\n # states = None, or a bracket state of \"[\" or \"(\" or \"{\" or '\"'\n self.__chars = list(queryText)\n self.__chars.append(None)\n self.__process()\n \n @property\n def tokens(self):\n return self.__tokens\n \n \n def extractTag(self, tagName):\n tagTokens = []\n if not tagName.endswith(\":\"):\n tagName += \":\"\n for token in self.__tokens:\n if token.find(tagName)!=-1:\n tagTokens.append(token)\n for tt in list(tagTokens):\n i = self.__tokens.index(tt)\n x = tagTokens.index(tt)\n if tt.endswith(\":\"):\n try:\n t = self.__tokens.pop(i+1)\n tagTokens[x] += t\n except: pass\n if i>0:\n if self.__tokens[i-1]==\"+\":\n self.__tokens.pop(i-1)\n tagTokens[x] = \"+\" + tagTokens[x]\n self.__tokens = [t for t in self.__tokens if t.find(tagName)==-1]\n return tagTokens\n \n \n #### Private methods ####\n def __getNextChar(self):\n chars = self.__chars\n if chars[0] is None:\n return None\n char = chars.pop(0)\n if char==\"&\":\n if chars[0]==\"&\":\n char += chars.pop(0)\n elif char==\"|\":\n if chars[0]==\"|\":\n char += chars.pop(0)\n elif char.isspace():\n char = \" \"\n while(chars[0] is not None and chars[0].isspace()):\n chars.pop(0)\n elif char==\"\\\\\":\n char += chars.pop(0)\n return char\n \n \n def __process(self):\n while True:\n token = self.__getNextToken()\n if token is None:\n break\n self.__tokens.append(token)\n \n \n def __getNextToken(self, endChar=None):\n chars = self.__chars\n sepChars = [\"+\", \"-\", \"!\", \"&&\", \"||\", \" \", None]\n openBrkChars = ['\"', \"(\", \"{\", \"[\"]\n closeBrkChars = ['\"', \")\", \"}\", \"]\"]\n specialChars = sepChars + openBrkChars + closeBrkChars\n token = self.__getNextChar()\n if token in sepChars:\n return token\n #if token in closeBrkChars:\n # return token\n if token==endChar:\n return token\n if token in openBrkChars:\n closingChar = closeBrkChars[openBrkChars.index(token)]\n while True:\n t = self.__getNextToken(closingChar)\n if t is None:\n break\n token += t\n if t==closingChar:\n break\n return token\n while True:\n char = self.__getNextChar()\n if char in specialChars:\n if char is None:\n break\n # put it back\n chars.insert(0, char)\n break\n token += char\n return token\n\n\n\n","sub_path":"apps/ice/plugins/indexer/queryTokenizer.py","file_name":"queryTokenizer.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"392701717","text":"from flask import Flask, jsonify, request\nfrom flask_restful import Resource, Api\nfrom database_connect import ConnectDB\n\nimport json, re, requests\n\"\"\"\n\n\tEndpoints : \n\t\tGET method - /performance/listener/{mediaIP}\n\t\tPOST method - /performance/listener\n\tGet method : Takes mediaIP as the last name in endpoint\n Returns current data from local instance or if local instance is empty then it returns lastest entry in db for a given mediaIP \n\n\"\"\"\nclass Performance(Resource):\n #Performance endpoint to collect all media server performance parameter\n #get performance details for a particular machine\n response = {}\n liveInfo = {}\n def get(self,mediaIP):\n connectDB = ConnectDB()\n if mediaIP in Performance.liveInfo:\n 
return json.dumps(Performance.liveInfo[mediaIP])\n data = connectDB.getPerformanceData(mediaIP) \n return data\n\n def post(self):\n per_data = request.get_json(force=True)\n media = \"\"\n timeEntry = \"\"\n record = \"\"\n data = per_data['componentList'][0]['properties']\n if not data:\n return \"Empty data received\"\n for ip in data:\n media = ip\n for timeStamp in data[media]:\n record = data[media][timeStamp]\n timeEntry = timeStamp\n Performance.liveInfo[media] = []\n Performance.liveInfo[media].append({\"record\" : record, \"timestamp\": timeEntry, \"media\": media})\n connectDB = ConnectDB()\n connectDB.insertData(media, timeEntry, record)\n return data\n","sub_path":"DevAppInsight/performance_collector.py","file_name":"performance_collector.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"292302521","text":"import pickle\n\nfrom functools4 import lru_cache\nfrom cacheData2 import CacheData\n \n \ndef checkCache(key):\n result=fib.checkCache(key);\n return result;\n \ndef writeToCache(key,url):\n fib(key,url)\n\n\n\n# open a file, where you stored the pickled data\nfile = open('important2', 'rb')\n\n# dump information to that file\ncacheData = pickle.load(file)\n\n# close the file\nfile.close()\n\n#fib.set_cache_dictionary(result)\n\n#print(fib.cache_info())\n\n\nfib.set_cache_dictionary(cacheData.cache,cacheData.root,cacheData.hit,cacheData.full)\n\n\ncache=fib.get_cache_dictionary()\n\nfor x in range(54,65):\n result=checkCache(x)\n if result is None:\n print(\"Writing to Cache now\")\n writeToCache(x,chr(x))\n else:\n print(\"hit---\"+str(result) )\n\nprint(\"Stage 2\")\nfor x in range(65,80):\n result=checkCache(x)\n if result is None:\n print(\"Writing to Cache now\")\n writeToCache(x,chr(x))\n else:\n print(\"hit---\"+str(result) )\n\n\nprint(\"Stage 3\")\nfor x in range(54,80):\n result=checkCache(x)\n if result is None:\n print(\"Writing to Cache now\")\n writeToCache(x,chr(x))\n else:\n print(\"hit---\"+str(result) )\n \n\n#for x in range(16):\n# fib(x)\n#print(fib.cache_info())\n","sub_path":"map-reduce with caching on HDFS/cacheTest2.py","file_name":"cacheTest2.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"594737749","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom app import server\n\n\nfrom app import app\nfrom layouts3 import layout3,layout4,layout5\n\nimport callbacks\n\napp.layout = html.Div([\n dcc.Location(id='url', refresh=False),\n html.Div(id='page-content')\n])\n\n@app.callback(Output('page-content', 'children'),\n [Input('url', 'pathname')])\ndef display_page(pathname):\n if pathname == '/apps/year':\n return layout3\n elif pathname == '/apps/transmission':\n return layout4\n elif pathname =='/apps/estimateur':\n return layout5\n else:\n return '404'\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"441995083","text":"from flask import Flask, render_template, request\n\nfrom mrc.utils import classify, load, partial_fit\nfrom mrc.review_db import ReviewDB\nfrom mrc.review_form import ReviewForm\n\nfrom pathlib import 
Path\n\n\ncur_dir = Path(__file__).parent\napp = Flask(__name__, template_folder=cur_dir / 'templates')\n\nclf = load(cur_dir / 'data' / 'pkl_objects' / 'clf.pkl')\ndb = ReviewDB(cur_dir / 'data' / 'reviews.sqlite')\n\n\n@app.route('/')\ndef index():\n form = ReviewForm(request.form)\n\n return render_template('review_form.html', form=form)\n\n\n@app.route('/results', methods=['POST'])\ndef results():\n form = ReviewForm(request.form)\n\n if request.method == 'POST' and form.validate():\n review = request.form['movie_review']\n\n label, proba = classify(clf, None, review)\n\n return render_template('results.html', content=review, prediction=label, probability=round(proba * 100, 2))\n\n return render_template('review_form.html', form=form)\n\n\n@app.route('/thanks', methods=['POST'])\ndef feedback():\n fb = request.form['feedback_button']\n review = request.form['review']\n prediction = request.form['prediction']\n\n inv_labels = {'negative': 0, 'positive': 1}\n y = inv_labels[prediction]\n\n if fb == 'Неправильно':\n y = int(not(y))\n\n partial_fit(clf, None, review, y)\n db.insert_entry(review, y)\n\n return render_template('thanks.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"357635907","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 13 13:37:01 2020\r\n\r\n@author: 赵\r\n\"\"\"\r\n\r\nimport numpy as np\r\na=np.arange(2,102,2)\r\nprint(a)\r\ns=np.sum(a)\r\nprint(s)","sub_path":"作业1.py","file_name":"作业1.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"631349901","text":"# -*- encoding: utf-8 -*-\nimport time\nfrom openerp.tools.translate import _\nfrom openerp.osv import orm, fields\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass od_asset_report(orm.TransientModel):\n _name = 'od.asset.report' \n _description = 'Asset Report'\n _columns = {\n 'company_id': fields.many2one('res.company', 'Company',readonly=True),\n 'category_ids': fields.many2many('account.asset.category', 'account_asset_report_rel','category_id','report_id','Asset Category'),\n 'period_from': fields.many2one('account.period', 'Start Period',required=True,domain=\"[('company_id','=',company_id)]\"),\n 'period_to': fields.many2one('account.period', 'End Period',required=True,domain=\"[('company_id','=',company_id)]\"),\n# 'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', required=True)\n } \n _defaults={\n 'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'res.partner', context=c),\n }\n\n def pre_print_report(self, cr, uid, ids, data, context=None):\n if context is None:\n context = {}\n #data['form'].update(self.read(cr, uid, ids, ['display_account'], context=context)[0])\n return data\n\n def build_filter(self,cr,uid,ids,context=None):\n data = self.read(cr, uid, ids,['category_ids','company_id','period_from','period_to'])[0]\n \n if not data.get('category_ids'):\n data['category_ids'] = self.pool['account.asset.category'].search(cr,uid,[('company_id','=',data.get('company_id')[0])])\n if data.get('period_from') or data.get('period_to'):\n acc_period = self.pool['account.period']\n period_from = data.get('period_from') and data.get('period_from')[0] or False\n period_to = data.get('period_to') and 
data.get('period_to')[0] or False\n if not period_from:\n period_from = acc_period.search(cr,uid,[('company_id','=',data.get('company_id')[0])])\n period_from.sort()\n period_from = period_from[0]\n if not period_to:\n period_to = acc_period.search(cr,uid,[('company_id','=',data.get('company_id')[0])])\n period_to.sort(reverse=True)\n period_to=period_to[0]\n acc_periods = acc_period.search(cr,uid,[('company_id','=',data.get('company_id')[0]),('id','<',period_from)])\n data['accum_periods'] = acc_periods or []\n data['period_ids'] = acc_period.build_ctx_periods(cr,uid,period_from,period_to)\n return data\n\n def print_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n data = self.build_filter(cr,uid,ids,context=context)\n return self.pool['report'].get_action(cr, uid, [], 'orchid_asset.report_asset_statement', data=data, context=context)\n\n","sub_path":"orchid_asset/wizard/asset_report.py","file_name":"asset_report.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"459690032","text":"from Clase5.app.EmployeeMod import Empleado\r\nfrom Clase5.app.ExecutiveMod import Ejecutivo\r\n\r\n\r\ndef agregarpersonal(nom, sal, tipo):\r\n if tipo == \"Ej\":\r\n Ex.append(Ejecutivo(nom, sal))\r\n elif tipo == \"Emp\":\r\n Em.append(Empleado(nom, sal))\r\n else:\r\n return 0\r\n\r\n\r\nEx = []\r\nEm = []\r\n\r\nif __name__ == \"__main__\":\r\n\r\n banderin = True\r\n\r\n while banderin:\r\n try:\r\n a = int(input(\"Elegir una opción: \\n1.Agregar empleado - 2.Ver empleado - 3.Salir\\n\\n\"))\r\n banderin = False\r\n except: ValueError: print(\"No fue una opcion valida introducida, intentar de nuevo...\")\r\n\r\n else:\r\n\r\n if a == 1:\r\n try:\r\n x = int(input(\"Elegir una categoria de trabajador: --> 1.Ejecutivo - 2.Empleado\\n\\n\"))\r\n except: ValueError: print(\"No fue una opcion valida introducida, intentar de nuevo...\")\r\n else:\r\n\r\n if x == 1:\r\n\r\n NomEj = str(input(\"Ingresar nombre del ejecutivo: \"))\r\n try:\r\n SalEj = float(input(\"Ingresar salario: \"))\r\n except: ValueError: print(\"Salario no valido, intentar nuevamente\")\r\n else:\r\n\r\n agregarpersonal(NomEj, SalEj, \"Ej\")\r\n\r\n elif x == 2:\r\n\r\n NomEm = str(input(\"Ingresar nombre del empleado: \"))\r\n try:\r\n SalEm = float(input(\"Ingresar salario: \"))\r\n except: ValueError: print(\"Salario no valido, intentar nuevamente\")\r\n else:\r\n\r\n agregarpersonal(NomEm, SalEm, \"Emp\")\r\n\r\n banderin = True\r\n\r\n elif a == 2:\r\n\r\n if len(Ex) > 0 or len(Em) > 0:\r\n\r\n for i in range(0,len(Ex)):\r\n print(\"No hay ejecutivos\\n\") if Ex == \"\" else Ex[i].mostrar_empleado()\r\n\r\n for j in range(0,len(Em)):\r\n print(\"No hay empleados\\n\") if Em == \"\" else Em[j].mostrar_empleado()\r\n\r\n Em[0].mostrar_conteo()\r\n\r\n else:\r\n\r\n print(\"No hay personal registrado, favor de agregar personal\")\r\n\r\n\r\n banderin = True\r\n\r\n elif a == 3:\r\n\r\n print(\"Adios\")\r\n banderin = False\r\n\r\n else:\r\n print(\"No es una opcion valida, intentar de nuevo\")\r\n banderin = True\r\n\r\n\r\n","sub_path":"Quiz2/app/Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"565460784","text":"import discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport time\r\n\r\ndescription = \"images & words bot!\"\r\nbot = commands.Bot(command_prefix='$', 
description=description)\r\nLP_CHANNEL_IDS = [419732859360641024, 457320312124604416]\r\n#TEST_CHANNEL_IDS = [419732859360641024, 457320312124604416, 430646748487221261, 455253321067003927]\r\n\r\nCD_ALLOWED = {x: True for x in LP_CHANNEL_IDS}\r\n#CD_ALLOWED = {x: True for x in TEST_CHANNEL_IDS}\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(bot.user.name)\r\n print(bot.user.id)\r\n print('------')\r\n\r\n@bot.command()\r\nasync def cd(ctx):\r\n \"\"\"Auto countdown from 10 for LPs. Only allowed in LP channels.\"\"\"\r\n if ctx.channel.id in CD_ALLOWED: \r\n if CD_ALLOWED[ctx.channel.id]:\r\n CD_ALLOWED[ctx.channel.id] = False\r\n for i in range(10,0, -1):\r\n await ctx.send(i)\r\n time.sleep(1)\r\n await ctx.send('GO')\r\n CD_ALLOWED[ctx.channel.id] = True\r\n else:\r\n await ctx.send('Countdown is allowed only in LP channels.')\r\n\r\n# Make sure you have a token.txt in the same directory as main.py containing ONLY the bot token\r\ntoken = open('token.txt', 'r').read()\r\nbot.run(token)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"165372623","text":"import cloudpickle\nfrom flask import Flask, render_template, flash, url_for, redirect, request\nfrom flask_wtf import FlaskForm\nfrom wtforms import SubmitField, DateField, DecimalField, IntegerField, SelectField\nfrom wtforms.validators import DataRequired, NumberRange\nimport pandas as pd\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'proyecto1'\n\nmodel = cloudpickle.load(open('model_carpre_vb.pkl', 'rb'))\n\nclass Cuestionario(FlaskForm):\n marcas=['Opel', 'Daewoo', 'Ambassador', 'Ashok', 'Datsun', 'Fiat', 'Force',\n 'Nissan', 'Maruti', 'Renault', 'Chevrolet', 'Volkswagen', 'Tata',\n 'Mahindra', 'MG', 'Kia', 'Isuzu', 'Hyundai', 'Mitsubishi', 'Honda',\n 'Volvo', 'Ford', 'Skoda', 'Jaguar', 'Toyota', 'Jeep', 'Land', 'Lexus',\n 'Mercedes-Benz', 'Audi', 'BMW']\n \n fuel_cat = ['Diesel', 'Petrol', 'LPG', 'CNG']\n transmission_cat = ['Manual', 'Automatic']\n seller_cat = ['Individual', 'Dealer', 'Trustmark Dealer']\n owner_cat = ['First Owner', 'Second Owner', 'Third Owner', 'Fourth & Above Owner', 'Test Drive Car']\n \n marca =SelectField('Marca de Automovil', choices=marcas)\n fuel = SelectField('Tipo de Combustible', choices=fuel_cat)\n seller_type = SelectField('Tipe de Vendedor', choices=seller_cat)\n transmission = SelectField('Transmision', choices=transmission_cat)\n owner = SelectField('Dueño', choices=owner_cat)\n \n seats= IntegerField('Número de asientos', validators=[DataRequired(), NumberRange(min=1, max=8, message='Cantidad invalida de asientos permitada')])\n \n year = IntegerField('Año del auto', validators=[DataRequired(), NumberRange(min=1960, max=2021, message='Ingresar valor no mayor a 2021')])\n \n \n km_driven=DecimalField('Kilómetros conducidos', validators=[DataRequired()])\n mileage=DecimalField('Kilometraje (Mileage)',validators=[DataRequired()] )\n max_power=DecimalField('Break Horse Power (max_power)', validators=[DataRequired()])\n \n submit=SubmitField('Submit')\n \n \n@app.route('/')\n@app.route('/home', methods=['GET'])\ndef base():\n return render_template('base.html')\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n form = Cuestionario()\n if request.method == \"POST\":\n \n marca = form.marca.data\n fuel = form.fuel.data\n seller = form.seller_type.data\n trans = form.transmission.data\n ow = 
form.owner.data\n seats = form.seats.data\n year = int(2021-form.year.data)\n km = form.km_driven.data\n mil = form.mileage.data\n maxp = form.max_power.data\n \n \n val = [ marca, fuel, seller, trans, ow, seats, year, km, mil, maxp]\n col = ['marca','fuel','seller_type','transmission','owner','seats','num_year','km_driven','mileage','max_power']\n \n df_pred = pd.DataFrame(val).T\n df_pred.columns = col\n \n prediction = model.predict(df_pred)\n flash(\"Precio sugerido: {0}\".format(prediction[0]), 'success')\n #else:\n # flash(\"Valores incorrectos\", 'warning')\n return redirect(url_for('base'))\n else:\n return render_template('prediction.html', form=form)\n \n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"car_app.py","file_name":"car_app.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"592756095","text":"from flask import render_template, url_for, flash, redirect, request, jsonify\nfrom flask_server import app, db, socketio, loc_bridge\nfrom flask_server.models import Measurement, Measurement_test\nimport json\nimport math\nfrom help_module.webcam_helper import config_webcam_ip, save_webcam_frame, start_webcams, remove_webcam, stop_webcams\nfrom help_module.calibration_helper import add_calibration_point, get_calibration_points, remove_calibration_point, get_calibration_co\nimport datetime\n\n@app.route('/sensor/debug', methods=['POST'])\ndef receive_sensor_debug():\n data = request.json\n # print(data)\n # print(\"---\")\n data['data'] = [0 if math.isnan(a) else a * 5 for a in data['data']]\n # print(data[\"data\"])\n new_db_data = Measurement(sensor_id=data[\"device_id\"], data=data[\"data\"], sequence_id=data[\"sequence\"], data_type=2)\n db.session.add(new_db_data)\n db.session.commit()\n\n # socketio.emit('new_image', {'device_id': data['device_id']})\n # save_webcam_frame(new_db_data)\n # print(data['data'])\n loc_bridge.update(data[\"device_id\"], data[\"data\"], new_db_data.timestamp)\n\n return \"Hello World!\"\n\n@app.route('/sensor/simulate', methods=['POST'])\ndef receive_simulate():\n # print(\"Simulated request\")\n data = request.json\n new_db_data = Measurement_test(sensor_id=data[\"device_id\"], data=data[\"data\"], sequence_id=data[\"sequence\"], data_type=0)\n db.session.add(new_db_data)\n db.session.commit()\n\n # socketio.emit('new_image', {'device_id': data['device_id']})\n\n loc_bridge.update(data[\"device_id\"], data[\"data\"], new_db_data.timestamp)\n\n return \"Succes\"\n\n@app.route('/sensor/simulate_no_save', methods=['POST'])\ndef receive_simulate_no_save():\n # print(\"Simulated request\")\n data = request.json\n\n loc_bridge.update(data[\"device_id\"], data[\"data\"], datetime.datetime.now())\n\n return \"Succes\"\n\n\n@app.route('/sensor/bits', methods=['POST'])\ndef receive_sensor_bits():\n data = request.json\n socketio.emit('new_image', {'device_id': data['device_id']})\n print(data)\n data['data'] = [0 if math.isnan(a) else a for a in data['data']]\n print(data)\n\n db.session.add(data)\n db.session.commit()\n print(data)\n\n print('NEW post request')\n\n return \"Hello World!\"\n\n\n@app.route('/sensor/raw', methods=['POST'])\ndef test_cbor():\n print(\"============== CBOR Test ================\")\n data = request.get_data()\n print(data)\n sensor_data = [int(a) for a in data]\n sensor_id = sensor_data[0]\n seq_id = sensor_data[1]\n thermal_data = sensor_data[2:]\n\n new_db_data = Measurement(sensor_id=sensor_id, data=thermal_data, 
sequence_id=seq_id, data_type=0)\n db.session.add(new_db_data)\n db.session.commit()\n\n socketio.emit('new_image', {'device_id': sensor_id})\n\n print(\" CBOR STOP \")\n return 'Hello'\n\n@app.route('/data/last', methods=['GET'])\ndef send_data():\n last_result = Measurement.query.order_by(Measurement.timestamp.desc()).first()\n ret_json = {\"data\":last_result.data, \"time\":last_result.timestamp, \"sensor_id\": last_result.sensor_id}\n print(last_result)\n return jsonify(ret_json)\n\n@app.route('/webcam/setip', methods=['POST'])\ndef configure_webcam_ip():\n sensor_id = request.form.get('sensor_id')\n ip = request.form.get('ip')\n print(sensor_id)\n print(ip)\n if sensor_id is None or ip is None:\n return \"Invalid request\"\n\n config_webcam_ip(int(sensor_id), ip)\n\n return redirect(url_for('config_webcams'))\n\n@app.route('/webcam/start', methods=['GET'])\ndef start_webcams_req():\n start_webcams()\n return redirect(url_for('config_webcams'))\n\n@app.route('/webcam/stop', methods=['GET'])\ndef stop_webcams_req():\n stop_webcams()\n return redirect(url_for('config_webcams'))\n\n@app.route('/webcam//delete', methods=['GET'])\ndef delete_webcam(sensor_id):\n remove_webcam(sensor_id)\n return redirect(url_for('config_webcams'))\n\n@app.route('/config/calibrate/addpoint', methods=['POST'])\ndef config_add_calibration_point():\n name = request.form.get('name')\n co_x = int(request.form.get('xco'))\n co_y = int(request.form.get('yco'))\n print(f'Adding calibration point: ({co_x}, {co_y})')\n\n add_calibration_point(name, (co_x, co_y))\n return redirect(url_for('config_calibrate'))\n\n@app.route('/config/calibrate//delete', methods=['GET'])\ndef config_remove_calibration_point(pointname):\n remove_calibration_point(pointname)\n # loc_bridge.calibrate_point((co_x, co_y))\n return redirect(url_for('config_calibrate'))\n\n@app.route('/config/calibrate//start', methods=['GET'])\ndef config_start_calibration_point(pointname):\n co = get_calibration_co(pointname)\n loc_bridge.calibrate_point(pointname, co)\n return redirect(url_for('config_calibrate'))\n\n@app.route('/config/calibrate/save', methods=['GET'])\ndef config_save_calibration_data():\n loc_bridge.bridge_save_cal_data()\n return redirect(url_for('config_calibrate'))\n\n\n\n","sub_path":"server/flask_server/routes_backend.py","file_name":"routes_backend.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421403407","text":"#Complete making sandwishes\nsandwish_orders = ['tuna', 'chicken', 'rain_forest', 'pineapple', 'watermelon']\nfinished_sandwishes = []\n\n#按照序号遍历,在remove之后出现少遍历的异常\nfor sandwish in sandwish_orders:\n print(\"I made your \" + sandwish + \" sandwish.\")\n finished_sandwishes.append(sandwish)\n sandwish_orders.remove(sandwish) \n\n# while sandwish_orders:\n# sandwish = sandwish_orders.pop()\n# print(\"I made your \" + sandwish + \" sandwish.\")\n# finished_sandwishes.append(sandwish)\n\nprint(\"\\nFinished sandwishes are: \")\nfor sandwish in finished_sandwishes:\n print(sandwish)\n\n","sub_path":"7.用户输入和while循环/7.3/sandwish.py","file_name":"sandwish.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"218937269","text":"import sys\nimport math\n\ninfile = open(sys.argv[1], 'r')\noutfile = open('wing_body_ratio_2_{0}'.format(sys.argv[1]), 'w')\n\nfam_dict = {}\noutfile.write('family,wing_body_ratio\\r')\nlines = 
infile.readlines()[0].split('\\r')\nentry = 0\nfor l in lines:\n entry += 1\n spl_line = l.split(',')\n if len(spl_line) < 2: continue\n family = spl_line[3]\n if len(family) < 2:\n print('entry {0}: {1}'.format(entry, family))\n species = spl_line[1]\n if family in fam_dict:\n fam_dict[family].update({species : [None,None]})\n else:\n fam_dict.update({family : {species : [None,None]}})\n\nfor l in lines[1:]:\n spl_line = l.split(',')\n if len(spl_line) < 2: continue\n family = spl_line[3]\n species = spl_line[1]\n size = float(spl_line[14])\n part = spl_line[9].lower()\n if part == 'body':\n fam_dict[family][species][0] = (size / 6)**2 * size * math.pi * 0.5\n else:\n fam_dict[family][species][1] = size**2 / 2\n\nout_dict = dict.fromkeys(family)\nfor family in fam_dict.keys():\n species_ratios = []\n for species in fam_dict[family].keys():\n body_wing = fam_dict[family][species]\n if None not in body_wing:\n species_ratios.append(body_wing[1] / body_wing[0])\n if species_ratios:\n out_dict[family] = sum(species_ratios) / len(species_ratios)\n else:\n out_dict[family] = 0\n\nfor family in out_dict.keys():\n outfile.write('{0},{1}\\r'.format(family, out_dict[family]))\n\ninfile.close()\noutfile.close()\n","sub_path":"calculate_wing_body_ratio.py","file_name":"calculate_wing_body_ratio.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"97162929","text":"# Program to convert infix expression to prefix expression\n \n# method to reverse a string and return the reversed string\ndef reverse(str):\n return(\"\".join(reversed(str)))\n \n \n# method that returns the priority of the operator\ndef priority(operator):\n if operator == '+' or operator == '-':\n return 1\n \n elif operator == '*' or operator == '/' or operator == '%':\n return 2\n \n elif operator == '^':\n return 3\n \n else:\n return 0\n \n# method that converts string in infix to prefix\n# all the strings are assumed to be valid expressions\ndef in2prefix(infixString):\n \n infixString = reverse(infixString)\n \n # taking a list variable to store operators\n stack = []\n # result string variable\n prefixString = \"\"\n i = 0\n \n # loop till i is in the range of the length of string\n while i in range(0, len(infixString)):\n \n # if an alphabet is found then copy it to the output string\n if infixString[i].isalpha():\n prefixString += infixString[i]\n \n # as we have reversed the string closing bracket will be found first\n # if an closing bracket is found then put it in stack\n elif infixString[i] == ')' or infixString[i] == ']' or infixString[i] == '}':\n stack.append(infixString[i])\n \n \n # as we have reversed the string opening bracket will be found after the closing bracket\n # if an opening bracket is found then\n # keep removing the operators from the stack and add them to prefix string until you find the corresponding opening bracket\n elif infixString[i] == '(':\n \n if infixString[i] == '(':\n while stack[-1] != ')':\n prefixString += stack.pop()\n stack.pop()\n \n \n # if none of the above cases are satisfied then we surely have an operator\n else:\n \n # if the stack if empty then we simply put the operator in stack\n if len(stack) == 0:\n stack.append(infixString[i])\n \n # if not then we compare the priority of the stack top and current operator\n else:\n \n # if the priority of current operator is greater than or equal to the stack top then push it onto the stack\n if priority(infixString[i]) >= priority(stack[-1]):\n 
stack.extend(infixString[i])\n \n \n # if the priority of current operator is less than the stack top then\n # pop the stack top and add it to the prefix string\n elif priority(infixString[i]) < priority(stack[-1]):\n prefixString += stack.pop()\n position = len(stack) - 1\n \n # now if you have an operator that has less priority as of current operator then pop\n while position >= 0 and priority(stack[position]) > priority(infixString[i]):\n prefixString += stack.pop()\n position -= 1\n if position < 0:\n break\n \n stack.extend(infixString[i])\n \n # increment the value of i\n i += 1\n \n # at the end remove all the operators from the stack\n while len(stack) != 0:\n prefixString += stack.pop()\n \n # reverse the string before output \n prefixString = reverse(prefixString)\n \n # return the result\n return prefixString\n \n# main function\ndef brackets_trim(a):\n global infix\n infix = a\n infix = infix.replace(' ', '')\n \n \nbrackets_trim('a+b+c')\nprefix = in2prefix(infix)\nprefix = list(prefix)\n#print(\"The converted Expression in Postfix is : \" + prefix)\n\nclass Calculator:\n def __init__ (self):\n self.stack = []\n\n def push (self, p):\n if p in ['-', '+']:\n op1 = self.stack.pop ()\n op2 = self.stack.pop ()\n self.stack.append ('(%s %s %s)' % (op1, p, op2) )\n elif p in ['*', '/']:\n op1 = self.stack.pop ()\n op2 = self.stack.pop ()\n self.stack.append ('%s %s %s' % (op1, p, op2) )\n\n else:\n self.stack.append (p)\n\n def convert (self, l):\n l.reverse ()\n for e in l:\n self.push (e)\n return self.stack.pop ()\n\nc = Calculator ()\na= (c.convert ( prefix ))\nprint(a)\na = list(a)\n\n\n \nt = ['+', '-']\nz = ['/', '*']\nfor i in a:\n if i in t:\n a = ''.join(a)\n a = a.replace(')', '')\n a = a.replace('(', '')\n if i in z:\n a = ''.join(a) \n print(a)\n #break\n\n\n\n\na = ''.join(a)\nprint(a)\n\n\n\n\n\n\n","sub_path":"hw4.py","file_name":"hw4.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"394765938","text":"# __*__encoding:utf8__*__\nimport openpyxl\nimport openpyxl.worksheet\n\nwb = openpyxl.load_workbook(\"files/adfc.xlsx\")\nst = wb.active\n\nrow_max = st.max_row\nprint(row_max)\ncell1 = []\ncell2 = []\ncell3 = []\n\nfor i in range(1, row_max + 1):\n btxt1 = st.cell(row=i, column=2).value\n btxt2 = st.cell(row=i, column=2).value\n btxt3 = st.cell(row=i, column=2).value\n\n if (btxt1 == u\"元件合计\"):\n cell1.append(i)\n if (btxt2 == u\"税管费\"):\n cell2.append(i)\n if (btxt3 == u\"单台合计\"):\n cell3.append(i)\n\nprint(len(cell1))\nprint(len(cell2))\nprint(len(cell3))\n\nfor i in range(0, len(cell1)):\n st['G' + str(cell3[i])].value = \"=SUM(G\" + str(cell1[i]) + \":G\" + str(cell2[i]) + \")\"\nwb.save(\"abss.xlsx\")\n","sub_path":"autosum.py","file_name":"autosum.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"310016022","text":"# coding=utf-8\nimport datetime\nimport io\nimport urllib\nimport os.path\nfrom datetime import datetime\nfrom time import mktime\nimport time\nimport pytz\nfrom root import file_paths_config\nimport pandas as pd\nfrom flask import request, jsonify, render_template\nfrom flask_login import login_required, current_user\nimport requests\nfrom openpyxl import load_workbook\nimport xlrd\nfrom root.global_functions import date_to_unix_ym\n\nfrom database import db_session, engine\nfrom root import provozni_hodnoty # připojení složky sprava_kgj\n\nfrom 
root.models.model_kgj_values import KgjMeasuredValM\nfrom root.models.model_ote_m_data import PhElektrinaOteM\nfrom root.models.m_ph_m_plyn import PhPlynFaktM, DistribOblPly, PhSpalTepM\nimport wget\nimport xlwings as xw\n\n# provázání na sprava_kgj\\__init__.py\nprovozni_hodnoty_page = provozni_hodnoty.provozni_hodnoty_page\n\n\n# nepouzívá se app.route, ale přímo název aplikce (kgj_data_page.route)\n# v html se potom volá \n\n\n@provozni_hodnoty.provozni_hodnoty_page.route(\"/ph_spalne_teplo_test\", methods=['GET', 'POST'])\n@login_required\ndef ph_spal_tep_get():\n year = 2019\n month = 12\n date_record = int(date_to_unix_ym(year, month))\n print(date_record)\n\n records = DistribOblPly.query.all()\n for record in records:\n print(record)\n actual_record = record.spal_tep_m(date_record)\n print(actual_record)\n if actual_record is None:\n try:\n\n print('zanam ješte v DB neni')\n save_dir = \"C:/_Coding/Pyton/EnergoToolsV3/Downloads/spaltep/\" + str(\n year) + tdm(month) + \"_\" + record.file_name + \".csv\"\n if os.path.exists(save_dir) is True:\n spal_tep = get_from_excel(save_dir, record)\n print(str(spal_tep).replace(',', '.'))\n\n new_record = PhSpalTepM(datetime_up=int(time.time()),\n user=1,\n date_record=date_record,\n spaltep_distrib=str(spal_tep).replace(',', '.'),\n id_distrib_obl=record.id)\n db_session.add(new_record)\n\n else:\n request_url = make_url(str(year), month, record.file_name)\n urllib.request.urlretrieve(request_url, save_dir)\n spal_tep = get_from_excel(save_dir, record)\n print(str(spal_tep).replace(',', '.'))\n\n new_record = PhSpalTepM(datetime_up=int(time.time()),\n user=1,\n date_record=date_record,\n spaltep_distrib=str(spal_tep).replace(',', '.'),\n id_distrib_obl=record.id)\n\n db_session.add(new_record)\n except:\n pass\n\n db_session.commit()\n\n\n return render_template(\"homepage.html\")\n\ndef get_from_excel(save_dir, record):\n if record.file_name == \"eon\":\n pass\n elif record.file_name == \"pre\":\n pass\n else:\n df = pd.read_csv(save_dir, engine='python', error_bad_lines=False, sep='delimiter', delimiter=';')\n try:\n dfx = df.loc[df['OblKonstVýhřPly'] == record.obl_id_csv]\n return dfx['Měsíční průměr spaln. 
tepla'].item()\n except:\n dfx = df.loc[df['OblKonSpT'] == record.obl_id_csv]\n return dfx['Měsíč.prům'].item()\n return\n\n\n# Make url adress from year, month and distrib obl\ndef make_url(year, month, obl):\n actual_month = month+1\n month = tdm(month)\n if obl == \"eon\":\n url = \"https://portal.eon-distribuce.cz/spalne-teplo/index.php?gv_oblast_id=1110&gv_obec_id=0&gv_mesic=\" + \\\n month + \"&gv_rok=\" + year + \"&gv_export=1\"\n elif obl == \"pre\":\n url = \"http://www.ppdistribuce.cz/sites/default/files/\" + year+ \"-\" + tdm(actual_month) + \"/\" + \\\n month + \"_\" + year[-2:] + \"%20Protokol%20ZP%20jakostni%CC%81%20znaky.xls\"\n else:\n base_url = \"https://www.gasnet.cz/data/spalna_tepla/\"\n url = base_url + year + month + \"_\" + obl + \".csv\"\n return url\n\n\n# Add zero (0) before month nuber if <10\ndef tdm(month):\n if int(month) < 10:\n return \"0\"+str(month)\n else:\n return str(month)\n\n","sub_path":"root/provozni_hodnoty/ph_spal_tep.py","file_name":"ph_spal_tep.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350352154","text":"import torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom collections import OrderedDict\nfrom easydict import EasyDict\nfrom _main_base import main\nimport os\n\n#---\n# config\n#---\ncfg = EasyDict()\n\n# class\ncfg.CLASS_LABEL = ['akahara', 'madara']\ncfg.CLASS_NUM = len(cfg.CLASS_LABEL)\n\n# model\ncfg.INPUT_HEIGHT = 64\ncfg.INPUT_WIDTH = 64\ncfg.INPUT_CHANNEL = 3\n\ncfg.GPU = False\ncfg.DEVICE = torch.device(\"cuda\" if cfg.GPU and torch.cuda.is_available() else \"cpu\")\n\ncfg.MODEL_SAVE_PATH = 'models/Xception_{}.pt'\ncfg.MODEL_SAVE_INTERVAL = 200\ncfg.ITERATION = 1000\ncfg.MINIBATCH = 8\ncfg.OPTIMIZER = torch.optim.SGD\ncfg.LEARNING_RATE = 0.01\ncfg.MOMENTUM = 0.9\ncfg.LOSS_FUNCTION = loss_fn = torch.nn.NLLLoss()\n\ncfg.TRAIN = EasyDict()\ncfg.TRAIN.DISPAY_ITERATION_INTERVAL = 50\n\ncfg.TRAIN.DATA_PATH = '../Dataset/train/images/'\ncfg.TRAIN.DATA_HORIZONTAL_FLIP = True\ncfg.TRAIN.DATA_VERTICAL_FLIP = True\ncfg.TRAIN.DATA_ROTATION = False\n\ncfg.TEST = EasyDict()\ncfg.TEST.MODEL_PATH = cfg.MODEL_SAVE_PATH.format('final')\ncfg.TEST.DATA_PATH = '../Dataset/test/images/'\ncfg.TEST.MINIBATCH = 2\n\n# random seed\ntorch.manual_seed(0)\n\n\nclass Xception(torch.nn.Module):\n def __init__(self):\n super(Xception, self).__init__()\n\n class Block(torch.nn.Module):\n def __init__(self, dim=728, cardinality=1):\n super(Block, self).__init__()\n\n self.block = torch.nn.Sequential(\n torch.nn.ReLU(),\n torch.nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1, groups=cardinality),\n torch.nn.BatchNorm2d(dim),\n torch.nn.ReLU(),\n torch.nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1, groups=cardinality),\n torch.nn.BatchNorm2d(dim),\n torch.nn.ReLU(),\n torch.nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1, groups=cardinality),\n torch.nn.BatchNorm2d(dim),\n )\n \n def forward(self, x):\n res_x = self.block(x) \n x = torch.add(res_x, x)\n return x\n\n # Entry flow\n self.conv1 = torch.nn.Conv2d(cfg.INPUT_CHANNEL, 32, kernel_size=3, padding=1, stride=2)\n self.bn1 = torch.nn.BatchNorm2d(32)\n \n self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=1)\n self.bn2 = torch.nn.BatchNorm2d(64)\n \n self.conv3 = torch.nn.Sequential(\n torch.nn.Conv2d(64, 128, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(128),\n torch.nn.ReLU(),\n torch.nn.Conv2d(128, 128, kernel_size=3, 
padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(128),\n torch.nn.MaxPool2d(3, stride=2, padding=1))\n self.conv3_sc = torch.nn.Conv2d(64, 128, kernel_size=1, padding=0, stride=2)\n self.bn3_sc = torch.nn.BatchNorm2d(128)\n \n self.conv4 = torch.nn.Sequential(\n torch.nn.ReLU(),\n torch.nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(256),\n torch.nn.ReLU(),\n torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(256),\n torch.nn.MaxPool2d(3, stride=2, padding=1))\n self.conv4_sc = torch.nn.Conv2d(128, 256, kernel_size=1, padding=0, stride=2)\n self.bn4_sc = torch.nn.BatchNorm2d(256)\n \n self.conv5 = torch.nn.Sequential(\n torch.nn.ReLU(),\n torch.nn.Conv2d(256, 728, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(728),\n torch.nn.ReLU(),\n torch.nn.Conv2d(728, 728, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(728),\n torch.nn.MaxPool2d(3, stride=2, padding=1))\n self.conv5_sc = torch.nn.Conv2d(256, 728, kernel_size=1, padding=0, stride=2)\n self.bn5_sc = torch.nn.BatchNorm2d(728)\n \n # Middle flow\n self.middle_flow = torch.nn.Sequential(\n *[Block() for _ in range(8)]\n )\n \n # Exit flow\n self.conv_exit1 = torch.nn.Sequential(\n torch.nn.ReLU(),\n torch.nn.Conv2d(728, 728, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(728),\n torch.nn.ReLU(),\n torch.nn.Conv2d(728, 1024, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(1024),\n torch.nn.MaxPool2d(3, stride=2, padding=1))\n self.conv_exit1_sc = torch.nn.Conv2d(728, 1024, kernel_size=1, padding=0, stride=2)\n self.bn_exit1_sc = torch.nn.BatchNorm2d(1024)\n \n self.conv_exit2 = torch.nn.Sequential(\n torch.nn.Conv2d(1024, 1536, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(1536),\n torch.nn.ReLU(),\n torch.nn.Conv2d(1536, 2048, kernel_size=3, padding=1, stride=1, groups=1),\n torch.nn.BatchNorm2d(2048),)\n \n self.linear = torch.nn.Linear(2048, cfg.CLASS_NUM)\n \n \n def forward(self, x):\n # Entry flow\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n \n x = self.conv2(x)\n x = self.bn2(x)\n x = F.relu(x)\n \n x_sc = self.conv3_sc(x)\n x_sc = self.bn3_sc(x_sc)\n x = self.conv3(x)\n x = torch.add(x_sc, x)\n \n x_sc = self.conv4_sc(x_sc)\n x_sc = self.bn4_sc(x_sc)\n x = self.conv4(x)\n x = torch.add(x_sc, x)\n \n x_sc = self.conv5_sc(x_sc)\n x_sc = self.bn5_sc(x_sc)\n x = self.conv5(x)\n x = torch.add(x_sc, x)\n \n # Middle flow\n x = self.middle_flow(x)\n \n # Exit flow\n x_sc = self.conv_exit1_sc(x)\n x_sc = self.bn_exit1_sc(x_sc)\n x = self.conv_exit1(x)\n x = torch.add(x_sc, x)\n \n x = self.conv_exit2(x)\n\n x = F.avg_pool2d(x, [cfg.INPUT_HEIGHT // 32, cfg.INPUT_WIDTH // 32], padding=0, stride=1)\n x = x.view(x.size()[0], -1)\n x = self.linear(x)\n x = F.softmax(x, dim=1)\n \n return x\n\n# main\nif __name__ == '__main__':\n\n model_save_dir = '/'.join(cfg.MODEL_SAVE_PATH.split('/')[:-1])\n os.makedirs(model_save_dir, exist_ok=True)\n\n main(cfg, Xception())","sub_path":"Scripts_Model/scripts_pytorch/Xception_pytorch.py","file_name":"Xception_pytorch.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"420678201","text":"\"\"\"Sensor platform for wattbox.\"\"\"\r\nimport logging\r\n\r\nimport voluptuous as vol\r\nimport homeassistant.helpers.config_validation as cv\r\nfrom homeassistant.const import CONF_NAME, 
CONF_RESOURCES\r\nfrom homeassistant.helpers.entity import Entity\r\n\r\nfrom . import update_data\r\nfrom .const import DOMAIN_DATA, SENSOR_TYPES\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\n\r\nasync def async_setup_platform(\r\n hass, config, async_add_entities, discovery_info=None\r\n): # pylint: disable=unused-argument\r\n \"\"\"Setup sensor platform.\"\"\"\r\n name = discovery_info[CONF_NAME]\r\n entities = []\r\n\r\n for resource in discovery_info[CONF_RESOURCES]:\r\n sensor_type = resource.lower()\r\n\r\n if sensor_type not in SENSOR_TYPES:\r\n continue\r\n\r\n entities.append(WattBoxSensor(hass, name, sensor_type))\r\n\r\n async_add_entities(entities, True)\r\n\r\n\r\nclass WattBoxSensor(Entity):\r\n \"\"\"WattBox Sensor class.\"\"\"\r\n\r\n def __init__(self, hass, name, sensor_type):\r\n self.hass = hass\r\n self.attr = {}\r\n self.type = sensor_type\r\n self.wattbox_name = name\r\n self._name = name + \" \" + SENSOR_TYPES[self.type][0]\r\n self._state = None\r\n self._unit = SENSOR_TYPES[self.type][1]\r\n\r\n async def async_update(self):\r\n \"\"\"Update the sensor.\"\"\"\r\n # Send update \"signal\" to the component\r\n await update_data(self.hass, self.wattbox_name)\r\n\r\n # Get new data (if any)\r\n updated = self.hass.data[DOMAIN_DATA][self.wattbox_name]\r\n\r\n # Check the data and update the value.\r\n self._state = getattr(updated, self.type)\r\n\r\n @property\r\n def name(self):\r\n \"\"\"Return the name of the sensor.\"\"\"\r\n return self._name\r\n\r\n @property\r\n def state(self):\r\n \"\"\"Return the state of the sensor.\"\"\"\r\n return self._state\r\n\r\n @property\r\n def icon(self):\r\n \"\"\"Return the icon of the sensor.\"\"\"\r\n return SENSOR_TYPES[self.type][2]\r\n\r\n @property\r\n def device_state_attributes(self):\r\n \"\"\"Return the state attributes.\"\"\"\r\n return self.attr\r\n\r\n @property\r\n def unit_of_measurement(self):\r\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\r\n return self._unit\r\n","sub_path":"custom_components/wattbox/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"635574647","text":"#no.2\r\n\r\nclass MhsTIF(object):\r\n \"\"\"docstring for MhsTIF\"\"\"\r\n def __init__(self, nama, nim, kota, uangS):\r\n self.nama = nama\r\n self.nim = nim\r\n self.kota = kota\r\n self.uangSaku = uangS\r\n\r\nclass my_Array(object):\r\n \"\"\"docstring for bikinArray\"\"\"\r\n def __init__(self):\r\n self.index = 11*[None]\r\n def __getitem__(self, item):\r\n getData = self.index[item]\r\n return getData\r\n def __setitem__(self, key, value):\r\n self.index[key] = value\r\n\r\nc = my_Array()\r\n\r\nc[0] = MhsTIF(\"Ika\", 10, \"Sukoharjo\", 240000)\r\nc[1] = MhsTIF(\"Budi\", 51, \"Sragen\", 230000)\r\nc[2] = MhsTIF(\"Ahmad\", 2, \"Surakarta\", 250000)\r\nc[3] = MhsTIF(\"Candra\", 18, \"Surakarta\", 235000)\r\nc[4] = MhsTIF(\"Eka\", 4, \"Boyolali\", 240000)\r\nc[5] = MhsTIF(\"Fandi\", 31, \"Salatiga\", 250000)\r\nc[6] = MhsTIF(\"Deni\", 13, \"Klaten\", 245000)\r\nc[7] = MhsTIF(\"Galuh\", 5, \"Wonogiri\", 245000)\r\nc[8] = MhsTIF(\"Janto\", 23, \"Klaten\", 245000)\r\nc[9] = MhsTIF(\"Hasan\", 64, \"Karanganyar\", 270000)\r\nc[10] = MhsTIF(\"Khalid\", 29, \"Purwodadi\", 210000)\r\n\r\ndef cariUangKecil():\r\n final = [None,None]\r\n sebelum = 0\r\n x = 0\r\n l = len(c.index) - 1\r\n while x <= l:\r\n try:\r\n sebelum = c[x]\r\n nexta = x + 1\r\n if sebelum.uangSaku <= 
c[nexta].uangSaku:\r\n sebelum = c[x]\r\n final[0] = c[x].nama\r\n final[1] = c[x].uangSaku\r\n x += 1\r\n except IndexError:\r\n break\t\r\n return final\r\n","sub_path":"modul_4_L200170071/no.2.py","file_name":"no.2.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"373133524","text":"import pathlib\n\nimport setuptools\n\n\ndocs_requires = [\"sphinx\"]\ntests_requires = ['pytest>=3.0.0', 'pytest-asyncio']\naiohttp_requires = [\"aiohttp\"]\ntreq_requires = [\"treq\", \"twisted[tls]\"]\ntornado_requires = [\"tornado\"]\n\nlong_description = pathlib.Path(\"README.rst\").read_text(\"utf-8\")\n\nsetuptools.setup(\n name=\"gidgethub\",\n version=\"3.0.0.dev0\",\n description=\"An async GitHub API library\",\n long_description=long_description,\n url=\"https://gidgethub.readthedocs.io\",\n author=\"Brett Cannon\",\n author_email=\"brett@python.org\",\n license=\"Apache\",\n classifiers=[\n 'Intended Audience :: Developers',\n \"License :: OSI Approved :: Apache Software License\",\n \"Development Status :: 5 - Production/Stable\",\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=\"github sans-io async\",\n packages=setuptools.find_packages(),\n zip_safe=True,\n python_requires=\">=3.6.0\",\n setup_requires=['pytest-runner>=2.11.0'],\n tests_require=tests_requires,\n install_requires=['uritemplate>=3.0.0'],\n extras_require={\n \"docs\": docs_requires,\n \"tests\": tests_requires,\n \"aiohttp\": aiohttp_requires,\n \"treq\": treq_requires,\n \"tornado\": tornado_requires,\n \"dev\": (docs_requires + tests_requires + aiohttp_requires\n + treq_requires + tornado_requires),\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201356211","text":"import unittest\n\n\nclass Solution:\n\n def lengthOfLastWord(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n ans = 0\n s = s.strip()\n for i in range(1, len(s) + 2):\n if i == len(s) + 1 or s[-i] == ' ':\n ans = i - 1\n break\n return ans\n\n\nclass CaseCheck(unittest.TestCase):\n\n def test0(self):\n s_in = \"Hello World\"\n s = Solution()\n actual = s.lengthOfLastWord(s_in)\n expected = 5\n self.assertEqual(actual, expected)\n\n def test1(self):\n s_in = \"a\"\n s = Solution()\n actual = s.lengthOfLastWord(s_in)\n expected = 1\n self.assertEqual(actual, expected)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/len_of_last_word.py","file_name":"len_of_last_word.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"411524970","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n A Test fixture for dir_watcher.\n Checks the create_parser, and main functions for\n correct behavior.\n\"\"\"\nimport sys\nimport os\nimport glob\nimport unittest\n\nimport dirwatcher\n\n__author__ = \"Janell.Huyck, based off madarp's test code for babynames\"\n\n\nclass TestDirwatcher(unittest.TestCase):\n def test_create_parser(self):\n \"\"\"Check if parser can parse args correctly\"\"\"\n p = dirwatcher.create_parser()\n test_args = ['watchdir', 'txt', '-poll', '3', 'Tiffany']\n ns = p.parse_args(test_args)\n self.assertTrue(ns.dir == 'watchdir', 'Incorrectly parsing dir')\n self.assertTrue(ns.ext == 'txt', 'Incorrectly parsing ext')\n 
self.assertTrue(\n ns.poll == '3', 'Incorrectly parsing given polling time')\n self.assertTrue(ns.magic == 'Tiffany',\n 'Incorrectly parsing magic string')\n\n \"\"\"Check if the default polling time comes back as 1\"\"\"\n\n test_args = ['watchdir', 'txt', 'Tiffany']\n ns = p.parse_args(test_args)\n self.assertTrue(\n ns.poll == '1', \"Default polling time should be 1 second\")\n\n # def test_exit_sigint():\n","sub_path":"tests/test_dirwatcher.py","file_name":"test_dirwatcher.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"171929411","text":"#!/usr/bin/env python3\n#\nimport sys\n\n#import and open the BAM file\n\ninputBAM = sys.argv[1]\nBAMfile = open(inputBAM, 'r')\n\n#make the BAM file into a dictionary\n\nBamdict = {}\n\nfor line in BAMfile:\n\tline.rstrip()\n\tlistofBam = line.split('\\t')\n\tgene = listofBam[2]\n\tread = listofBam[0]\n#would be best to really pull the gene away from the trans by doing:\n#\tgene, trans = gene.split('^') \n#the gene and trans are written in the file as gene^trans but can just leave trans attached to it, totally fine \n\tif gene not in Bamdict:\n\t\tBamdict[gene] = set()\n\t\tBamdict[gene].add(read)\n\telse:\n\t\tBamdict[gene].add(read)\n#print(Bamdict)\n#passed\n\nReadcountdict={}\n\nfor gene in Bamdict:\n\tReadcountdict[gene] = len(Bamdict[gene])\nfor gene in Readcountdict:\n\tprint(gene,'\\t',Readcountdict[gene])\n\n#stuck here though because i need to sort by highest value \n\nfor key,value in sorted(Readcountdict.items(), key=lambda item: item[1], reverse=True):\n\tprint ('%s\\t\\t%s' % (key, value))\n\n\n\n","sub_path":"Kmersprobset/gene_read_counter.py","file_name":"gene_read_counter.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"135044052","text":"from decimal import Decimal\n\ndef cancel_digits(num, dem):\n '''Returns a tuple comtaining the numerator and denominater of a fraction where \n if the input numerator and denominator share a digit, they are canceled.'''\n\n num_str = str(num)\n dem_str = str(dem)\n\n repl = None\n\n if num_str[0] in dem_str:\n repl = num_str[0]\n elif num_str[1] in dem_str:\n repl = num_str[1]\n\n if not repl:\n return None\n\n num_str = num_str.replace(repl, \"\")\n dem_str = dem_str.replace(repl, \"\")\n\n return (int(num_str), int(dem_str))\n\ntruthy = []\n\nfor dem in range(10, 100):\n for num in range(10, dem):\n # skip trivial or guaranteed false cases.\n if num == dem or num % 10 == 0 or dem % 10 == 0 or len(set(str(num))) == 1 or len(set(str(dem))) == 1:\n continue\n\n res = cancel_digits(num, dem)\n\n if not res:\n continue\n\n new_frac = res[0] / res[1] \n\n if new_frac == num / dem:\n truthy.append((num, dem))\n\nfor n, d in truthy:\n print(f'{n}/{d}')","sub_path":"python/33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"366328436","text":"# Python program for Optimized Dynamic Programming solution to \r\n# Binomail Coefficient. 
This one uses the concept of pascal \r\n# Triangle and less memory \r\n\r\ndef binomialCoeff(n , k): \r\n\r\n\t# Declaring an empty array \r\n\tC = [0 for i in range(k+1)] \r\n\tC[0] = 1 #since nC0 is 1 \r\n\r\n\tfor i in range(1,n+1): \r\n\r\n\t\t# Compute next row of pascal triangle using \r\n\t\t# the previous row \r\n\t\tj = min(i ,k) \r\n\t\twhile (j>0): \r\n\t\t\tC[j] = C[j] + C[j-1] \r\n\t\t\tj -= 1\r\n\r\n\treturn C[k] \r\n\r\n# Driver Program to test the above function \r\nn = 5\r\nk = 2\r\nprint (\"Value of C(%d,%d) is %d\" %(n,k,binomialCoeff(n,k)) )\r\n# useful as it returned array c contains\r\n# nc0+nc1+nc2+....+nck\r\n# which will be helpful in many calculations\r\n# This code is contribtued by Nikhil Kumar Singh(nickzuck_007) \r\n","sub_path":"(useful)sbinomial_coeef.py","file_name":"(useful)sbinomial_coeef.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421418837","text":"##Kimberly Winter \t\t10/19/16\n##Pygame tutorial- Bouncing ball :)\n\nimport sys, pygame\npygame.init()\n\nsize = width, height = 2048, 1028\nspeed = [2, 2]\nblack = 0, 0, 0\n\nscreen = pygame.display.set_mode(size)\n\n#ball = pygame.image.load(\"football.png\")\n#ballrect = ball.get_rect()\n\nclass ball(object):\n def __init__(self):\n self.ball=pygame.image.load(\"football.png\")\n self.ballrect=self.ball.get_rect()\n\nball1=ball()\nwhile 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n \tsys.exit()\n# ballrect = ballrect.move(speed)\n # if ballrect.left < 0 or ballrect.right > width:\n # speed[0] = -speed[0]\n # if ballrect.top < 0 or ballrect.bottom > height:\n # speed[1] = -speed[1]\n\n screen.fill(black)\n screen.blit(ball1.ball, ball1.ballrect)\n pygame.display.flip()","sub_path":"pygameTutorial.py","file_name":"pygameTutorial.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"83108442","text":"import collections\nfrom curses import curs_set\nfrom typing import List\n\n\n# 1. Haiku\n\n# def haiku(words, syllables):\n# left, pos = 0, 0\n# targets, i = [5, 7, 5], 0\n# result = []\n# while left < len(words):\n# currWord = words[pos]\n# numSyllables = syllables[currWord]\n# if numSyllables < targets[i]:\n# targets[i] -= numSyllables\n# pos += 1\n# elif numSyllables == targets[i]:\n# i += 1\n# result.append(' '.join(words[left, pos+1]))\n# pos += 1\n# if i == 3:\n# return result\n# else:\n# left += 1\n# result = []\n# pos = left\n# targets, i = [5, 7, 5], 0\n# return []\n\n\n''' \n2. 
Schedules (和 lc 370 很像)\nInput: A list of [start, end] representing orders received in a car rental shop.\nOutput: the minimum number of cars required to fulfill the requests.\n'''\n\n\ndef carRental(orders):\n endTime = max(orders, key=lambda x: x[1])[1]\n rentals = [0]*(endTime+2)\n for order in orders:\n rentals[order[0]] += 1\n rentals[order[1]+1] -= 1\n for i in range(1, endTime+2):\n rentals[i] += rentals[i-1]\n return max(rentals)\n# print(carRental([[0, 3], [1, 4], [2, 5]]))\n\n\n'''\nlc 79 Word Search\n'''\n\n\nclass Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.bfs(i, j, board, word) == True:\n return True\n return False\n\n def bfs(self, i, j, board, word):\n if len(word) == 0:\n return True\n if i < 0 or i >= len(board) or j < 0 or j >= len(board[0]) or board[i][j] != word[0]:\n return False\n board[i][j] = '.'\n flag = self.bfs(i+1, j, board, word[1:]) or self.bfs(i-1, j, board, word[1:]) or self.bfs(\n i, j+1, board, word[1:]) or self.bfs(i, j-1, board, word[1:])\n board[i][j] = word[0]\n return flag\n\n\n''' \nlc 772\nBasic Calculator III\n'''\n\n\nclass Solution:\n def calculate(self, s: str) -> int:\n stack, sign, num = [], '+', 0\n for i, c in enumerate(s + '+'):\n if c.isdigit():\n num = num * 10 + ord(c) - ord('0')\n elif c == '(':\n stack.append(sign)\n stack.append('(')\n sign = '+'\n elif c in '+-*/)':\n if sign == '+':\n stack.append(num)\n elif sign == '-':\n stack.append(-num)\n elif sign == '*':\n stack.append(stack.pop() * num)\n elif sign == '/':\n stack.append(int(stack.pop() / num))\n if c == ')':\n num, item = 0, stack.pop()\n while item != '(':\n num += item\n item = stack.pop()\n sign = stack.pop()\n else:\n sign, num = c, 0\n return sum(stack)\n\n\n'''\nlc 1376 Employee\n'''\n\n\nclass Solution:\n def numOfMinutes(self, n: int, headID: int, manager: List[int], informTime: List[int]) -> int:\n children = collections.defaultdict(list)\n for i, m in enumerate(manager):\n if m >= 0:\n children[m].append(i)\n return self.dfs(headID, children, informTime)\n\n def dfs(self, head, children, informTime):\n if head not in children:\n return 0\n maxTime = 0\n for child in children[head]:\n time = self.dfs(child, children, informTime)\n maxTime = max(maxTime, time)\n return maxTime+informTime[head]\n\n\n''' \nlc 273. 
Integer to English Words\n'''\n\n\nclass Solution:\n def __init__(self):\n self.LESS_THAN_20 = [\"\", \"One\", \"Two\", \"Three\", \"Four\", \"Five\", \"Six\", \"Seven\", \"Eight\", \"Nine\", \"Ten\",\n \"Eleven\", \"Twelve\", \"Thirteen\", \"Fourteen\", \"Fifteen\", \"Sixteen\", \"Seventeen\", \"Eighteen\", \"Nineteen\"]\n self.TENS = [\"\", \"Ten\", \"Twenty\", \"Thirty\", \"Forty\",\n \"Fifty\", \"Sixty\", \"Seventy\", \"Eighty\", \"Ninety\"]\n self.THOUSANDS = [\"\", \"Thousand\", \"Million\", \"Billion\"]\n\n def numberToWords(self, num: int) -> str:\n if num == 0:\n return \"Zero\"\n thousands_idx = 0\n res = \"\"\n while num > 0:\n if num % 1000 != 0:\n words = self.getWords(num % 1000)\n res = words + self.THOUSANDS[thousands_idx] + ' ' + res\n num //= 1000\n thousands_idx += 1\n return res.strip()\n\n def getWords(self, num):\n if num == 0:\n return \"\"\n if num < 20:\n return self.LESS_THAN_20[num] + ' '\n elif num < 100:\n return self.TENS[num//10] + ' ' + self.getWords(num % 10)\n else:\n return self.LESS_THAN_20[num//100] + \" Hundred \" + self.getWords(num % 100)\n\n\n'''\nlc 269 Alien Dictionary (这题似乎已经不出了, good for practice of topological search)\n'''\n\n\nclass Solution:\n def alienOrder(self, words: List[str]) -> str:\n map = collections.defaultdict(set)\n degree = collections.defaultdict(int)\n\n for word in words:\n for l in word:\n degree[l] = 0\n\n for i in range(len(words)-1):\n currWord = words[i]\n nextWord = words[i+1]\n length = min(len(currWord), len(nextWord))\n for j in range(length):\n if currWord[j] != nextWord[j]:\n if nextWord[j] not in map[currWord[j]]:\n map[currWord[j]].add(nextWord[j])\n degree[nextWord[j]] += 1\n break\n elif j == length-1 and len(currWord) > len(nextWord):\n return \"\"\n\n queue = []\n for key in degree.keys():\n val = degree[key]\n if val == 0:\n queue.append(key)\n\n result = \"\"\n while queue:\n letter = queue.pop(0)\n for neighbour in map[letter]:\n degree[neighbour] -= 1\n if degree[neighbour] == 0:\n queue.append(neighbour)\n result += letter\n if len(result) != len(map):\n return \"\"\n return result\n\n\n'''\nlc 588. File System\n'''\n\n\nclass Node:\n def __init__(self):\n self.children = collections.defaultdict(Node)\n self.content = \"\"\n\n\nclass FileSystem:\n\n def __init__(self):\n self.root = Node()\n\n def getNode(self, path, create):\n if path == '/':\n return self.root\n curr = self.root\n for directory in path[1:].split('/'):\n if not curr.children.get(directory) and create == False:\n return None\n # curr.children[directory] = Node()\n curr = curr.children[directory]\n return curr\n\n def ls(self, path: str) -> List[str]:\n curr = self.getNode(path, False)\n if not curr:\n return []\n if curr.content: # file path,return file name\n return [path.split('/')[-1]]\n return sorted(curr.children.keys())\n\n def mkdir(self, path: str) -> None:\n\n self.getNode(path, True)\n return\n\n def addContentToFile(self, filePath: str, content: str) -> None:\n node = self.getNode(filePath, True)\n node.content += content\n\n def readContentFromFile(self, filePath: str) -> str:\n node = self.getNode(filePath, False)\n return node.content\n\n\n'''\nlc 68. 
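# --- Annotation: Kahn's BFS topological sort (the core of alienOrder above) ---
# A self-contained sketch under a made-up graph, kept separate from the word
# parsing: repeatedly emit nodes whose in-degree is zero; if not every node is
# emitted, the graph has a cycle and no valid order exists (alienOrder returns
# "" in that case). collections.deque avoids the O(n) list.pop(0) used above.
import collections

def topo_sort(edges, nodes):
    adj = collections.defaultdict(set)
    indeg = {n: 0 for n in nodes}
    for u, v in edges:
        if v not in adj[u]:
            adj[u].add(v)
            indeg[v] += 1
    queue = collections.deque(n for n in nodes if indeg[n] == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in adj[u]:
            indeg[v] -= 1
            if indeg[v] == 0:
                queue.append(v)
    return order if len(order) == len(nodes) else []  # [] signals a cycle

assert topo_sort([("t", "f"), ("w", "e"), ("r", "t"), ("e", "r")],
                 ["w", "e", "r", "t", "f"]) == ["w", "e", "r", "t", "f"]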
Text Justification\n'''\n\n\nclass Solution:\n def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:\n n_letter, n_word, curr = 0, 0, []\n lines = []\n for word in words:\n if n_letter + n_word + len(word) > maxWidth:\n if len(curr) == 1:\n lines.append(curr[0]+' '*(maxWidth-n_letter))\n\n else:\n n_space, mod = divmod(maxWidth - n_letter, n_word-1)\n for i in range(mod):\n curr[i] += ' '\n lines.append((' '*n_space).join(curr))\n # currLine = \"\"\n # for i,w in enumerate(curr):\n # currLine += w\n # if i == len(curr)-1:\n # break\n # currLine += ' '*n_space\n # if mod > 0:\n # currLine += ' '\n # mod -= 1\n # lines.append(currLine)\n n_letter = len(word)\n n_word = 1\n curr = [word]\n else:\n n_letter += len(word)\n n_word += 1\n curr.append(word)\n lines.append(' '.join(curr) + ' '*(maxWidth - len(curr)-n_letter+1))\n return lines\n\n\n'''\nlc 23. Merge k sorted List\n'''\n\n\nclass Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n pq = []\n dummy = ListNode(0)\n curr = dummy\n count = 0\n for node in lists:\n if node:\n count += 1\n heapq.heappush(pq, (node.val, count, node))\n while len(pq) > 0:\n currMin = heapq.heappop(pq)\n curr.next = currMin[2]\n curr = curr.next\n if curr.next:\n count += 1\n heapq.heappush(pq, (curr.next.val, count, curr.next))\n return dummy.next\n\n\n''' \nCSV parser\n'''\n\n\ndef parse(lines):\n result = []\n currStr = \"\"\n isQuoted = False\n i = 0\n while i < len(lines):\n c = lines[i]\n if isQuoted == False:\n if c == '\"':\n isQuoted = True\n elif c == ',':\n result.append(currStr)\n currStr = \"\"\n else:\n currStr += c\n else:\n if c == '\"':\n if i < len(lines)-1 and lines[i+1] == '\"':\n currStr += '\"'\n i += 1\n else:\n isQuoted = False\n else:\n currStr += c\n i += 1\n\n if currStr != \"\":\n result.append(currStr)\n return \"|\".join(result)\n\n\ntest = 'Alexandra Alex,\"Alexandra Alex,\",\"Alexandra \"\"Alex\"\"\",\"\"\"Alexandra Alex\"\"\",\"Alexandra \"\"Alex\"\"\",\"\"\"Alexandra \"\"Alex\"\"\"\"\"'\ntest1 = \"John,Smith,john.smith@gmail.com,Los Angeles,1\"\ntest2 = \"\\\"Alexandra \\\"\\\"Alex\\\"\\\"\\\",Menendez,alex.menendez@gmail.com,Miami,1\"\n\nprint(parse(test))\nprint(parse(test1))\nprint(parse(test2))\n","sub_path":"费尔.py","file_name":"费尔.py","file_ext":"py","file_size_in_byte":10618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"298873241","text":"\"\"\"\n.. 
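# --- Annotation: the (val, count, node) heap entries in mergeKLists above ---
# heapq compares tuples element by element; when two nodes share the same
# value, Python would fall through to comparing the ListNode objects, which
# raises TypeError in Python 3. The monotonically increasing counter breaks
# ties first, so the third element is never compared. A minimal sketch with
# plain tuples (the payload strings are made up for illustration):
import heapq

pq = []
counter = 0
for value, payload in [(1, "a"), (1, "b"), (0, "c")]:
    counter += 1
    heapq.heappush(pq, (value, counter, payload))  # payloads never compared

assert [heapq.heappop(pq)[2] for _ in range(3)] == ["c", "a", "b"]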
See the NOTICE file distributed with this work for additional information\n regarding copyright ownership.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom flask import Blueprint, jsonify, make_response, abort\nfrom flask import current_app as app\nfrom flask_restful import Resource, Api, reqparse\n\nalt_assemblies_bp = Blueprint('alternative_assemblies', __name__)\napi = Api(alt_assemblies_bp)\n\n\nclass AltAssemblies(Resource):\n def get(self, **kwargs):\n\n parser = reqparse.RequestParser(bundle_errors=True)\n parser.add_argument('genome_id', type=str, required=True, help=\"Missing genome_id param in the request.\", location='args')\n self.args = parser.parse_args()\n\n genome_key = app.genome_store.check_if_genome_exists('genome_id', self.args.genome_id)\n\n if genome_key is None:\n return abort(400, {'error': 'Invalid Genome ID'})\n\n genome = app.genome_store.get_genome(genome_key)\n\n if genome['alternative_assemblies'] is None:\n return make_response(jsonify([]), 200)\n\n alt_assemblies_response = {}\n for alt_assembly_genome_id in genome['alternative_assemblies']:\n alt_genome_key = app.genome_store.check_if_genome_exists('genome_id', alt_assembly_genome_id)\n\n # Check in case there is a corrupt alternative genome id loaded into genome store from configs\n if alt_genome_key is not None:\n alt_genome = app.genome_store.get_genome(alt_genome_key)\n alt_assemblies_response = self._prepare_response(alt_assemblies_response, alt_genome)\n\n\n return make_response(jsonify(alt_assemblies_response), 200)\n\n\n\n def _prepare_response(self, alt_assemblies_response, alt_genome):\n\n alt_assemblies_info = dict(\n assembly_name=alt_genome['assembly_name'],\n genome_id=alt_genome['genome_id']\n )\n\n alt_assemblies_response.setdefault('alternative_assemblies', []).append(alt_assemblies_info)\n\n return alt_assemblies_response\n\n\napi.add_resource(AltAssemblies, '/')\n","sub_path":"blueprints/alternative_assemblies.py","file_name":"alternative_assemblies.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390355495","text":"#!/usr/bin/env python\n\n# Run the drones and queen for local testing.\n# Call this with four command line arguments:\n# 1 hostname/ip\n# 2 port number\n# 3 team id\n# 4 number of drones\n\nimport sys,os\n\nhost_name = sys.argv[1]\nport = sys.argv[2]\nteam_id = sys.argv[3]\nnum_drones = int(sys.argv[4])\n\n# Run drones.\nfor drone_id in range(1,num_drones+1):\n os.system(\"./drone %s %s %s %s &\" % (host_name,port,team_id,drone_id))\n\n# Run queen.\nos.system(\"./queen %s %s %s\" % (host_name,port,team_id))\n","sub_path":"2009/bagthebanner/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"89368796","text":"# importing modules\nfrom flask import Flask, url_for, request, g, render_template, abort \nfrom flask import send_from_directory, redirect, 
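# --- Annotation: launching drones/queen without shell interpolation ---
# wrapper.py above builds shell commands with %-interpolation and os.system.
# A hedged alternative sketch using subprocess, which passes arguments as a
# list (no shell quoting issues) and keeps handles to the background drones;
# the ./drone and ./queen binaries and their argument order are taken from
# the original script, everything else here is illustrative.
import subprocess

def run_team(host, port, team_id, num_drones):
    drones = [subprocess.Popen(["./drone", host, port, team_id, str(drone_id)])
              for drone_id in range(1, num_drones + 1)]
    subprocess.call(["./queen", host, port, team_id])  # blocks, like the original
    return drones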
session, send_file, flash\nfrom pymyadmin.globs import __random__, generate_auth, __auth__, __dict__\nimport pymysql\nimport os\nimport pymyadmin.actions as action\n\nserver = Flask(__name__.split(\".\")[0])\nserver.secret_key = os.urandom(__random__)\n\n# ensure responses aren't cached\nif server.config[\"DEBUG\"]:\n @server.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache,no-store,must-validate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n# endpoint to redirect to its desired location\n@server.route(\"/\")\ndef decide():\n if \"auth\" in session and \"logged\" in session:\n return redirect(\"/{}/\".format(__auth__))\n return redirect(\"/login\")\n\n# login and auth setup\n@server.route(\"/login\", methods=[\"GET\", \"POST\"]) # accepts post and get method\ndef login():\n # local variables\n conn = None\n \n # checking if user is still logged\n if \"auth\" in session and \"logged\" in session:\n return redirect(\"/{}/\".format(__auth__))\n\n # actions to be performed on form submit\n if request.method == \"POST\":\n # fetching form data\n host = request.form['hostname']\n user = request.form['username']\n paas = request.form['password']\n \n # validating form inputs\n if host == \"\" or user == \"\" or paas == \"\" :\n return render_template(\"login.html\", msg_type=\"error\", msg=\"All feilds are mandatory\")\n else:\n # trying to connecti to mysql server\n try:\n conn = pymysql.connect(host=host, password=paas, user=user)\n except pymysql.err.InternalError as e: # db not found error\n return render_template(\"login.html\", msg_type=\"error\", msg=e.args[1])\n except pymysql.err.OperationalError as e: # login credential error\n return render_template(\"login.html\", msg_type=\"error\", msg=e.args[1])\n \n # building data carriers\n __dict__['user'] = user\n __dict__['conn'] = conn\n __dict__['curr'] = conn.cursor()\n session['auth'] = __auth__\n session['logged'] = True\n return redirect(\"/{}/\".format(__auth__)) # redirecting to admin panel\n\n # on GET method show the login page \n return render_template(\"login.html\")\n\n# dynamic endpint for endpoint\n@server.route(\"/{}/\".format(__auth__))\n@server.route(\"/{}/home\".format(__auth__))\ndef admin():\n return render_template(\"admin.html\", tbls=action.tables(__dict__['curr']))\n\n\n# logout endpoint\n@server.route(\"/logout\")\ndef logout():\n if \"user\" not in __dict__: # checking if user to be logged out\n return redirect(\"/login\") # redirecting to login if user is already logged out\n \n # destructing data carriers\n usr = __dict__.pop('user')\n __dict__['conn'].close() # closing mysql connection\n __dict__.pop('conn')\n __dict__.pop('curr')\n\n # loging out user\n session.pop(\"auth\", None)\n session.pop(\"logged\", None)\n\n # viewing logout pannel\n return render_template(\"logout.html\", user=usr)\n","sub_path":"pymyadmin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"509951644","text":"from flask import Flask, render_template, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nimport pandas as pd\nimport sqlite3\nimport smtplib\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///books.db'\ndb = SQLAlchemy(app)\n\nclass Books(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(50), nullable=False)\n author = 
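# --- Annotation: parameterized queries with the pymysql connection above ---
# Once main.py has stored the connection/cursor in __dict__, any query that
# takes user input should pass values as parameters rather than interpolating
# them into SQL. A minimal hedged sketch (the table and column names here are
# made up; only the pymysql calls are taken as given):
import pymysql

def find_user_rows(conn, username):
    with conn.cursor() as cursor:
        # %s placeholders are escaped by the driver, preventing SQL injection
        cursor.execute("SELECT * FROM users WHERE name = %s", (username,))
        return cursor.fetchall()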
db.Column(db.String(30), nullable=False)\n own = db.Column(db.String(3), nullable=False)\n description = db.Column(db.String(70), nullable=False)\n\n def __repr__(self):\n return '' % self.id\n\n\n\n\n@app.route('/')\ndef hello_world():\n return render_template('home.html')\n\n@app.route('/facts')\ndef facts():\n return render_template('facts.html')\n\n@app.route('/list', methods=['POST', 'GET'])\ndef list():\n x = 0\n yes = ''\n no = ''\n conn = sqlite3.connect('books.db')\n df = pd.read_sql_query('SELECT * FROM Books', conn)\n if request.method == 'POST':\n x = request.form['name']\n\n for i in df.name:\n if i == x:\n yes = df[df.name == x].reset_index()\n break\n\n if i != x:\n no = 'Book not found'\n\n x = 0\n for i in df.own:\n if i == 'yes':\n x += 1\n elif i == 'Yes':\n x += 1\n else:\n pass\n\n message = 'Your book has been added to your list'\n if request.method == 'POST':\n record = Books(name = request.form['name'], author = request.form['author'], own = request.form['own'], description = request.form['description'])\n\n\n try:\n db.session.add(record)\n db.session.commit()\n return redirect('/list')\n except:\n return \"There was an error adding your record\"\n\n else:\n p = Books.query.order_by(Books.author)\n return render_template('list.html', p=p, message=message, x=x, yes=yes, no=no)\n\n\n@app.route('/search', methods=['POST', 'GET'])\ndef search():\n conn = sqlite3.connect('books.db')\n df = pd.read_sql_query('SELECT * FROM Books', conn)\n descrip = request.form['name2']\n\n l = df[df.name == descrip].description\n\n\n return render_template('list.html', descrip=descrip, l=l)\n\n\n@app.route('/recommendation', methods=['POST', 'GET'])\ndef recommendation():\n r_email = request.form.get('r_email')\n r_recommendation = request.form.get('r_recommendation')\n r_email2 = 'projects.creativity.growth@gmail.com'\n\n message = \"We have received your recommendation. 
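# --- Annotation: vectorized lookups instead of the row loops in list() ---
# The search loop above walks df.name and can set `no` even when a later row
# matched earlier, and the ownership counter special-cases 'yes' vs 'Yes'.
# A hedged pandas sketch doing each operation in one expression (function
# names here are illustrative only):
import pandas as pd

def find_book(df, title):
    hits = df[df["name"] == title].reset_index()
    return hits if not hits.empty else "Book not found"

def count_owned(df):
    # case-insensitive match replaces the separate 'yes'/'Yes' branches
    return int(df["own"].str.lower().eq("yes").sum())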
It will be taken into consideration shortly...\"\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(\"projects.creativity.growth@gmail.com\", 'juanpablo14')\n server.sendmail(\"projects.creativity.growth@gmail.com\", r_email, message)\n server.sendmail('projects.creativity.growth@gmail.com', r_email2, r_recommendation)\n\n return render_template('facts.html')\n\n\n@app.route('/delete/', methods=['POST', 'GET'])\ndef delete(name):\n record_to_delete = Books.query.get_or_404(name)\n\n try:\n db.session.delete(record_to_delete)\n db.session.commit()\n return redirect('/list')\n except:\n return \"The record could not be deleted\"\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"34519763","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\tLongest Common Prefix\n\n\tWrite a function to find the longest common prefix string amongst an array of strings.\n\n\thttps://leetcode.com/problems/longest-common-prefix/description/\n\n\t-- mzellhuber February 10, 2018\n\n (c) 2018 Melissa Zellhuber, All Rights Reserved.\n\"\"\"\nstrs = [\"geeksforgeeks\", \"geeks\", \"geek\", \"geezer\"]\n\ndef longestCommonPrefix(strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if not strs:\n return ''\n res = ''\n for i in xrange(len(strs[0])):\n for j in xrange(1, len(strs)):\n if i >= len(strs[j]) or strs[j][i] != strs[0][i]:\n return res\n res += strs[0][i]\n return res\n\nprint(longestCommonPrefix(strs))","sub_path":"LeetCode/Longest Common Prefix.py","file_name":"Longest Common Prefix.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"557099508","text":"from math import sqrt\n\n# quadratic equation formula:\n# ax^2 + bx + c = 0\ndef solveQuadraticEquation(a, b, c):\n if(a == 0):\n # the equation returns imaginary root and infinity\n return(\"This equation has no solution\")\n\n discriminant = (b*b) - (4*a*c)\n\n if(discriminant < 0):\n # the equation returns imaginary roots\n return(\"This equation has no solution\")\n\n rootOne = (-b + sqrt(discriminant)) / (2 * a)\n rootTwo = (-b - sqrt(discriminant)) / (2 * a)\n print(\"the roots of the equation are:\")\n return(rootOne, rootTwo)\n\n\nif __name__ == \"__main__\":\n print(\"please enter a\")\n a = int(raw_input())\n print(\"please enter b\")\n b = int(raw_input())\n print(\"please enter c\")\n c = int(raw_input())\n print(solveQuadraticEquation(a, b, c))\n","sub_path":"quadratic-equation-haalsa/quadratic-equation-solver.py","file_name":"quadratic-equation-solver.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"586335567","text":"from time import sleep\nfrom functools import wraps\nfrom traceback import print_exc\n\nimport PIL\n\nfrom helpers import setup_logger\nfrom number_input import IntegerAdjustInput\nfrom utils import to_be_foreground\nfrom base_ui import BaseUIElement, internal_callback_in_background\n\nlogger = setup_logger(__name__, \"info\")\n\n\nclass RefresherExitException(Exception):\n pass\n\n\nclass Refresher(BaseUIElement):\n \"\"\"\n A Refresher allows you to update the screen on a regular interval.\n All you need is to provide a function that'll return the text/image you want to display;\n that function will then be called with the desired frequency and the display\n will 
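# --- Annotation: longest common prefix via the standard library ---
# The longestCommonPrefix above is written for Python 2 (xrange); under
# Python 3, xrange must become range. As an independent cross-check,
# os.path.commonprefix already computes the longest common *string* prefix
# of a list (despite living in os.path, it compares character by character):
import os.path

strs = ["geeksforgeeks", "geeks", "geek", "geezer"]
assert os.path.commonprefix(strs) == "gee"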
be updated with whatever it returns.\n \"\"\"\n\n def __init__(self, refresh_function, i, o, refresh_interval=1, keymap=None, name=\"Refresher\", **kwargs):\n \"\"\"Initialises the Refresher object.\n\n Args:\n\n * ``refresh_function``: a function which returns data to be displayed on the screen upon being called, in the format accepted by ``screen.display_data()`` or ``screen.display_image()``. To be exact, supported return values are:\n\n * Tuples and lists - are converted to lists and passed to ``display_data()``\n * Strings - are converted to a single-element list and passed to ``display_data()``\n * `PIL.Image` objects - are passed to ``display_image()``\n\n * ``i``, ``o``: input&output device objects\n\n Kwargs:\n\n * ``refresh_interval``: Time between display refreshes (and, accordingly, ``refresh_function`` calls).\n * ``keymap``: Keymap entries you want to set while Refresher is active.\n * By default, KEY_LEFT deactivates the Refresher, if you want to override it, make sure that user can still exit the Refresher.\n * ``name``: Refresher name which can be used internally and for debugging.\n\n \"\"\"\n self.custom_keymap = keymap if keymap else {}\n BaseUIElement.__init__(self, i, o, name, input_necessary=False, **kwargs)\n self.set_refresh_interval(refresh_interval)\n self.set_refresh_function(refresh_function)\n self.calculate_intervals()\n\n @property\n def is_active(self):\n return self.in_background\n\n def pause(self):\n \"\"\"\n Pauses the refresher, not allowing it to print anything on the screen\n while it's paused.\n \"\"\"\n self.in_foreground = False\n\n def resume(self):\n \"\"\"\n Resumes the refresher after it's been paused, allowing it to continue\n printing things on the screen. Refreshes the screen when it's called.\n \"\"\"\n if not self.in_foreground:\n self.to_foreground()\n\n def background_if_inactive(self):\n \"\"\"\n If the UI element hasn't been launched yet, launches it in background\n and waits until it's fully running. Otherwise, resumes the UI element.\n \"\"\"\n if not self.is_active:\n self.run_in_background()\n self.wait_for_active()\n else:\n self.resume()\n\n def wait_for_active(self, timeout=100):\n \"\"\"\n If the UI element hasn't been launched yet, launches it in background\n and waits until it's fully running.\n \"\"\"\n counter = 0\n while not self.is_active:\n sleep(0.1)\n counter += 1\n if counter == timeout:\n raise ValueError(\"Waiting for {} to be active - never became active!\".format(self.name))\n\n def set_refresh_interval(self, new_interval):\n \"\"\"Allows setting Refresher's refresh intervals after it's been initialized\"\"\"\n #interval for checking the in_background property in the activate()\n #when refresh_interval is small enough, is the same as refresh_interval\n if new_interval == 0:\n raise ValueError(\"Refresher refresh_interval can't be 0 ({})\".format(self.name))\n self.refresh_interval = new_interval\n self.sleep_time = 0.1 if new_interval > 0.1 else new_interval\n self.calculate_intervals()\n\n def set_refresh_function(self, refresh_function):\n if isinstance(refresh_function, RefresherView):\n refresh_function.init(self.o)\n self.refresh_function = refresh_function\n\n def calculate_intervals(self):\n \"\"\"Calculates the sleep intervals of the refresher, so that no matter the\n ``refresh_interval``, the refresher is responsive. Also, sets the counter to zero.\"\"\"\n #in_background of the refresher needs to be checked approx. 
each 0.1 second,\n #since users expect the refresher to exit almost instantly\n iterations_before_refresh = self.refresh_interval/self.sleep_time\n if iterations_before_refresh < 1:\n logger.warning(\"{}: self.refresh_interval is smaller than self.sleep_time!\".format(self.name))\n #Failsafe\n self.iterations_before_refresh = 1\n else:\n self.iterations_before_refresh = int(iterations_before_refresh)\n self._counter = 0\n\n def idle_loop(self):\n if self.in_foreground:\n if self._counter == self.iterations_before_refresh:\n self._counter = 0\n if self._counter == 0:\n self.refresh()\n self._counter += 1\n sleep(self.sleep_time)\n\n @internal_callback_in_background\n def change_interval(self):\n \"\"\"\n A helper function to adjust the Refresher's refresh interval while it's running\n \"\"\"\n new_interval = IntegerAdjustInput(self.refresh_interval, self.i, self.o, message=\"Refresh interval:\").activate()\n if new_interval is not None:\n self.set_refresh_interval(new_interval)\n\n def set_keymap(self, keymap):\n keymap.update(self.custom_keymap)\n BaseUIElement.set_keymap(self, keymap)\n\n def generate_keymap(self):\n return {}\n\n def process_callback(self, func):\n \"\"\"\n Decorates a function so that during its execution the UI element stops\n being in foreground. Is typically used as a wrapper for a callback from\n input event processing thread. After callback's execution is finished,\n sets the keymap again and refreshes the UI element.\n \"\"\"\n # This function is copied from base_ui.py - the only difference is\n # the RefresherExitException handling. TODO: think of a prettier way\n # to make it work.\n @wraps(func)\n def wrapper(*args, **kwargs):\n self.to_background()\n self.to_background()\n e = None\n try:\n func(*args, **kwargs)\n except RefresherExitException:\n self.deactivate()\n except Exception as e:\n print_exc()\n logger.debug(\"{}: executed wrapped function: {}\".format(self.name, func.__name__))\n if self.in_background:\n self.to_foreground()\n if e:\n raise e\n return wrapper\n\n @to_be_foreground\n def refresh(self):\n logger.debug(\"{}: refreshed data on display\".format(self.name))\n try:\n data_to_display = self.refresh_function()\n except RefresherExitException:\n logger.info(\"{}: received exit exception, deactivating\".format(self.name))\n self.deactivate()\n return\n if isinstance(data_to_display, basestring):\n #Passed a string, not a list.\n #Let's be user-friendly and wrap it in a list!\n data_to_display = [data_to_display]\n elif isinstance(data_to_display, tuple):\n #Passed a tuple. 
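# --- Annotation: the sleep-slicing pattern in calculate_intervals/idle_loop ---
# A stand-alone sketch of the same idea: instead of sleeping for the whole
# refresh_interval (which would make exits feel sluggish), sleep in ~0.1 s
# slices, refresh only every `slices_per_refresh` slices, and poll a stop
# flag on every slice. All names here are illustrative only.
import time

def responsive_loop(refresh, refresh_interval, should_stop):
    slice_time = min(0.1, refresh_interval)
    slices_per_refresh = max(1, int(refresh_interval / slice_time))
    counter = 0
    while not should_stop():
        if counter % slices_per_refresh == 0:
            refresh()               # fires roughly once per refresh_interval
        counter += 1
        time.sleep(slice_time)      # but the flag is polled every slice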
Let's convert it into a list!\n data_to_display = list(data_to_display)\n elif isinstance(data_to_display, PIL.Image.Image):\n if \"b&w\" not in self.o.type:\n raise ValueError(\"The screen doesn't support showing images!\")\n self.o.display_image(data_to_display)\n return\n elif not isinstance(data_to_display, list):\n raise ValueError(\"refresh_function returned an unsupported type: {}!\".format(type(data_to_display)))\n self.o.display_data(*data_to_display)\n\n\nclass RefresherView(object):\n def __init__(self, text_callback, monochrome_callback, color_callback=None):\n self.text_callback = text_callback\n self.monochrome_callback = monochrome_callback\n self.color_callback = color_callback\n\n def init(self, o):\n if \"color\" in o.type:\n self.callback = self.color_callback if self.color_callback else self.monochrome_callback\n elif \"b&w\" in o.type:\n self.callback = self.monochrome_callback\n else:\n self.callback = self.text_callback\n\n def __call__(self, *args, **kwargs):\n return self.callback(*args, **kwargs)\n","sub_path":"ui/refresher.py","file_name":"refresher.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"392951118","text":"# defining a class\nclass ShippingContainer:\n\n # defining CLASS ATTRIBUTES\n next_serial = 1\n\n # static method wih static method decorator (TAKES NO SELF ARGUMENT)\n @staticmethod\n def _get_next_serial():\n result = ShippingContainer.next_serial\n ShippingContainer.next_serial += 1\n return result\n\n # initializing (constructor) (INSTANCE ATTRIBUTES)\n def __init__(self, owner_code, contents):\n self.owner_code = owner_code\n self.contents = contents\n self.serial = ShippingContainer._get_next_serial()\n\n# calling our class and making two instances of it\ncontainer_one = ShippingContainer(\"A123\",\"lots of tricks\")\ncontainer_two = ShippingContainer(\"B123\",\"lots of push ups\")\n\n# creating an empty list to ppend items to\ncontainer_list = []\n\n# appending containers to the list\ncontainer_list.append(container_one)\ncontainer_list.append(container_two)\n\n# displaying information about current containers\nprint(\"There are currently {ammount} containers shipped.\".format(ammount=len(container_list)))\nprint(\"The containers are:\")\nfor container in container_list:\n print(\"{code} with {contents} inside and serial number of {serial}\".format(code = container.owner_code,\n contents = container.contents,\n serial = container.serial))\n\n","sub_path":"Python notes and examples/Code examples for properties and class methods/test_two.py","file_name":"test_two.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"198735173","text":"import numpy as np\nimport matplotlib \nimport matplotlib.pyplot as plt\n\n#Check theta-beta-mach relation at point to find max theta angle (avoid detached shock )\n\nM1 = 2.0 \nbeta = np.linspace(0.5*np.pi,0.0,100)\ntheta = np.zeros(100)\ngamma = 1.34\n\nfor i in range(0,100):\n\tbetai = beta[i]\n\ttantheta = 2.0*1.0/np.tan(betai)* ( M1**2*(np.sin(betai))**2 -1.0 ) / ( M1**2*(gamma + np.cos(2.0*betai)) *2.0 )\n\ttheta[i] = np.arctan(tantheta) + np.pi*0.5\n\tprint(betai, theta[i])\n\n#Back to deg\n\nfig1 = plt.figure(figsize=(8,6))\t\nax1 = fig1.add_subplot(111)\n\ntheta = theta * 180.0/np.pi\nbeta = beta * 
180.0/np.pi\n\nax1.plot(theta,beta,ls='-',color='black')\nax1.set(xlabel=r'$\\theta$',ylabel=r'$\\beta$')\nax1.plot()\nplt.show()\nexit()\n","sub_path":"nzzle_design/pythontest.py","file_name":"pythontest.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281192508","text":"from sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nimport seaborn as sns\n\nfrom pre_processing import *\n\nlm = LinearRegression()\n\n\n\"\"\"\nSetting X and y training variables for linear regression model\n\"\"\"\n\nX = train_set.drop(['SalePrice','Id'],axis = 1)\ntest_set.drop(['Id'],axis=1,inplace =True)\ny = np.log(train_set['SalePrice'])\n\n\"\"\"\nSplitting the training data set into training and test data set to get an idea\nof the accuracy of our model\n\"\"\"\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)\n\n\"\"\"\nTraining the linear regression model\n\"\"\"\nlm.fit(X_train,y_train)\n#print(lm)\n\"\"\"\nPredicting the survived column for test set\n\"\"\"\npredictions = lm.predict(X_test)\n#print(predictions)\n\n\"\"\"\nCalculating accuracy of the model \n\"\"\"\nprint('MAE:', metrics.mean_absolute_error(y_test, predictions))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))\nscore = (lm.score(X_test,y_test))\nprint(\"Model score is %g\" %score)\n#print(X_train)\nsns.scatterplot(predictions,y_test,alpha=.75)\n\n#print('\\n',test_set.info())\n#print('\\n',X.info())\n#print(X.columns & test_set.columns)\n\n\"\"\"\nUsing the entire train.csv file as the training dataset to predict survived column in the test.csv file\n\"\"\"\nX_test_final = test_set\nlm_final = LinearRegression()\nlm_final.fit(X,y) #Training the model with the completetraining set\npredictions_final = lm_final.predict(X_test_final)\n\npredictions_final = np.exp(predictions_final)\n#print(predictions_final)\nX_test_final['SalePrice'] = predictions_final\nX_test_final.to_csv('Results.csv',index=False)\n\n\nplt.show()\n","sub_path":"linear_reg.py","file_name":"linear_reg.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"211417334","text":"class Solution:\n # @param {int[]} nums1 an integer array\n # @param {int[]} nums2 an integer array\n # @return {int[]} an integer array\n def intersection(self, nums1, nums2):\n # Write your code here\n\n nums1.sort()\n nums2.sort()\n i = 0\n j = 0\n ret = []\n while i < len(nums1) and j < len(nums2):\n if nums1[i] == nums2[j]:\n ret.append(nums1[i])\n i += 1\n j += 1\n elif nums1[i] < nums2[j]:\n i += 1\n else:\n j += 1\n return ret\n \"\"\"\n myTable1 = {}\n for i in range(len(nums1)):\n if nums1[i] not in myTable1:\n myTable1[nums1[i]] = 1\n else:\n myTable1[nums1[i]] += 1\n ret = []\n for j in range(len(nums2)):\n if nums2[j] in myTable1 and myTable1[nums2[j]]>0:\n myTable1[nums2[j]] -= 1\n ret.append(nums2[j])\n return ret\n \"\"\"","sub_path":"array/548.intersectionTwoArray2.py","file_name":"548.intersectionTwoArray2.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"477152104","text":"import uvicorn\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom ml_utils import load_model, predict\nfrom typing 
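# --- Annotation: the theta-beta-Mach relation used in pythontest.py ---
# The standard oblique-shock relation (e.g. NACA Report 1135) is
#   tan(theta) = 2*cot(beta)*(M1^2*sin(beta)^2 - 1) / (M1^2*(gamma + cos(2*beta)) + 2)
# Note the "+ 2" in the denominator (the loop above multiplies by 2.0 instead)
# and that theta = arctan(...) directly, with no pi/2 offset. A hedged sketch:
import numpy as np

def theta_from_beta(M1, beta, gamma=1.34):
    num = M1**2 * np.sin(beta)**2 - 1.0
    den = M1**2 * (gamma + np.cos(2.0 * beta)) + 2.0
    return np.arctan(2.0 / np.tan(beta) * num / den)

# sanity check: a normal shock (beta = 90 deg) deflects the flow by theta = 0
assert abs(theta_from_beta(2.0, np.pi / 2)) < 1e-12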
import List\nimport datetime\n\n# defining the main app\napp = FastAPI(title=\"House Price Predictor\", docs_url=\"/\")\n\n# calling the load_model during startup.\n# this will train the model and keep it loaded for prediction.\napp.add_event_handler(\"startup\", load_model)\n\n# class which is expected in the payload\nclass QueryIn(BaseModel):\n MSSubClass: float\n MSZoning: str\n LotFrontage: float\n LotArea: float\n Street: str\n Alley: str\n LotShape: str\n LandContour: str\n Utilities: str\n LotConfig: str\n LandSlope: str\n Neighborhood: str\n Condition1: str\n Condition2: str\n BldgType: str\n HouseStyle: str\n OverallQual: float\n OverallCond: float\n YearBuilt: float\n YearRemodAdd: float\n RoofStyle: str\n RoofMatl: str\n Exterior1st: str\n Exterior2nd: str\n MasVnrType: str\n MasVnrArea: float\n ExterQual: str\n ExterCond: str\n Foundation: str\n BsmtQual: str\n BsmtCond: str\n BsmtExposure: str\n BsmtFinType1: str\n BsmtFinSF1: float\n BsmtFinType2: str\n BsmtFinSF2: float\n BsmtUnfSF: float\n TotalBsmtSF: float\n Heating: str\n HeatingQC: str\n CentralAir: str\n Electrical: str\n FirstFlrSF: float\n SecondFlrSF: float\n LowQualFinSF: float\n GrLivArea: float\n BsmtFullBath: float\n BsmtHalfBath: float\n FullBath: float\n HalfBath: float\n BedroomAbvGr: float\n KitchenAbvGr: float\n KitchenQual: str\n TotRmsAbvGrd: float\n Functional: str\n Fireplaces: float\n FireplaceQu: str\n GarageType: str\n GarageYrBlt: float\n GarageFinish: str\n GarageCars: float\n GarageArea: float\n GarageQual: str\n GarageCond: str\n PavedDrive: str\n WoodDeckSF: float\n OpenPorchSF: float\n EnclosedPorch: float\n ThreeSsnPorch: float\n ScreenPorch: float\n PoolArea: float\n PoolQC: str\n Fence: str\n MiscFeature: str\n MiscVal: float\n MoSold: float\n YrSold: float\n SaleType: str\n SaleCondition: str\n\n# class which is returned in the response\nclass QueryOut(BaseModel):\n saleprice: str\n timestamp: str\n\n# Route definitions\n@app.get(\"/ping\")\n# Healthcheck route to ensure that the API is up and running\ndef ping():\n ct = datetime.datetime.now().strftime('%d-%B-%y %H:%M')\n return {\"ping\": \"pong\", \"timestamp\": ct}\n\n\n@app.post(\"/predict_house_price\", response_model=QueryOut, status_code=200)\n# Route to do the prediction using the ML model defined.\n# Payload: QueryIn containing the parameters\n# Response: QueryOut containing the saleprice predicted (200)\ndef predict_house_price(query_data: QueryIn):\n ct = datetime.datetime.now().strftime('%d-%B-%y %H:%M')\n output = {\"saleprice\": predict(query_data), \"timestamp\": ct}\n return output\n\n\n# Main function to start the app when main.py is called\nif __name__ == \"__main__\":\n # Uvicorn is used to run the server and listen for incoming API requests on 0.0.0.0:8887\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8887, reload=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"485770981","text":"import numpy as np\r\nimport torch\r\nfrom torch import nn\r\n\r\n\r\nclass LSTMEncoder(nn.Module):\r\n def __init__(self, n_times, d_in, d_out, activation_type,\r\n train_bn_scaling, noise_level, use_cuda):\r\n super(LSTMEncoder, self).__init__()\r\n self.n_times = n_times\r\n self.d_in = d_in\r\n self.d_out = d_out\r\n self.activation_type = activation_type\r\n self.train_bn_scaling = train_bn_scaling\r\n self.noise_level = noise_level\r\n self.use_cuda = use_cuda\r\n \r\n # LSTM MODULE\r\n 
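# --- Annotation: exercising the endpoints above without a running server ---
# A hedged sketch using FastAPI's TestClient (recent FastAPI versions need the
# httpx dependency for it), which drives the app in-process. Entering the
# client as a context manager runs the registered startup handlers, so
# load_model fires before the first request; only /ping is shown since it
# needs no QueryIn payload.
from fastapi.testclient import TestClient

def smoke_test(app):
    with TestClient(app) as client:
        response = client.get("/ping")
        assert response.status_code == 200
        assert response.json()["ping"] == "pong"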
self.lstm = nn.LSTM(d_in, d_out, batch_first=True)\r\n if self.use_cuda:\r\n try:\r\n self.lstm.cuda()\r\n except:\r\n self.lstm.cuda()\r\n \r\n # BN MODULE\r\n # clean and noisy tensors flow in separate ways\r\n # gaussian noise will be added after BN in noisy way\r\n # For ReLU, both Gamma and Beta are trained\r\n # For Softmax, both Gamma and Beta are trained\r\n self.bn_clean = nn.BatchNorm1d(n_times, affine=False)\r\n self.bn_noise = nn.BatchNorm1d(n_times, affine=False)\r\n \r\n if self.use_cuda:\r\n self.bn_beta = torch.zeros(d_out).cuda()\r\n else:\r\n self.bn_beta = torch.zeros(d_out)\r\n \r\n if self.train_bn_scaling:\r\n if self.use_cuda:\r\n self.bn_gamma = torch.ones(d_out).cuda()\r\n else:\r\n self.bn_gamma = torch.ones(d_out)\r\n\r\n # ACTIVATION MODULE\r\n if activation_type == 'relu':\r\n self.activation = torch.nn.ReLU()\r\n elif activation_type == 'leakyrelu':\r\n self.activation = torch.nn.LeakyReLU()\r\n elif activation_type == 'softmax':\r\n self.activation = torch.nn.Softmax(dim=-1)\r\n else:\r\n raise ValueError(\"invalid Acitvation type\")\r\n\r\n # BUFFER\r\n self.buffer_z_pre = None\r\n self.buffer_z = None\r\n self.buffer_tilde_z = None\r\n\r\n # implement Gamma and Beta after BN\r\n def bn_gamma_beta(self, x):\r\n if self.train_bn_scaling:\r\n h = self.bn_gamma * (x + self.bn_beta)\r\n else:\r\n h = x + self.bn_beta\r\n return h\r\n\r\n # clean tensor way\r\n def forward_clean(self, h):\r\n z_pre = self.lstm(h)[0]\r\n self.buffer_z_pre = z_pre.detach().clone()\r\n z = self.bn_clean(z_pre)\r\n self.buffer_z = z.detach().clone()\r\n z_gb = self.bn_gamma_beta(z)\r\n h = self.activation(z_gb)\r\n return h\r\n\r\n # noisy tensor way\r\n def forward_noise(self, tilde_h):\r\n z_pre = self.lstm(tilde_h)[0]\r\n z = self.bn_noise(z_pre)\r\n noise = np.random.normal(loc=0.0, scale=self.noise_level, size=z.size())\r\n if self.use_cuda:\r\n noise = torch.FloatTensor(noise).cuda()\r\n else:\r\n noise = torch.FloatTensor(noise)\r\n tilde_z = z + noise\r\n self.buffer_tilde_z = tilde_z\r\n z = self.bn_gamma_beta(tilde_z)\r\n h = self.activation(z)\r\n return h\r\n\r\n\r\nclass LSTMStackedEncoders(nn.Module):\r\n def __init__(self, n_times, d_in, d_encoders, activation_types,\r\n train_bn_scalings, noise_std, use_cuda):\r\n super(LSTMStackedEncoders, self).__init__()\r\n self.noise_std = noise_std\r\n self.use_cuda = use_cuda\r\n\r\n # BOTTOM PART\r\n self.bn_bottom = nn.BatchNorm1d(n_times, affine=False)\r\n \r\n # ENCODERS\r\n self.encoders_ref = []\r\n self.encoders = torch.nn.Sequential()\r\n n_encoders = len(d_encoders)\r\n for i in range(n_encoders):\r\n if i == 0:\r\n d_input = d_in\r\n else:\r\n d_input = d_encoders[i - 1]\r\n d_output = d_encoders[i]\r\n activation = activation_types[i]\r\n train_bn_scaling = train_bn_scalings[i]\r\n encoder_ref = \"encoder_\" + str(i)\r\n encoder = LSTMEncoder(n_times, d_input, d_output, activation, \r\n train_bn_scaling, noise_std, use_cuda)\r\n self.encoders_ref.append(encoder_ref)\r\n self.encoders.add_module(encoder_ref, encoder)\r\n \r\n # BUFFER\r\n self.buffer_tilde_z_bottom = None\r\n self.buffer_z_pre_bottom = None\r\n self.buffer_z_bottom = None\r\n\r\n # clean tensor way\r\n def forward_clean(self, x):\r\n h = x\r\n self.buffer_x_input = x.clone()\r\n self.buffer_z_pre_bottom = h.clone()\r\n self.buffer_z_bottom = self.bn_bottom(h)\r\n for e_ref in self.encoders_ref:\r\n encoder = getattr(self.encoders, e_ref)\r\n h = encoder.forward_clean(h)\r\n return h\r\n\r\n # noisy tensor way\r\n def forward_noise(self, x):\r\n noise 
= np.random.normal(loc=0.0, scale=self.noise_std, size=x.size())\r\n if self.use_cuda:\r\n noise = torch.FloatTensor(noise).cuda()\r\n else:\r\n noise = torch.FloatTensor(noise)\r\n h = x + noise\r\n self.buffer_tilde_z_bottom = h.clone()\r\n for e_ref in self.encoders_ref:\r\n encoder = getattr(self.encoders, e_ref)\r\n h = encoder.forward_noise(h)\r\n return h\r\n\r\n def get_encoders_tilde_z(self, reverse=True):\r\n tilde_z_layers = []\r\n tilde_z_layers.append(self.buffer_tilde_z_bottom)\r\n for e_ref in self.encoders_ref:\r\n encoder = getattr(self.encoders, e_ref)\r\n tilde_z = encoder.buffer_tilde_z.clone()\r\n tilde_z_layers.append(tilde_z)\r\n if reverse:\r\n tilde_z_layers.reverse()\r\n return tilde_z_layers\r\n\r\n def get_encoders_z_pre(self, reverse=True):\r\n z_pre_layers = []\r\n z_pre_layers.append(self.buffer_z_pre_bottom)\r\n for e_ref in self.encoders_ref:\r\n encoder = getattr(self.encoders, e_ref)\r\n z_pre = encoder.buffer_z_pre.clone()\r\n z_pre_layers.append(z_pre)\r\n if reverse:\r\n z_pre_layers.reverse()\r\n return z_pre_layers\r\n\r\n def get_encoders_z(self, reverse=True):\r\n z_layers = []\r\n z_layers.append(self.buffer_x_input)\r\n for e_ref in self.encoders_ref:\r\n encoder = getattr(self.encoders, e_ref)\r\n z = encoder.buffer_z.clone()\r\n z_layers.append(z)\r\n if reverse:\r\n z_layers.reverse()\r\n return z_layers\r\n","sub_path":"lstm_ladder/lstm_encoder.py","file_name":"lstm_encoder.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"308310012","text":"# Title: wechat push CVE-2020\r\n# Date: 2020-5-9\r\n# Exploit Author: weixiao9188\r\n# Version: 4.0\r\n# Tested on: Linux,windows\r\n# cd /root/sh/git/ && nohup python3 /root/sh/git/git.py &\r\n# coding:UTF-8\r\nimport requests\r\nimport json\r\nimport time\r\nimport os\r\nimport pandas as pd\r\ntime_sleep = 60 #每隔20秒爬取一次\r\nwhile(True):\r\n headers1 = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400\"}\r\n #判断文件是否存在\r\n datas = []\r\n response1=None\r\n response2=None\r\n if os.path.exists(\"olddata.csv\"):\r\n #如果文件存在则每次爬取10个\r\n df = pd.read_csv(\"olddata.csv\", header=None)\r\n datas = df.where(df.notnull(),None).values.tolist()#将提取出来的数据中的nan转化为None\r\n requests.packages.urllib3.disable_warnings()\r\n response1 = requests.get(url=\"https://api.github.com/search/repositories?q=jumpserver&sort=updated&per_page=10\",headers=headers1,verify=False)\r\n response2 = requests.get(url=\"https://api.github.com/search/repositories?q=cms&ssort=updated&per_page=10\",headers=headers1,verify=False)\r\n\r\n else:\r\n #不存在爬取全部\r\n datas = []\r\n requests.packages.urllib3.disable_warnings()\r\n response1 = requests.get(url=\"https://api.github.com/search/repositories?q=红队行动框架&sort=updated&order=desc\",headers=headers1,verify=False)\r\n response2 = requests.get(url=\"https://api.github.com/search/repositories?q=tp5.0.24&ssort=updated&order=desc\",headers=headers1,verify=False)\r\n\r\n data1 = json.loads(response1.text)\r\n data2 = json.loads(response2.text)\r\n for j in [data1[\"items\"],data2[\"items\"]]:\r\n for i in j:\r\n s = {\"name\":i['name'],\"html\":i['html_url'],\"description\":i['description']}\r\n s1 =[i['name'],i['html_url'],i['description']]\r\n if s1 not in datas:\r\n #print(s1)\r\n #print(datas)\r\n params = {\r\n \"text\":s[\"name\"],\r\n \"desp\":\" 
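# --- Annotation: Gaussian corruption in forward_noise above ---
# The encoders draw noise with numpy and then convert to a FloatTensor, with a
# separate cuda/cpu branch. A hedged equivalent that stays on the tensor's own
# device and dtype (making the branching unnecessary) is torch.randn_like:
import torch

def corrupt(z, noise_level):
    # same distribution as np.random.normal(0.0, noise_level, z.size()),
    # but allocated directly on z's device with z's dtype
    return z + torch.randn_like(z) * noise_level

assert corrupt(torch.zeros(4, 3), 0.0).equal(torch.zeros(4, 3))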
链接:\"+str(s[\"html\"])+\"\\n简介\"+str(s[\"description\"])\r\n }\r\n print(\"当前推送为\"+str(s)+\"\\n\")\r\n #print(params)\r\n requests.packages.urllib3.disable_warnings()\r\n requests.get(\".send\",params=params,headers=headers1,timeout=10,verify=False)\r\n #time.sleep(1)#以防推送太猛\r\n print(\"推送完成!\\n\")\r\n datas.append(s1)\r\n else:\r\n pass\r\n print(\"数据已在!\")\r\n pd.DataFrame(datas).to_csv(\"olddata.csv\",header=None,index=None)\r\n time.sleep(time_sleep)\r\n","sub_path":"github监控.py","file_name":"github监控.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"304312142","text":"#!/bin/env python3\nfrom pyftdi.spi import SpiController\nimport time\nimport struct\nimport random\n \n\n# start controllre and open an spi port mode 3(cpha=1,cpol=1)\nctrl = SpiController()\nctrl.configure('ftdi://ftdi:232h:FTU7EF6B/1')\nspi = ctrl.get_port(cs=0, freq=5e6, mode=3)\n\ni = 1\nwhile True:\n payload = random.randint(0,255) # inclusive\n # write byte to gpio\n spi.exchange([0x01, 0x01, payload], 0, True, True)\n # read byte back:\n response = spi.exchange([0x01, 0x00, 0x00], 1, True, True)\n if response[0] != payload:\n print(\"Error: sent {} but received {}\".format( hex(payload), hex(response[0])))\n if i%100==0:\n print(\"\\rRandom write/read tests: {:d}\".format(i), end=\"\", flush=True)\n i = i+1\n #time.sleep(0.005) # not strictly necessary but it's nice to see the leds blink\n","sub_path":"housekeeping_test_gpio.py","file_name":"housekeeping_test_gpio.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"52574339","text":"#!/usr/bin/env python\n\n# Copyright INRA (Institut National de la Recherche Agronomique)\n# http://www.inra.fr\n# http://urgi.versailles.inra.fr\n#\n# This software is governed by the CeCILL license under French law and\n# abiding by the rules of distribution of free software. You can use, \n# modify and/ or redistribute the software under the terms of the CeCILL\n# license as circulated by CEA, CNRS and INRIA at the following URL\n# \"http://www.cecill.info\". \n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability. \n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or \n# data to be ensured and, more generally, to use and operate it in the \n# same conditions as regards security. 
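# --- Annotation: the seen-repository bookkeeping in the monitor above ---
# The script round-trips its dedupe state through a headerless CSV and a
# DataFrame on every poll. A hedged, smaller sketch: keep a set of repo URLs
# (unique per repository) and persist it as JSON between polls. The state
# file name and helper names here are illustrative only.
import json, os

STATE = "seen_repos.json"

def load_seen():
    if os.path.exists(STATE):
        with open(STATE) as fh:
            return set(json.load(fh))
    return set()

def remember(seen, items):
    fresh = [i for i in items if i["html_url"] not in seen]
    seen.update(i["html_url"] for i in fresh)
    with open(STATE, "w") as fh:
        json.dump(sorted(seen), fh)
    return fresh  # only these need to be pushed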
\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license and that you accept its terms.\n\nimport os\nimport subprocess\nfrom commons.core.LoggerFactory import LoggerFactory\nfrom commons.core.checker.RepetException import RepetException\nfrom commons.core.checker.CheckerUtils import CheckerUtils\nfrom commons.core.utils.FileUtils import FileUtils\nfrom commons.core.utils.RepetOptionParser import RepetOptionParser\nfrom commons.core.coord.SlidingWindow import SlidingWindow\nfrom commons.core.coord.MergeFromOverlaps import MergeFromOverlaps\nfrom commons.core.seq.BioseqDB import BioseqDB\nfrom commons.core.seq.SequenceModifications import SequenceModifications\nfrom commons.core.seq.SequenceModificationsCollection import SequenceModificationsCollection\n\nLOG_DEPTH = \"repet.tools\"\n\n##Deletions detection based on coverage\n#\nclass GetDeletionsFromCoverage(object):\n \n def __init__(self, fastaFileName = \"\", BAMFileName = \"\", outVCF = \"\", windowsLength = 200, overlap = 1, minSize = 500, thresholdN = 0.1, doClean = False, verbosity = 3):\n self._fastaFileName = fastaFileName\n self._BAMFileName = BAMFileName\n self.setOutVCF(outVCF)\n self._windowLength = windowsLength\n self._overlap = overlap\n self._minSize = minSize\n self._thresholdN = thresholdN\n \n self._thresholdDown = 0.1\n self._thresholdUp = 5\n \n self._coverageFileName = None\n self._dlCov = None\n \n self._doClean = doClean\n self._verbosity = verbosity\n self._log = LoggerFactory.createLogger(\"%s.%s\" % (LOG_DEPTH, self.__class__.__name__), self._verbosity)\n \n def setAttributesFromCmdLine(self):\n usage = \"GetDeletionsFromCoverage.py [options]\"\n description = \"Detect deletions using Depth Of Coverage method (DOC).\"\n epilog = \"\\nExample 1: launch without verbosity.\\n\"\n epilog += \"\\t$ GetDeletionsFromCoverage.py -i genome.fa -b mapping.bam -v 0\\n\"\n parser = RepetOptionParser(description = description, epilog = epilog, usage = usage)\n parser.add_option(\"-i\", \"--fasta\", dest = \"fastaFileName\", action = \"store\", type = \"string\", help = \"input genome file name [compulsory] [format: fasta]\", default = \"\")\n parser.add_option(\"-b\", \"--bam\", dest = \"bamFileName\", action = \"store\", type = \"string\", help = \"input sorted BAM file name [compulsory] [format: sorted BAM]\", default = \"\")\n parser.add_option(\"-o\", \"--out\", dest = \"outVCF\", action = \"store\", type = \"string\", help = \"output VCF file name [optional] [default: .vcf]\", default = \"\")\n parser.add_option(\"-w\", \"--window\", dest = \"windowsLength\", action = \"store\", type = \"int\", help = \"sliding window length [optional] [default: 200]\", default = 200)\n parser.add_option(\"-O\", \"--overlap\", dest = \"overlap\", action = \"store\", type = \"int\", help = \"overlap between sliding windows [optional] [default: 1]\", default = 1)\n parser.add_option(\"-s\", \"--size\", dest = \"minSize\", action = \"store\", type = \"int\", help = \"minimal deletion size to detect [optional] [default: 500]\", default = 500)\n parser.add_option(\"-n\", \"--nratio\", dest = \"thresholdN\", action = \"store\", type = \"float\", help = \"maximum unknown nucleotide coverage allowed [optional] [default: 0.1]\", default = 0.1)\n parser.add_option(\"-c\", \"--clean\", dest = \"doClean\", action = \"store_true\", help = \"clean temporary files [optional] [default: False]\", default = False)\n parser.add_option(\"-v\", \"--verbosity\", dest = \"verbosity\", action = \"store\", 
type = \"int\", help = \"verbosity [optional] [default: 3]\", default = 3)\n options = parser.parse_args()[0]\n self._setAttributesFromOptions(options)\n \n def _setAttributesFromOptions(self, options):\n self.setFastaFileName(options.fastaFileName)\n self.setBAMFileName(options.bamFileName)\n self.setOutVCF(options.outVCF)\n self.setWindowLength(options.windowsLength)\n self.setOverlap(options.overlap)\n self.setMinSize(options.minSize)\n self.setThresholdN(options.thresholdN)\n self.setDoClean(options.doClean)\n self.setVerbosity(options.verbosity)\n\n def setFastaFileName(self, fastaFileName):\n self._fastaFileName = fastaFileName\n \n def setBAMFileName(self, bamFileName):\n self._BAMFileName = bamFileName\n \n def setOutVCF(self, outVCF):\n if outVCF == \"\" and self._BAMFileName:\n self._outVCF = \"%s.vcf\" % os.path.splitext(os.path.basename(self._BAMFileName))[0]\n else:\n self._outVCF = outVCF\n \n def setWindowLength(self, windowsLength):\n self._windowLength = windowsLength\n \n def setOverlap(self, overlap):\n self._overlap = overlap\n \n def setMinSize(self, minSize):\n self._minSize = minSize\n \n def setThresholdN(self, thresholdN):\n self._thresholdN = thresholdN\n \n def setDoClean(self, doClean):\n self._doClean = doClean\n \n def setVerbosity(self, verbosity):\n self._verbosity = verbosity\n \n def _checkOptions(self):\n if self._fastaFileName != \"\":\n if not FileUtils.isRessourceExists(self._fastaFileName):\n self._logAndRaise(\"ERROR: Genome file: '%s' does not exist!\" % self._fastaFileName)\n else:\n self._logAndRaise(\"ERROR: Missing input fasta file name\")\n \n if self._BAMFileName != \"\":\n if not FileUtils.isRessourceExists(self._BAMFileName):\n self._logAndRaise(\"ERROR: BAM file: '%s' does not exist!\" % self._BAMFileName)\n else:\n self._logAndRaise(\"ERROR: Missing input BAM file name\")\n \n if self._overlap >= self._windowLength:\n self._logAndRaise(\"ERROR: Windows overlaps must be lower than window size\")\n \n if self._thresholdN < 0 or 1 < self._thresholdN :\n self._logAndRaise(\"ERROR: maximal unknown nucleotide ratio ('-n' option) must be in [0;1]\")\n \n if not self._outVCF:\n self.setOutVCF(\"\")\n \n def _logAndRaise(self, errorMsg):\n self._log.error(errorMsg)\n raise RepetException(errorMsg)\n \n # Algorithm far from optimum!\n def detectDeletions(self, iSeqModif):\n lCoverages = self._dlCov.get(iSeqModif.getOriginalHeader())\n genomeSize = len(lCoverages)\n iSlidingWindows = SlidingWindow(self._windowLength, self._overlap)\n meanCoverageInLastWindow = sum(lCoverages[iSlidingWindows.getStart() - 1:iSlidingWindows.getEnd()]) / float(iSlidingWindows.getLength())\n hasLowerCoverage = False\n hasHigherCoverage = False\n start = 1\n end = None\n \n while iSlidingWindows.getEnd() + self._windowLength - self._overlap < genomeSize:\n iSlidingWindows.slideWindowOnce()\n meanCoverageInWindow = sum(lCoverages[iSlidingWindows.getStart() - 1:iSlidingWindows.getEnd()]) / float(iSlidingWindows.getLength())\n \n if meanCoverageInWindow == 0:\n meanCoverageInWindow = 2.2250738585072014e-308\n\n if meanCoverageInLastWindow == 0:\n meanCoverageInLastWindow = 2.2250738585072014e-308\n \n hasLowerCoverage = meanCoverageInWindow / float(meanCoverageInLastWindow) < self._thresholdDown\n hasHigherCoverage = meanCoverageInWindow / float(meanCoverageInLastWindow) > self._thresholdUp\n if hasLowerCoverage:\n start = iSlidingWindows.getMiddle()\n \n if hasHigherCoverage:\n end = iSlidingWindows.getMiddle()\n if end > genomeSize:\n end = genomeSize\n \n 
iSeqModif.addDeletion(start, end)\n\n meanCoverageInLastWindow = meanCoverageInWindow\n \n def _loadCoverage(self):\n self._coverageFileName = \"%s.coverage\" % os.path.basename(self._BAMFileName)\n \n cmd = \"bedtools genomecov -d -ibam %s | cut -f1,3 > %s\" % (self._BAMFileName, self._coverageFileName)\n process = subprocess.Popen(cmd, shell = True)\n self._log.debug(\"Running : '%s'\" % cmd)\n process.communicate()\n if process.returncode != 0:\n self._logAndRaise(\"ERROR when launching '%s'\" % cmd)\n \n os.system(\"cat %s | cut -f1 | uniq > BAMsequencesNames.txt\" % self._coverageFileName)\n with open(\"BAMsequencesNames.txt\") as f:\n lSeqNames = [line.strip() for line in f.readlines()]\n \n self._dlCov = dict((seqName, []) for seqName in lSeqNames)\n \n self._log.debug(\"Loading coverage in memory\")\n with open(self._coverageFileName) as f:\n for line in f:\n splittedLine = line.split()\n seqName = splittedLine[0]\n baseCov = int(splittedLine[1])\n self._dlCov.get(seqName).append(baseCov)\n \n if self._doClean:\n self._log.warning(\"Temporary files will be cleaned\")\n os.remove(\"BAMsequencesNames.txt\")\n os.remove(self._coverageFileName)\n \n def _loadCoverageSlim(self):\n self._coverageFileName = \"%s.coverage\" % os.path.basename(self._BAMFileName)\n\n cmd = \"bedtools genomecov -bga -ibam %s | grep -w '[0]$'> %s\" % (self._BAMFileName, self._coverageFileName)\n process = subprocess.Popen(cmd, shell = True)\n self._log.debug(\"Running : '%s'\" % cmd)\n process.communicate()\n if process.returncode != 0:\n self._logAndRaise(\"ERROR when launching '%s'\" % cmd)\n \n def detectDeletionsSlim(self, iSeqModif):\n [iSeqModif.addDeletion(int(line.split()[1])+1, int(line.split()[2])) for line in open(self._coverageFileName).readlines() if line.startswith(iSeqModif.getOriginalHeader())]\n \n if self._doClean:\n self._log.warning(\"Temporary files will be cleaned\")\n os.remove(self._coverageFileName)\n\n def run(self):\n LoggerFactory.setLevel(self._log, self._verbosity)\n self._checkOptions()\n if not CheckerUtils.isExecutableInUserPath(\"bedtools\"):\n self._logAndRaise(\"ERROR: 'bedtools' must be in your path\")\n\n self._log.info(\"START GetDeletionsFromCoverage\")\n self._log.debug(\"Fasta file name: %s\" % self._fastaFileName)\n self._log.debug(\"BAM file name: %s\" % self._BAMFileName)\n \n self._log.info(\"Computing base coverage from '%s'\" % self._BAMFileName)\n self._loadCoverage()\n# self._loadCoverageSlim()\n\n iSeqModifCollection = SequenceModificationsCollection()\n\n iBSDB = BioseqDB(self._fastaFileName)\n \n self._log.info(\"Coverage list loaded; starting detection.\")\n for iBS in iBSDB.db:\n iSeqModif = SequenceModifications(iBS.getHeader()) \n \n self._log.debug(\"Current sequence name: '%s'\" % iBS.getHeader())\n \n if iBS.getHeader() in self._dlCov.keys():\n# self.detectDeletionsSlim(iSeqModif)\n self.detectDeletions(iSeqModif)\n else:\n iSeqModif.addDeletion(1, iBS.getLength())\n\n self._log.debug(\"Detection done. 
Starting results cleaning.\")\n \n self._log.debug(\"Merging overlapping results.\")\n lClusteredRanges = MergeFromOverlaps(iSeqModif.getDeletions()).clusterize()\n \n iCleanSeqModif = SequenceModifications(iBS.getHeader())\n \n for iRange in lClusteredRanges:\n if iRange.getLength() >= self._minSize:\n if iBS.subseq(iRange.getStart(), iRange.getEnd()).propNt(\"N\") < self._thresholdN:\n iCleanSeqModif.addDeletion(iRange.getStart(), iRange.getEnd())\n \n self._log.debug(\"Number of deletions in '%s' before cleaning: %i\" % (iBS.getHeader(), len(iSeqModif.getDeletions())))\n self._log.debug(\"Number of deletions in '%s' after cleaning: %i\" % (iBS.getHeader(), len(iCleanSeqModif.getDeletions())))\n \n iSeqModifCollection.add(iCleanSeqModif)\n \n iSeqModifCollection.sort()\n iSeqModifCollection.writeVCF(self._outVCF, self._fastaFileName, \"GetDeletionsFromCoverage\")\n\n self._log.info(\"END GetDeletionsFromCoverage\")\n\nif __name__ == \"__main__\":\n iGetDeletionsFromCoverage = GetDeletionsFromCoverage()\n iGetDeletionsFromCoverage.setAttributesFromCmdLine()\n iGetDeletionsFromCoverage.run() ","sub_path":"commons/tools/GetDeletionsFromCoverage.py","file_name":"GetDeletionsFromCoverage.py","file_ext":"py","file_size_in_byte":14273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"366511134","text":"import time\nimport pandas as pd\nimport numpy as np\nimport datetime\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york': 'new_york_city.csv',\n 'washington': 'washington.csv' }\nmonths = ['january', 'february', 'march', 'april', 'may', 'june']\ndays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday','sunday' ]\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n Args:\n NA\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n user_choice1 = str(input('Which city\\'s bikeshare data would you like to explore? Chicago, New york, or Washington?\\n'))\n city = user_choice1.lower()\n while city not in ['chicago', 'new york', 'washington']:\n print ('Your choice is invalid, please try again')\n user_choice1 = str(input('Please enter Chicago, New york, or Washington.\\n'))\n city = user_choice1.lower()\n print('Thank you, you\\'ve choosen', city,'!')\n user_choice2 = str(input('Would you like to filter the data by month, day or not at all? Type \\'all\\' for no filter\\n'))\n filter = user_choice2.lower()\n while filter not in ['all', 'month', 'day']:\n print ('Your choice is invalid, please try again')\n user_choice2 = str(input('\\nPlease enter all, month, or day.\\n'))\n city = user_choice2.lower()\n if filter == 'all':\n month = 'all'\n day = 'all'\n elif filter == 'month':\n user_choice3 = str(input('Which month? January, February, March, April, May, or June?\\n'))\n month = user_choice3.lower()\n while month not in months:\n print ('Your choice is invalid, please try again')\n user_choice3 = str(input('Please enter January, February, March, April, May, or June?\\n'))\n month = user_choice3.lower()\n day = 'all'\n elif filter == 'day':\n user_choice4 = str(input('Which day? 
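# --- Annotation: O(1) sliding-window means for detectDeletions above ---
# detectDeletions re-sums the coverage window on every slide, costing
# O(window) per step. A hedged sketch of the usual prefix-sum trick: one
# cumulative-sum pass, after which any window mean is a subtraction and a
# divide. Names here are illustrative only.
import numpy as np

def window_means(coverage, window, step):
    prefix = np.concatenate(([0], np.cumsum(coverage)))
    starts = np.arange(0, len(coverage) - window + 1, step)
    return (prefix[starts + window] - prefix[starts]) / float(window)

assert list(window_means([1, 2, 3, 4], 2, 1)) == [1.5, 2.5, 3.5]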
Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday.\\n'))\n day = user_choice4.lower()\n while day not in days:\n print ('Your choice is invalid, please try again')\n user_choice4 = str(input('Please enter Sunday, Monday, Tuesday, Wednesday, Thursday, Friday or Saturday.\\n'))\n day = user_choice4.lower()\n month = 'all'\n print('-'*40)\n return city, month, day\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n if month != 'all':\n month = months.index(month)+1\n df = df[df['month']==month]\n if day != 'all':\n df = df[df['day_of_week']==day.title()]\n return df\n\ndef time_stats(df):\n \"\"\"\n Displays statistics on the most frequent times of travel.\n\n Args:\n (DataFrame) df\n\n Returns:\n NA\n \"\"\"\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n df['Start Time']=pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n popular_month = df['month'].mode()[0]\n ride_count_month = df['month'].value_counts().values[0]\n df['day'] = df['Start Time'].dt.dayofweek\n popular_day = df['day'].mode()[0]\n ride_count_day = df['day'].value_counts().values[0]\n df['hour'] = df['Start Time'].dt.hour\n popular_hour=df['hour'].mode()[0]\n ride_count_hour = df['hour'].value_counts().values[0]\n\n if df['month'].unique().size != 1 and df['day'].unique().size != 1:\n print('The most popular month is {} with {} rides!'.format(months[popular_month-1].title(), ride_count_month))\n print('The most popular day of the week is {} with {} rides!'.format(days[popular_day].title(),ride_count_day))\n print('The most popular time of day is {}:00 o\\'clock with {} rides!'.format(popular_hour,ride_count_hour))\n elif df['month'].unique().size == 1 and df['day'].unique().size != 1:\n print('In {}, there were total {} rides.'.format(months[popular_month-1].title(),ride_count_month))\n print('The most popular day of the week is {} with {} rides!'.format(days[popular_day].title(), ride_count_day))\n print('The most popular time of day is {}:00 o\\'clock with {} rides in {}!'.format(popular_hour, ride_count_hour,months[popular_month-1].title()))\n elif df['day'].unique().size == 1 and df['month'].unique().size != 1:\n print('{} {}s were the most popular with {} rides, out of {} {} rides!'.format(months[popular_month-1].title(),days[popular_day].title(),ride_count_month,ride_count_day, days[popular_day].title()))\n print('The most popular time of day is {}:00 o\\'clock with {} rides on {}!'.format(popular_hour, ride_count_hour,days[popular_day].title()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef station_stats(df):\n \"\"\"\n Displays statistics on the most popular stations and trip.\n\n Args:\n (DataFrame) df\n\n Returns:\n NA\n \"\"\"\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n popular_start = df['Start Station'].mode()[0]\n popular_start_count = df['Start 
Station'].value_counts()[0]\n    print('The most popular start station is {} with {} rides.'.format(popular_start,popular_start_count))\n    popular_end = df['End Station'].mode()[0]\n    popular_end_count=df['End Station'].value_counts()[0]\n    print('The most popular end station is {} with {} rides.'.format(popular_end,popular_end_count ))\n    df_count=df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'})\n    max_freq = df_count['count'].max()\n    df_count = df_count[df_count['count']==max_freq]\n    print('The most frequent combination of start and end stations are:')\n    print(df_count)\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\ndef trip_duration_stats(df):\n    \"\"\"\n    Displays statistics on the total and average trip duration.\n\n    Args:\n        (DataFrame) df\n\n    Returns:\n        NA\n    \"\"\"\n\n    print('\\nCalculating Trip Duration...\\n')\n    start_time = time.time()\n    # display total travel time\n    total_travel_time = df['Trip Duration'].sum(skipna=True)\n    print('The total travel time is {}.'.format(total_travel_time))\n    average_travel_time = df['Trip Duration'].mean()\n    converted = str(datetime.timedelta(seconds = average_travel_time))\n    print('The average travel time is {} in HH:MM:SS format.'.format(converted))\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\n\ndef user_stats(df):\n    \"\"\"\n    Displays statistics on bikeshare users.\n\n    Args:\n        (DataFrame) df\n\n    Returns:\n        NA\n    \"\"\"\n\n    print('\\nCalculating User Stats...\\n')\n    start_time = time.time()\n\n    # Display counts of user types\n    print(df['User Type'].value_counts())\n\n    # if the dataframe includes gender and birth year columns\n    if len(df.columns) == 13:\n        #gender info\n        print(df['Gender'].value_counts())\n        #birth year stats\n        print('The earliest birth year is {}.'.format(df['Birth Year'].min()))\n        print('The most recent birth year is {}.'.format(df['Birth Year'].max()))\n        print('The most common birth year is {}.'.format(df['Birth Year'].mode().values[0]))\n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    print('-'*40)\n\ndef print_lines(df):\n    \"\"\"\n    Displays the next 5 lines of data until the user stops.\n\n    Args:\n        (DataFrame) df\n\n    Returns:\n        NA\n    \"\"\"\n    choice = input('\\nWould you like to see individual trip data? Enter yes or no.\\n')\n    lines = 0\n    while choice.lower() not in ['yes', 'no']:\n        print('Invalid input.')\n        choice = input('Try again. Yes or No')\n    while choice.lower() == 'yes':\n        print(df.iloc[lines:lines+5,:])\n        lines +=5\n        choice = input('Would you like to see more? Yes or No\\n')\n        while choice.lower() not in ['yes', 'no']:\n            print('Invalid input.')\n            choice = input('Try again.')\n\ndef main():\n    \"\"\"Execution function\"\"\"\n    while True:\n        city, month, day = get_filters()\n        df = load_data(city, month, day)\n        time_stats(df)\n        station_stats(df)\n        trip_duration_stats(df)\n        user_stats(df)\n        print_lines(df)\n        restart = input('Would you like to restart? Enter yes or no.\\n')\n        while restart.lower() not in['yes','no']:\n            print('Invalid input.')\n            restart = input('Try again. 
Yes or No\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":9459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"554994643","text":"# -*- coding:utf-8 -*-\n# @Author : 'longguangbin'\n# @Contact : lgb453476610@163.com\n# @Date : 2019/2/1\n\"\"\" \nUsage Of '139_word_break.py' : \n\"\"\"\n\n\nclass Solution(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: List[str]\n :rtype: bool\n \"\"\"\n # 40 ms - 55.64%\n # 记录每一个���始点\n if not s:\n return True\n\n breakp = [0]\n\n for i in range(len(s) + 1):\n for j in breakp:\n if s[j:i] in wordDict:\n breakp.append(i)\n break\n\n return breakp[-1] == len(s)\n\n def wordBreak2(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: List[str]\n :rtype: bool\n \"\"\"\n # 28 ms - 97.67%\n l = set()\n for d in wordDict:\n l.add(len(d))\n\n dp = [0 for _ in s]\n for i in range(1, len(s) + 1):\n if s[0:i] in wordDict:\n dp[i - 1] = 1\n\n for i in range(1, len(s)):\n if dp[i] == 0:\n for k in l:\n if i >= k and dp[i - k] == 1 and s[i - k + 1:i + 1] in wordDict:\n dp[i] = 1\n break\n\n return dp[-1] == 1\n\n def wordBreak3(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: List[str]\n :rtype: bool\n \"\"\"\n\n # 递归超时\n\n def word_break(s1):\n if (s1 in wordDict) or (s1 == ''):\n return True\n else:\n for j in range(1, len(s1)):\n if word_break(s1[:j]) and word_break(s1[j:]):\n return True\n return False\n\n return word_break(s)\n\n\ndef get_test_instance(example=1):\n s = \"leetcode\"\n wordDict = [\"leet\", \"code\"]\n if example == 1:\n pass\n if example == 2:\n s = \"applepenapple\"\n wordDict = [\"apple\", \"pen\"]\n if example == 3:\n s = \"catsandog\"\n wordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"]\n return s, wordDict\n\n\ndef main():\n s, wordDict = get_test_instance(example=1)\n # s, wordDict = get_test_instance(example=2)\n # s, wordDict = get_test_instance(example=3)\n res = Solution().wordBreak(s, wordDict)\n print(res)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"leetcode/dynamic_programming/139_word_break.py","file_name":"139_word_break.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"577072297","text":"def letter_frequency(text):\n \"\"\" Returns the frequency of each letter in input text. \n\n Allows the user to choose a sorting method.\"\"\"\n\n d = {}\n for char in text.lower():\n if char.isalpha():\n d[char] = d.get(char, 0) + 1\n # Creates a dictionary of letters and their occurence.\n\n d_pairs = sorted(d.items())\n # Creates a list of tuple pairs from the dictionary.\n\n sort = choose_sort()\n if sort == \"l\":\n d_pairs.sort(key=lambda v: v[-1])\n if sort == \"g\":\n d_pairs.sort(key=lambda v: v[-1], reverse=True)\n # Allows the user to choose a sorting method.\n\n for k, v in d_pairs:\n print(f\"\\n\\\"{k}\\\" occurence: {v}\")\n\n again = do_again()\n if again == \"y\":\n text = input(\"\\nEnter Your Text: \")\n letter_frequency(text)\n else:\n print(\"\\nGoodbye!\\n\")\n \ndef choose_sort():\n \"\"\" Asks the user to choose a sorting method.\n\n Continues until a proper input is recieved.\"\"\"\n\n while True:\n sort = input(\"\\nSort Alphabetically (a) or by Frequency (f)? 
\")\n if sort.lower() in \"a\":\n return sort.lower()\n elif sort.lower() in \"f\":\n while True:\n sort = input(\"\\nSort by Least (l) or Greatest (g) Occurence? \")\n if sort.lower() in \"lg\":\n return sort.lower()\n print(\"\\nI Don't Understand!\")\n print(\"\\nI Don't Understand!\")\n \ndef do_again():\n \"\"\" Asks the user if they would like to use the function again.\n\n Continues until a proper input is recieved.\"\"\"\n\n while True:\n again = input(\"\\nEnter More Text? Yes (y) or No (n): \")\n if again.lower() in \"yn\":\n return again.lower()\n print(\"\\nI Don't Understand!\") \n\nif __name__ == '__main__':\n text = input(\"\\nEnter Your Text: \")\n letter_frequency(text)","sub_path":"letter_frequency.py","file_name":"letter_frequency.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"528272993","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy import Request\nimport json\n\n\nclass XiciProcySpider(scrapy.Spider):\n name = 'xici_procy'\n allowed_domains = ['www.xicidaili.com']\n\n def start_requests(self):\n for i in range(1,2):\n yield Request('http://www.xicidaili.com/nn/%s'%i)\n\n def parse(self, response):\n #提取每一页的代理\n for sel in response.xpath('//table[@id=\"ip_list\"]/tr[position()>1]'):\n ip=sel.xpath('./td[2]/text()').extract_first()\n port=sel.xpath('./td[3]/text()').extract_first()\n scheme = sel.xpath('./td[6]/text()').extract_first().lower()\n\n #验证代理是否可用\n url='%s://httpbin.org/ip'%scheme\n proxy='%s://%s:%s'%(scheme,ip,port)\n # print(proxy)\n\n meta={\n 'proxy':proxy,\n 'dont_rety':True,\n 'download_timeout':10,\n\n '_proxy_scheme':scheme,\n '_proxy_ip':ip,\n }\n\n yield Request(url=url,meta=meta,callback=self.check_available,dont_filter=True)\n\n def check_available(self,response):\n proxy_ip=response.meta['_proxy_ip']\n\n if proxy_ip==json.loads(response.text)['origin']:\n yield {\n 'proxy':response.meta['proxy'],\n 'proxy_scheme':response.meta['_proxy_scheme'],\n }","sub_path":"proxy_example/spiders/xici_procy.py","file_name":"xici_procy.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"272565404","text":"from django.shortcuts import render, redirect\n\nfrom video.apis.youtube import youtube_search\n__all__ = ['search']\n\ndef search(request):\n context = {}\n keyword = request.GET.get('keyword')\n page_token = request.GET.get('page_token')\n\n if keyword:\n response = youtube_search(keyword, page_token)\n context['keyword'] = keyword\n context['response'] = response\n return render(request, 'video/search.html', context)","sub_path":"django_app/video/views/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"618590470","text":"#!/usr/bin/env python\n# coding=utf-8\n\n__author__ = \"Lin Dechun\"\n__copyright__ = \"Copyright 2016, BGI Research.\"\n__credits__ = [\"Lin Dechun\"]\n__version__ = \"0.0.1\"\n__maintainer__ = \"Lin Dechun\"\n__email__ = \"lindechun@genomics.cn\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser(description='This Script is to plot about condonUsage variation between host',version='0.0.1')\nparser.add_argument('-p', action='store', dest='prefix', help='Prefix of Output 
File')\n\npara=parser.parse_args()\nprefix=para.prefix\n\ntable=pd.read_table(prefix+\"_CodonPercent.host.locus.bar.txt\",sep=\"\\t\")\ntable.percent=table['percent'].astype(float)\n\ndef variation(df):\n codon=list(set(df.Codon))\n aedes=np.array([0]*len(codon),dtype=np.float64)\n human=aedes.copy()\n monkey=aedes.copy()\n for i,k in df.iterrows():\n if k['Host']=='Aedes':\n aedes[codon.index(k['Codon'])]=k['percent']\n\n elif k['Host']=='Human':\n human[codon.index(k['Codon'])]=k['percent']\n else:\n monkey[codon.index(k['Codon'])]=k['percent']\n g1=monkey-aedes\n g2=aedes-human\n return pd.Series([sum(g1*g2)],index=['variance'])\n\n\ncodon_var=table.groupby([\"locus\",'AmAcid'])[['Codon','Host','percent']].apply(variation).reset_index()\n\n# # codon_var.loc[:,['locus','variance']].plot()\n# codon_var.loc[codon_var.variance==1.2927527018428482]\n\nget_ipython().run_cell_magic(u'R', u'-i codon_var,prefix -w 960 -h 480', u'library(ggplot2)\\nlibrary(Cairo)\\ngtitle<-unlist(strsplit(prefix,\\'/\\'))[1]\\n\\\nCairoPDF(file=paste(prefix,\".variance.pdf\",sep=\"\"),width=15,height=5)\\n\\\nggplot(codon_var,aes(locus))+geom_line(aes(y=variance),group=1,color=\"#1f78b4\")+geom_hline(yintercept=0,color=\"red\")+labs(title=gtitle,x=\"Locus\",y=\"Variance Value\")\\n# +theme(axis.text.x=element_text(angle=45))')\n\n# import numpy as np\n# import seaborn as sns\n# import matplotlib.pyplot as plt\n\n\n# # In[409]:\n\n# sns.distplot(codon_var.locus, hist=False, rug=True, color=\"r\")\n# plt.plot(codon_var.locus,codon_var.variance)\n\n","sub_path":"variation_plot.py","file_name":"variation_plot.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"26276162","text":"# this file largely borrowed from http://wiki.wxpython.org/WorkingWithImages\n\nimport wx\nimport Image # PIL module. 
Only if you use the PIL library.\n\ndef WxBitmapToPilImage( myBitmap ) :\n return WxImageToPilImage( WxBitmapToWxImage( myBitmap ) )\n\ndef WxBitmapToWxImage( myBitmap ) :\n return wx.ImageFromBitmap( myBitmap )\n\n#-----\n\ndef PilImageToWxBitmap( myPilImage ) :\n return WxImageToWxBitmap( PilImageToWxImage( myPilImage ) )\n\ndef PilImageToWxImage( myPilImage ):\n myWxImage = wx.EmptyImage( myPilImage.size[0], myPilImage.size[1] )\n myWxImage.SetData( myPilImage.convert( 'RGB' ).tostring() )\n return myWxImage\n\n# Or, if you want to copy any alpha channel, too (available since wxPython 2.5)\n# The source PIL image doesn't need to have alpha to use this routine.\n# But, a PIL image with alpha is necessary to get a wx.Image with alpha.\n\ndef PilImageToWxImage( myPilImage, copyAlpha=True ) :\n\n hasAlpha = myPilImage.mode[ -1 ] == 'A'\n if copyAlpha and hasAlpha : # Make sure there is an alpha layer copy.\n\n myWxImage = wx.EmptyImage( *myPilImage.size )\n myPilImageCopyRGBA = myPilImage.copy()\n myPilImageCopyRGB = myPilImageCopyRGBA.convert( 'RGB' ) # RGBA --> RGB\n myPilImageRgbData =myPilImageCopyRGB.tostring()\n myWxImage.SetData( myPilImageRgbData )\n myWxImage.SetAlphaData( myPilImageCopyRGBA.tostring()[3::4] ) # Create layer and insert alpha values.\n\n else : # The resulting image will not have alpha.\n\n myWxImage = wx.EmptyImage( *myPilImage.size )\n myPilImageCopy = myPilImage.copy()\n myPilImageCopyRGB = myPilImageCopy.convert( 'RGB' ) # Discard any alpha from the PIL image.\n myPilImageRgbData =myPilImageCopyRGB.tostring()\n myWxImage.SetData( myPilImageRgbData )\n\n return myWxImage\n\n#-----\n\ndef imageToPil( myWxImage ):\n myPilImage = Image.new( 'RGB', (myWxImage.GetWidth(), myWxImage.GetHeight()) )\n myPilImage.fromstring( myWxImage.GetData() )\n return myPilImage\n\ndef WxImageToWxBitmap( myWxImage ) :\n return myWxImage.ConvertToBitmap()\n\ndef GetPILResizedWxImage(filename, width, height):\n img = Image.open(filename)\n (imgWidth, imgHeight) = img.size\n newSize = GetScaledSize(width, height, imgWidth, imgHeight)\n img = img.resize(newSize, Image.ANTIALIAS)\n return PilImageToWxImage(img)\n\ndef GetScaledSize(boxWidth, boxHeight, imgWidth, imgHeight):\n imgRatio = imgWidth / float(imgHeight)\n boxRatio = boxWidth / float(boxHeight)\n widthScale = boxWidth / float(imgWidth)\n heightScale = boxHeight / float(imgHeight)\n \n if imgRatio > boxRatio:\n return (boxWidth, int(imgHeight * widthScale))\n else:\n return (int(imgWidth * heightScale), boxHeight)\n\n\n","sub_path":"ImageUtils.py","file_name":"ImageUtils.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"292940905","text":"# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nTransforms and data augmentation for both image + bbox.\n\"\"\"\nimport random\n\nimport PIL\nimport torch\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as F\nfrom PIL import Image\n\nfrom util.box_ops import box_xyxy_to_cxcywh\nfrom util.misc import interpolate\nimport numpy as np\n\n\ndef crop(image, target, region):\n cropped_image = F.crop(image, *region)\n\n target = target.copy()\n i, j, h, w = region\n\n # should we do something wrt the original size?\n target[\"size\"] = torch.tensor([h, w])\n\n num_bbox = target['labels'].shape[0]\n\n if num_bbox > 0: # empty target do NOT need any process\n\n fields = [\"labels\", \"area\", \"iscrowd\"]\n\n if \"boxes\" in target:\n boxes = target[\"boxes\"]\n max_size = torch.as_tensor([w, h], dtype=torch.float32)\n cropped_boxes = boxes - torch.as_tensor([j, i, j, i])\n cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)\n cropped_boxes = cropped_boxes.clamp(min=0)\n area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)\n target[\"boxes\"] = cropped_boxes.reshape(-1, 4)\n target[\"area\"] = area\n fields.append(\"boxes\")\n\n if \"masks\" in target:\n # FIXME should we update the area here if there are no boxes?\n target['masks'] = target['masks'][:, i:i + h, j:j + w]\n fields.append(\"masks\")\n\n # merged 24 keypoints\n if \"joints_2d\" in target:\n joints_2d = target[\"joints_2d\"]\n joints_2d_visible = target[\"joints_2d_visible\"]\n max_size = torch.as_tensor([w, h], dtype=torch.float32)\n cropped_joints_2d = joints_2d - torch.as_tensor([j, i])[None, :]\n cropped_joints_2d_visible = (joints_2d_visible *\n (cropped_joints_2d < max_size).min(dim=-1)[0])\n\n target[\"joints_2d\"] = cropped_joints_2d\n target[\"joints_2d_visible\"] = cropped_joints_2d_visible\n fields.append(\"joints_2d\")\n fields.append(\"joints_2d_visible\")\n\n if \"joints_3d\" in target:\n fields.append(\"joints_3d\")\n fields.append(\"joints_3d_visible\")\n if \"smpl_pose\" in target:\n fields.append(\"smpl_pose\")\n fields.append(\"smpl_shape\")\n fields.append(\"has_smpl\")\n if \"trans\" in target:\n fields.append(\"trans\")\n if \"camera\" in target:\n fields.append(\"camera\")\n\n if \"scene\" in target:\n target[\"scene\"] = target['scene'][i:i + h, j:j + w]\n\n # remove elements for which the boxes or masks that have zero area\n if \"boxes\" in target or \"masks\" in target:\n # favor boxes selection when defining which elements to keep\n # this is compatible with previous implementation\n if \"boxes\" in target:\n cropped_boxes = target['boxes'].reshape(-1, 2, 2)\n keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)\n else:\n keep = target['masks'].flatten(1).any(1)\n\n for field in fields:\n if len(target[field]) != len(keep):\n print(field)\n print(target[field])\n print(keep)\n target[field] = target[field][keep]\n\n return cropped_image, target\n\n\ndef hflip(image, target):\n flipped_image = F.hflip(image)\n\n w, h = image.size\n\n target = target.copy()\n if \"boxes\" in target:\n boxes = target[\"boxes\"]\n boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])\n target[\"boxes\"] = boxes\n\n if \"masks\" in target:\n target['masks'] = target['masks'].flip(-1)\n\n if \"joints_2d\" in target:\n joints_2d = target[\"joints_2d\"]\n joints_2d_visible = target[\"joints_2d_visible\"]\n joints_2d, joints_2d_visible = 
flip_kp_2d(joints_2d, joints_2d_visible, img_width=w)\n        target[\"joints_2d\"] = joints_2d\n        target[\"joints_2d_visible\"] = joints_2d_visible\n\n    if \"joints_3d\" in target:\n        joints_3d = target[\"joints_3d\"]\n        joints_3d_visible = target[\"joints_3d_visible\"]\n        joints_3d, joints_3d_visible = flip_kp_3d(joints_3d, joints_3d_visible)\n        target[\"joints_3d\"] = joints_3d\n        target[\"joints_3d_visible\"] = joints_3d_visible\n\n    if \"smpl_pose\" in target:\n        smpl_pose = target[\"smpl_pose\"]\n        smpl_pose = flip_smpl_pose(smpl_pose)\n        target[\"smpl_pose\"] = smpl_pose\n\n    if \"scene\" in target:\n        target[\"scene\"] = target['scene'].flip(-1)\n\n    return flipped_image, target\n\n\ndef resize(image, target, size, max_size=None):\n    # size can be min_size (scalar) or (w, h) tuple\n\n    def get_size_with_aspect_ratio(image_size, size, max_size=None):\n        w, h = image_size\n        if max_size is not None:\n            min_original_size = float(min((w, h)))\n            max_original_size = float(max((w, h)))\n            if max_original_size / min_original_size * size > max_size:\n                size = int(round(max_size * min_original_size / max_original_size))\n\n        if (w <= h and w == size) or (h <= w and h == size):\n            return (h, w)\n\n        if w < h:\n            ow = size\n            oh = int(size * h / w)\n        else:\n            oh = size\n            ow = int(size * w / h)\n\n        return (oh, ow)\n\n    def get_size(image_size, size, max_size=None):\n        if isinstance(size, (list, tuple)):\n            return size[::-1]\n        else:\n            return get_size_with_aspect_ratio(image_size, size, max_size)\n\n    size = get_size(image.size, size, max_size)\n    rescaled_image = F.resize(image, size)\n\n    if target is None:\n        return rescaled_image, None\n\n    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))\n    ratio_width, ratio_height = ratios\n\n    target = target.copy()\n    if \"boxes\" in target:\n        boxes = target[\"boxes\"]\n        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])\n        target[\"boxes\"] = scaled_boxes\n\n    if \"area\" in target:\n        area = target[\"area\"]\n        scaled_area = area * (ratio_width * ratio_height)\n        target[\"area\"] = scaled_area\n\n    h, w = size\n    target[\"size\"] = torch.tensor([h, w])\n\n    if \"masks\" in target:\n        target['masks'] = interpolate(\n            target['masks'][:, None].float(), size, mode=\"nearest\")[:, 0] > 0.5\n\n    if \"joints_2d\" in target:\n        joints_2d = target[\"joints_2d\"]\n        scaled_joints_2d = joints_2d * torch.as_tensor([ratio_width, ratio_height])\n        target[\"joints_2d\"] = scaled_joints_2d\n\n    if \"scene\" in target:\n        scene = target[\"scene\"]\n        rescaled_scene = interpolate(scene[None, None, ...].float(), size, mode=\"nearest\")\n        rescaled_scene = rescaled_scene.type(torch.uint8)[0, 0]\n        target[\"scene\"] = rescaled_scene\n\n    return rescaled_image, target\n\n\ndef pad(image, target, padding):\n    # assumes that we only pad on the bottom right corners\n    padded_image = F.pad(image, (0, 0, padding[0], padding[1]))\n    if target is None:\n        return padded_image, None\n    target = target.copy()\n    # PIL's Image.size is (width, height); store the padded size as (h, w)\n    target[\"size\"] = torch.tensor(padded_image.size[::-1])\n    if \"masks\" in target:\n        target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))\n\n    if \"scene\" in target:\n        target[\"scene\"] = torch.nn.functional.pad(target['scene'], (0, padding[0], 0, padding[1]))\n\n    return padded_image, target\n\n\nclass RandomCrop(object):\n    def __init__(self, size):\n        self.size = size\n\n    def __call__(self, img, target):\n        region = T.RandomCrop.get_params(img, self.size)\n        return crop(img, 
target, region)\n\n\nclass RandomSizeCrop(object):\n def __init__(self, min_size: int, max_size: int):\n self.min_size = min_size\n self.max_size = max_size\n\n def __call__(self, img: PIL.Image.Image, target: dict):\n w = random.randint(self.min_size, min(img.width, self.max_size))\n h = random.randint(self.min_size, min(img.height, self.max_size))\n region = T.RandomCrop.get_params(img, [h, w])\n return crop(img, target, region)\n\n\nclass CenterCrop(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, img, target):\n image_width, image_height = img.size\n crop_height, crop_width = self.size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return crop(img, target, (crop_top, crop_left, crop_height, crop_width))\n\n\nclass RandomHorizontalFlip(object):\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, img, target):\n if random.random() < self.p:\n return hflip(img, target)\n return img, target\n\n\nclass RandomResize(object):\n def __init__(self, sizes, max_size=None):\n assert isinstance(sizes, (list, tuple))\n self.sizes = sizes\n self.max_size = max_size\n\n def __call__(self, img, target=None):\n size = random.choice(self.sizes)\n return resize(img, target, size, self.max_size)\n\n\nclass RandomPad(object):\n def __init__(self, max_pad):\n self.max_pad = max_pad\n\n def __call__(self, img, target):\n pad_x = random.randint(0, self.max_pad)\n pad_y = random.randint(0, self.max_pad)\n return pad(img, target, (pad_x, pad_y))\n\n\nclass RandomSelect(object):\n \"\"\"\n Randomly selects between transforms1 and transforms2,\n with probability p for transforms1 and (1 - p) for transforms2\n \"\"\"\n def __init__(self, transforms1, transforms2, p=0.5):\n self.transforms1 = transforms1\n self.transforms2 = transforms2\n self.p = p\n\n def __call__(self, img, target):\n if random.random() < self.p:\n return self.transforms1(img, target)\n return self.transforms2(img, target)\n\n\nclass ToTensor(object):\n def __call__(self, img, target):\n return F.to_tensor(img), target\n\n\nclass RandomErasing(object):\n\n def __init__(self, *args, **kwargs):\n self.eraser = T.RandomErasing(*args, **kwargs)\n\n def __call__(self, img, target):\n return self.eraser(img), target\n\n\nclass Normalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, image, target=None):\n image = F.normalize(image, mean=self.mean, std=self.std)\n if target is None:\n return image, None\n target = target.copy()\n h, w = image.shape[-2:]\n if \"boxes\" in target:\n boxes = target[\"boxes\"]\n boxes = box_xyxy_to_cxcywh(boxes)\n boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)\n target[\"boxes\"] = boxes\n return image, target\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, image, target):\n for t in self.transforms:\n image, target = t(image, target)\n return image, target\n\n def __repr__(self):\n format_string = self.__class__.__name__ + \"(\"\n for t in self.transforms:\n format_string += \"\\n\"\n format_string += \" {0}\".format(t)\n format_string += \"\\n)\"\n return format_string\n\n\n############# Added\n\n\nclass TranKeypoints(object):\n def __init__(self, type='coco'):\n self.type = type\n\n def __call__(self, img, target):\n target = target.copy()\n\n if 'keypoints' in target:\n keypoints_17 = target[\"keypoints\"]\n keypoints_24 = coco17_to_superset(keypoints_17)\n joints_2d = 
keypoints_24[..., :2]\n joints_2d_visible = keypoints_24[..., -1]\n target[\"joints_2d\"] = joints_2d\n target[\"joints_2d_visible\"] = joints_2d_visible\n\n return img, target\n\n\nclass AddSMPLKeys(object):\n def __call__(self, img, target):\n boxes = target[\"boxes\"]\n num_bbox = boxes.shape[0]\n\n if \"joints_2d\" not in target:\n target[\"joints_2d\"] = boxes.new_zeros(num_bbox, 24, 2)\n target[\"joints_2d_visible\"] = boxes.new_zeros(num_bbox, 24)\n\n if \"joints_3d\" not in target:\n target[\"joints_3d\"] = boxes.new_zeros(num_bbox, 24, 3)\n target[\"joints_3d_visible\"] = boxes.new_zeros(num_bbox, 24)\n\n if \"smpl_pose\" not in target:\n target[\"smpl_pose\"] = boxes.new_zeros(num_bbox, 72)\n target[\"smpl_shape\"] = boxes.new_zeros(num_bbox, 10)\n target[\"has_smpl\"] = boxes.new_zeros(num_bbox).long()\n\n if \"camera\" not in target:\n target[\"camera\"] = boxes.new_zeros(num_bbox, 3)\n\n if \"trans\" not in target:\n target[\"trans\"] = boxes.new_zeros(num_bbox, 3)\n\n if \"scene\" not in target:\n _, h, w = img.shape\n # target[\"scene\"] = torch.zeros([h, w], dtype=torch.uint8)\n target[\"scene\"] = torch.zeros([h, w], dtype=torch.int64)\n \n return img, target\n\n\ndef flip_kp_2d(kp, kp_visible, img_width):\n \"\"\"\n Flip augmentation for keypoints\n :param kp:\n :param img_width: (int)\n :return:\n \"\"\"\n flipped_parts = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\n flipped_kp = kp[:, flipped_parts]\n flipped_kp[:, :, 0] = img_width - flipped_kp[:, :, 0]\n flipped_kp_visible = kp_visible[:, flipped_parts]\n return flipped_kp, flipped_kp_visible\n\n\ndef flip_kp_3d(kp, kp_visible):\n \"\"\"\n Flip augmentation for keypoints\n :param kp:\n :param img_width: (int)\n :return:\n \"\"\"\n flipped_parts = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\n flipped_kp = kp[:, flipped_parts]\n flipped_kp[:, :, 0] = 0 - flipped_kp[:, :, 0]\n flipped_kp_visible = kp_visible[:, flipped_parts]\n return flipped_kp, flipped_kp_visible\n\n\ndef flip_smpl_pose(pose):\n \"\"\"Flip pose.\n The flipping is based on SMPL parameters.\n \"\"\"\n flippedParts = [0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11, 15, 16, 17, 12, 13,\n 14, 18, 19, 20, 24, 25, 26, 21, 22, 23, 27, 28, 29, 33,\n 34, 35, 30, 31, 32, 36, 37, 38, 42, 43, 44, 39, 40, 41,\n 45, 46, 47, 51, 52, 53, 48, 49, 50, 57, 58, 59, 54, 55,\n 56, 63, 64, 65, 60, 61, 62, 69, 70, 71, 66, 67, 68]\n num = pose.shape[0]\n pose = pose.reshape(num, 72)\n pose = pose[:, flippedParts]\n # we also negate the second and the third dimension of the axis-angle\n pose[:, 1::3] = -pose[:, 1::3]\n pose[:, 2::3] = -pose[:, 2::3]\n pose = pose.reshape(num, 24, 3)\n return pose\n\n\ndef coco17_to_superset(coco_kpts):\n \"\"\"\n kp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder', # 5\n 'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist', # 10\n 'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle']\n\n 0 - Right Ankle\n 1 - Right Knee\n 2 - Right Hip\n 3 - Left Hip\n 4 - Left Knee\n 5 - Left Ankle\n 6 - Right Wrist\n 7 - Right Elbow\n 8 - Right Shoulder\n 9 - Left Shoulder\n 10 - Left Elbow\n 11 - Left Wrist\n 12 - Neck (LSP definition)\n 13 - Top of Head (LSP definition)\n 14 - Pelvis (MPII definition)\n 15 - Thorax (MPII definition)\n 16 - Spine (Human3.6M definition)\n 17 - Jaw (Human3.6M definition)\n 18 - Head (Human3.6M definition)\n 19 - Nose\n 20 - Left Eye\n 21 - Right Eye\n 22 - Left Ear\n 23 - Right Ear\n :param gt_keypoints: ...x17xM Keypoints 
tensor or array\n :return super_kpts\n \"\"\"\n\n creator_fn = None\n coco_in_superset = [19, 20, 21, 22, 23, 9, # 5\n 8, 10, 7, 11, 6, # 10\n 3, 2, 4, 1, 5, 0 # 15\n ]\n if isinstance(coco_kpts, torch.Tensor):\n creator_fn = torch.zeros\n elif isinstance(coco_kpts, np.ndarray):\n creator_fn = np.zeros\n super_kpts = creator_fn((coco_kpts.shape[:-2]) + (24,) + (coco_kpts.shape[-1],))\n super_kpts[..., coco_in_superset, :] = coco_kpts\n return super_kpts\n\n\ndef coco17to19(coco17pose):\n \"\"\"\n kp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder', # 5\n 'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist', # 10\n 'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle']\n coco19_kp_names = ['neck', 'nose', 'hip', 'l_shoulder', 'l_elbow', 'l_wrist', # 5\n 'l_hip', 'l_knee', 'l_ankle', 'r_shoulder', 'r_elbow', # 10\n 'r_wrist', 'r_hip', 'r_knee', 'r_ankle', 'l_eye', # 15\n 'l_ear', 'r_eye', 'r_ear']\n :param coco17pose: 17x3 coco pose np.array\n :return: 19x3 coco19 pose np.array\n \"\"\"\n coco19pose = np.zeros((19, coco17pose.shape[1]))\n index_array = np.array([1, 15, 17, 16, 18, 3, 9, 4, 10, 5, 11, 6, 12, 7, 13, 8, 14])\n coco19pose[index_array] = coco17pose\n coco19pose[0] = (coco17pose[5] + coco17pose[6]) / 2\n coco19pose[2] = (coco17pose[11] + coco17pose[12]) / 2\n coco19pose[-4:] = coco17pose[0] # Since we have not implement eye and ear yet.\n return coco19pose\n\n\ndef coco19_to_superset(coco19pose):\n \"\"\"\n coco19_kp_names = ['neck', 'nose', 'hip', 'l_shoulder', 'l_elbow', 'l_wrist', # 5\n 'l_hip', 'l_knee', 'l_ankle', 'r_shoulder', 'r_elbow', # 10\n 'r_wrist', 'r_hip', 'r_knee', 'r_ankle', 'l_eye', # 15\n 'l_ear', 'r_eye', 'r_ear']\n kpts_coco19 = [12, 19, 14, 9, 10, 11,\n 3, 4, 5, 8, 7, #10\n 6, 2, 1, 0, 20, #15\n 22, 21, 23]\n 0 - Right Ankle\n 1 - Right Knee\n 2 - Right Hip\n 3 - Left Hip\n 4 - Left Knee\n 5 - Left Ankle\n 6 - Right Wrist\n 7 - Right Elbow\n 8 - Right Shoulder\n 9 - Left Shoulder\n 10 - Left Elbow\n 11 - Left Wrist\n 12 - Neck (LSP definition)\n 13 - Top of Head (LSP definition)\n 14 - Pelvis (MPII definition)\n 15 - Thorax (MPII definition)\n 16 - Spine (Human3.6M definition)\n 17 - Jaw (Human3.6M definition)\n 18 - Head (Human3.6M definition)\n 19 - Nose\n 20 - Left Eye\n 21 - Right Eye\n 22 - Left Ear\n 23 - Right Ear\n :param coco19pose:\n :return:\n \"\"\"\n pass\n # superset_names =\n J24_names = ['Right Ankle',\n 'Right Knee',\n 'Right Hip',\n 'Left Hip',\n 'Left Knee',\n 'Left Ankle',\n 'Right Wrist',\n 'Right Elbow',\n 'Right Shoulder',\n 'Left Shoulder',\n 'Left Elbow',\n 'Left Wrist',\n 'Neck (LSP definition)',\n 'Top of Head (LSP definition)',\n 'Pelvis (MPII definition)',\n 'Thorax (MPII definition)',\n 'Spine (Human3.6M definition)',\n 'Jaw (Human3.6M definition)',\n 'Head (Human3.6M definition)',\n 'Nose',\n 'Left Eye',\n 'Right Eye',\n 'Left Ear',\n 'Right Ear']\n coco19_kp_names = ['neck', 'nose', 'hip', 'l_shoulder', 'l_elbow', 'l_wrist', # 5\n 'l_hip', 'l_knee', 'l_ankle', 'r_shoulder', 'r_elbow', # 10\n 'r_wrist', 'r_hip', 'r_knee', 'r_ankle', 'l_eye', # 15\n 'l_ear', 'r_eye', 'r_ear']\n\n h36m_names = ['Pelvis (MPII definition)',\n 'Left Hip',\n 'Left Knee',\n 'Left Ankle',\n 'Right Hip',\n 'Right Knee',\n 'Right Ankle',\n 'Spine (Human3.6M definition)', # To interpolate\n 'Neck (LSP definition)',\n 'Jaw (Human3.6M definition)', # To interpolate\n 'Head (Human3.6M definition)', # To interpolate\n 'Left Shoulder',\n 'Left Elbow',\n 'Left Wrist',\n 'Right Shoulder',\n 'Right Elbow',\n 'Right Wrist']\n \"\"\"\n 0: 
Pelvis (MPII definition)\n 1: Left Hip\n 2: Left Knee\n 3: Left Ankle\n 4: Right Hip\n 5: Right Knee\n 6: Right Ankle\n 7: Spine (Human3.6M definition)\n 8: Neck (LSP definition)\n 9: Jaw (Human3.6M definition)\n 10: Head (Human3.6M definition)\n 11: Left Shoulder\n 12: Left Elbow\n 13: Left Wrist\n 14: Right Shoulder\n 15: Right Elbow\n 16: Right Wrist\n \"\"\"\n\n superset_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]\n kpts_coco19 = [12, 19, 14, 9, 10, 11,\n 3, 4, 5, 8, 7, # 10\n 6, 2, 1, 0, 20, # 15\n 22, 21, 23]\n\n\ndef PanopticJ15_to_Superset():\n \"\"\"\n 0 - Right Ankle\n 1 - Right Knee\n 2 - Right Hip\n 3 - Left Hip\n 4 - Left Knee\n 5 - Left Ankle\n 6 - Right Wrist\n 7 - Right Elbow\n 8 - Right Shoulder\n 9 - Left Shoulder\n 10 - Left Elbow\n 11 - Left Wrist\n 12 - Neck (LSP definition)\n 13 - Top of Head (LSP definition)\n 14 - Pelvis (MPII definition)\n 15 - Thorax (MPII definition)\n 16 - Spine (Human3.6M definition)\n 17 - Jaw (Human3.6M definition)\n 18 - Head (Human3.6M definition)\n 19 - Nose\n 20 - Left Eye\n 21 - Right Eye\n 22 - Left Ear\n 23 - Right Ear\n BoneJointOrder = { [2 1 3] ... %{headtop, neck, bodyCenter}\n , [1 4 5 6] ... %{neck, leftShoulder, leftArm, leftWrist}\n , [3 7 8 9] ... %{neck, leftHip, leftKnee, leftAnkle}\n , [1 10 11 12] ... %{neck, rightShoulder, rightArm, rightWrist}\n , [3 13 14 15]}; %{neck, rightHip, rightKnee, rightAnkle}\n :return:\n \"\"\"\n pass\n Panoptic_to_J15 = [12, 13, 14, 9, 10, 11, # 5s\n 3, 4, 5, 8, 7, 6, # 11\n 2, 1, 0\n ]\n","sub_path":"datasets/my_transforms.py","file_name":"my_transforms.py","file_ext":"py","file_size_in_byte":22626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"482533523","text":"\"\"\" Utility functions and classes for SRP\n\nContext : SRP\nModule : GP\nVersion : 1.0.0\nAuthor : Stefano Covino\nDate : 29/01/2020\nE-mail : stefano.covino@brera.inaf.it\nURL: : http://www.merate.mi.astro.it/utenti/covino\n\nUsage : to be imported\n\nRemarks : From C. E. Rasmussen & C. K. I. 
Williams, Gaussian Processes\n for Machine Learning, the MIT Press, 2006\n\nHistory : (29/01/2020) First version.\n\n\"\"\"\n\n\nimport numpy as np\nimport scipy.special as ss\n\n\ndef MaternKernel (A,r,L,nu):\n fi = (2**(1-nu))/ss.gamma(nu)\n sem = np.sqrt(2*nu)*r/L\n se = sem**nu\n te = ss.kv(nu,sem)\n return A*fi*se*te\n","sub_path":"Misc/SRPStatistics/GP/MaternKernel.py","file_name":"MaternKernel.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"616141636","text":"from sqlalchemy import Column, String, Integer, Boolean\nfrom sqlalchemy.orm.exc import FlushError\n\nfrom base import Base, Session\n\n\nclass Alarm(Base):\n \"\"\"Alarm Class\"\"\"\n __tablename__ = 'alarm'\n\n id = Column('id',Integer, primary_key=True)\n message = Column('message',String(32))\n active = Column('activate', Boolean)\n\n def __init__(self, message):\n self.message = message\n self.active = True\n\n def set_message(self, message):\n self.message = message\n self.save()\n\n def set_active(self, active):\n self.active = active\n self.save()\n\n def save(self, commit=True):\n session = Session()\n try:\n session.add(self)\n except FlushError:\n pass\n\n if commit:\n session.commit()\n\n def delete(self, commit=True):\n session = Session()\n session.delete(self)\n\n if commit:\n session.commit()\n","sub_path":"alarm.py","file_name":"alarm.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"131737565","text":"#!python3\r\n\r\n#libs\r\nimport discord\r\nfrom discord.ext import commands\r\nimport asyncio\r\n\r\nclient = discord.Client()\r\n\r\ndescription = '''The Bot that makes your rp experience much eaiser! Made by Councilman of Lore, Verlzonia'''\r\nbot = commands.Bot(command_prefix='!rp ', description=description)\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(bot.user.name)\r\n print(bot.user.id)\r\n print('------')\r\n\r\n@bot.command()\r\nasync def greet():\r\n await bot.say('```Hello!```')\r\n\r\n@bot.command()\r\nasync def info():\r\n await bot.say('```I am a bot that serves the purpose of assisting people with the RP in the region of TOS, I was created by Councilman of Lore Verl with help from Karenus```')\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n if message.content.startswith('donttreadonme'):\r\n commands(message)\r\n await bot.process_commands(message)\r\n\r\nasync def commands(message):\r\n await client.send_message(message.user, \"The quick brown fox jumped over the lazy cow.\")\r\n\r\n@bot.command()\r\nasync def cmds():\r\n await bot.say(\"```!rp greet : the bot will greet you \\n !rp info : the bot will tell you about itself \\n !rp cmds : you already know this```\")\r\n\r\nbot.run('MzAwMTAyNjExODMzNzE2NzQ4.C8noXA.278MKMSmiLADXZOkmdd0E997T9A')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"292214095","text":"# Copyright (c) 2020 Graphcore Ltd. 
All rights reserved.\nimport os\nimport gc\nimport pytest\nimport shutil\nimport torch\nimport poptorch\nimport popart\nfrom poptorch.optim import SGD\nimport import_helper\nfrom train import TrainingModelWithLoss\nimport datasets\nimport models\nfrom utils import get_train_accuracy, get_test_accuracy, run_script\n\n\n@pytest.mark.ipus(1)\ndef test_recomputation_checkpoints():\n gc.collect()\n # run the model with and without recomputation\n\n def train(model, recompute):\n input_data = torch.ones(1, 3, 224, 224)\n labels_data = torch.ones(1).long()\n opts = poptorch.Options()\n if recompute:\n opts._Popart.set(\"autoRecomputation\", int(popart.RecomputationType.Standard))\n opts.outputMode(poptorch.OutputMode.All)\n opts.randomSeed(0)\n opts.Training.gradientAccumulation(1)\n opts.Precision.enableStochasticRounding(False)\n model_with_loss = TrainingModelWithLoss(model)\n optimizer = SGD(model_with_loss.parameters(), lr=0.01, momentum=0., use_combined_accum=True)\n training_model = poptorch.trainingModel(model_with_loss, opts, optimizer=optimizer)\n predictions = []\n for _ in range(3):\n preds, _, _ = training_model(input_data, labels_data)\n predictions.append(preds)\n training_model.destroy()\n return predictions\n\n class Options():\n def __init__(self):\n self.model = \"resnet18\"\n self.precision = \"16.16\"\n self.norm_type = \"group\"\n self.norm_eps = 1e-5\n self.norm_num_groups = 32\n self.normalization_location = \"none\"\n self.pipeline_splits = []\n self.eight_bit_io = False\n self.num_io_tiles = 0\n args = Options()\n torch.manual_seed(0)\n model = models.get_model(args, datasets.datasets_info[\"cifar10\"], pretrained=True)\n no_recompute_predictions = train(model, False)\n args.recompute_checkpoints = [\"conv\", \"norm\"]\n torch.manual_seed(0)\n model = models.get_model(args, datasets.datasets_info[\"cifar10\"], pretrained=True)\n recompute_predictions = train(model, True)\n for pred1, pred2 in zip(no_recompute_predictions, recompute_predictions):\n assert torch.allclose(pred1, pred2, atol=1e-04)\n\n\n@pytest.mark.ipus(4)\ndef test_replicas_reduction():\n gc.collect()\n\n def common_opts():\n opts = poptorch.Options()\n opts.Training.accumulationAndReplicationReductionType(poptorch.ReductionType.Mean)\n opts.outputMode(poptorch.OutputMode.All)\n opts.randomSeed(0)\n opts.Training.gradientAccumulation(1)\n return opts\n\n def run_model(opts):\n input_data = torch.ones(4, 1)\n labels_data = torch.ones(4).long()\n model = torch.nn.Linear(1, 2, bias=False)\n model_with_loss = TrainingModelWithLoss(model, 0.1)\n optimizer = SGD(model_with_loss.parameters(), lr=0.1, momentum=0., use_combined_accum=True)\n training_model = poptorch.trainingModel(model_with_loss, opts, optimizer=optimizer)\n for _ in range(3):\n preds, loss, _ = training_model(input_data, labels_data)\n # return the weights of the model\n return list(model_with_loss.model.named_parameters())[0][1], loss\n\n # Single replica\n opts = common_opts()\n opts.replicationFactor(1)\n single_replica_weights, single_replica_loss = run_model(opts)\n # 4 replica running\n gc.collect()\n opts = common_opts()\n opts.replicationFactor(4)\n replicated_weights, replicated_loss = run_model(opts)\n\n assert torch.allclose(single_replica_weights, replicated_weights, atol=1e-05)\n assert torch.allclose(single_replica_loss, replicated_loss, atol=1e-05)\n\n\n@pytest.mark.ipus(1)\ndef test_generated():\n gc.collect()\n run_script(\"train/train.py\", f\"--data generated --model resnet18 --epoch 1 --precision 16.16 --validation-mode none 
--optimizer sgd_combined --lr 0.001 --gradient-accumulation 128 --batch-size 1 --dataloader-worker 4 --seed 0\")\n\n\n@pytest.mark.ipus(1)\n@pytest.mark.parametrize(\"precision\", [\"16.16\", \"32.32\"])\ndef test_synthetic(precision):\n gc.collect()\n run_script(\"train/train.py\", f\"--data synthetic --model resnet18 --epoch 1 --precision {precision} --validation-mode none --optimizer sgd_combined --lr 0.001 --gradient-accumulation 64 --batch-size 1 --dataloader-worker 4 --seed 0\")\n\n\n@pytest.mark.parametrize(\"label_smoothing\", [0.0, 1.0, 0.1, 0.5])\ndef test_loss_function(label_smoothing):\n torch.manual_seed(0)\n inp = torch.rand(4, 10) * 10 - 5 # create random input between [-5,5)\n label = torch.ones(4).long()\n # calculate the ground truth\n log_pred = torch.nn.functional.log_softmax(inp, dim=-1)\n ground_truth = - torch.mean(torch.sum((label_smoothing / 10.0) * log_pred, dim=1) + (1.0 - label_smoothing) * log_pred[:, 1])\n model_with_loss = TrainingModelWithLoss(lambda x: x, label_smoothing=label_smoothing)\n _, loss, _ = model_with_loss(inp, label)\n assert torch.allclose(ground_truth, loss, atol=1e-05)\n\n\n@pytest.mark.ipus(1)\ndef test_mixup():\n gc.collect()\n run_script(\"train/train.py\", f\"--mixup-alpha 0.1 --data generated --model resnet18 --epoch 1 --validation-mode none --optimizer sgd_combined --batch-size 3 --dataloader-worker 1 --seed 0\")\n\n\n@pytest.mark.ipus(1)\ndef test_cutmix():\n gc.collect()\n run_script(\"train/train.py\", f\"--cutmix-lambda-low 0.0 --cutmix-lambda-high 1.0 --data generated --model resnet18 --epoch 1 --validation-mode none --optimizer sgd_combined --batch-size 3 --dataloader-worker 1 --seed 0\")\n\n\nclass TestSynthetic:\n @pytest.mark.ipus(2)\n @pytest.mark.ipu_version(\"ipu2\")\n def test_synthetic_mixed_precision(self):\n gc.collect()\n run_script(\"train/train.py\", \"--data synthetic --model resnet18 --epoch 1 --precision 16.32 --pipeline-splits layer4/0 \"\n \"--validation-mode none --optimizer sgd_combined --lr 0.001 --gradient-accumulation 64 --dataloader-worker 4 --seed 0\")\n\n\nclass TestTrainCIFAR10:\n @pytest.mark.ipus(1)\n def test_single_ipu_validation_groupnorm(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --model resnet18 --epoch 3 --precision 16.16 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 \"\n \"--norm-type group --norm-num-groups 32 --enable-stochastic-rounding --dataloader-worker 4 --seed 0\")\n acc = get_test_accuracy(out)\n assert acc > 15.0\n\n\n @pytest.mark.ipus(1)\n @pytest.mark.ipu_version(\"ipu2\")\n def test_single_ipu_validation_batchnorm(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --model resnet18 --epoch 2 --precision 16.16 --optimizer sgd_combined --lr 0.1 --gradient-accumulation 8 \"\n \"--norm-type batch --batch-size 16 --enable-stochastic-rounding --dataloader-worker 4 --seed 0\")\n acc = get_test_accuracy(out)\n assert acc > 15.0\n\n\n @pytest.mark.ipus(2)\n def test_replicas(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --model resnet18 --epoch 2 --replicas 2 --precision 16.16 --validation-mode none --optimizer sgd_combined --lr 0.1 \"\n \"--gradient-accumulation 32 --enable-stochastic-rounding --dataloader-worker 4 --seed 0\")\n acc = get_train_accuracy(out)\n assert acc > 15.0\n\n\n @pytest.mark.ipus(2)\n def test_efficient_net(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --epoch 4 --model efficientnet-b0 --precision 16.32 
--validation-mode none --optimizer sgd_combined --lr 0.1 --gradient-accumulation 64 \"\n \"--pipeline-splits blocks/2/1 --norm-type group --norm-num-groups 4 --enable-stochastic-rounding --dataloader-worker 4 --seed 0\")\n acc = get_train_accuracy(out)\n assert acc > 15.0\n\n\n @pytest.mark.ipus(1)\n def test_full_precision(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --epoch 2 --model resnet18 --precision 32.32 --optimizer sgd_combined --lr 0.1 --batch-size 1 --gradient-accumulation 64 --dataloader-worker 4 --seed 0\")\n acc = get_train_accuracy(out)\n assert acc > 15.0\n\n\n @pytest.mark.ipus(2)\n @pytest.mark.ipu_version(\"ipu2\")\n def test_mixed_precision(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --epoch 2 --model resnet18 --pipeline-splits layer4/0 --precision 16.32 --optimizer sgd_combined \"\n \"--lr 0.1 --batch-size 1 --gradient-accumulation 64 --validation-mode none --dataloader-worker 4 --seed 0\")\n acc = get_train_accuracy(out)\n assert acc > 15.0\n\n @pytest.mark.ipus(1)\n def test_single_ipu_mobilenet_v3_small_validation_batchnorm(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --model mobilenet-v3-small --epoch 3 --precision 16.32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 \"\n \"--norm-type batch --enable-stochastic-rounding --dataloader-worker 4 --seed 0\")\n acc = get_test_accuracy(out)\n assert acc > 15.0\n\n @pytest.mark.ipus(1)\n @pytest.mark.ipu_version(\"ipu2\")\n def test_single_ipu_mobilenet_v3_large_validation_batchnorm(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --model mobilenet-v3-large --epoch 3 --precision 16.32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 \"\n \"--norm-type batch --enable-stochastic-rounding --dataloader-worker 4 --seed 0\")\n acc = get_test_accuracy(out)\n assert acc > 15.0\n\n @pytest.mark.ipus(1)\n @pytest.mark.ipu_version(\"ipu2\")\n def test_half_resolution_training(self):\n gc.collect()\n out = run_script(\"train/train.py\", \"--data cifar10 --model resnet18 --epoch 1 --precision 16.32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 \"\n \"--norm-type batch --dataloader-worker 4 --half-res-training --fine-tune-epoch 1 --fine-tune-first-trainable-layer layer3 --weight-avg-strategy exponential \"\n \"--weight-avg-exp-decay 0.97 --checkpoint-path test_half_resolution_training --seed 0\")\n acc = get_test_accuracy(out)\n assert acc > 15.0\n # remove folder\n parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n shutil.rmtree(os.path.join(parent_dir, \"test_half_resolution_training\"))\n\n\nclass TestRestoreCheckpoint:\n @pytest.mark.ipus(1)\n def test_restore_train(self):\n gc.collect()\n # create a model\n out = run_script(\"train/train.py\", \"--data cifar10 --epoch 2 --model resnet18 --precision 16.16 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 --seed 0 \"\n \"--validation-mode none --norm-type group --norm-num-groups 32 --checkpoint-path restore_test_path_test_restore_train --dataloader-worker 4\")\n saved_train_acc = get_train_accuracy(out)\n # reload the model\n out = run_script(\"train/restore.py\", \"--checkpoint-path restore_test_path_test_restore_train/resnet18_cifar10_1.pt\")\n acc = get_train_accuracy(out)\n assert acc > saved_train_acc - 5.0\n # remove folder\n parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n 
shutil.rmtree(os.path.join(parent_dir, \"restore_test_path_test_restore_train\"))\n\n\n    @pytest.mark.ipus(1)\n    def test_validation(self):\n        gc.collect()\n        # create a model\n        out = run_script(\"train/train.py\", \"--data cifar10 --epoch 1 --model resnet18 --precision 16.16 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 --seed 0 \"\n                                           \"--norm-type group --norm-num-groups 32 --checkpoint-path restore_test_path_test_validation --dataloader-worker 4\")\n        saved_test_acc = get_test_accuracy(out)\n        # validate the model\n        out = run_script(\"train/validate.py\", \"--checkpoint-path restore_test_path_test_validation/resnet18_cifar10_1.pt\")\n        acc = get_test_accuracy(out)\n        # close enough\n        assert abs(saved_test_acc - acc) < 0.01\n        # remove folder\n        parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n        shutil.rmtree(os.path.join(parent_dir, \"restore_test_path_test_validation\"))\n\n\n    @pytest.mark.ipus(1)\n    def test_weight_avg(self):\n        gc.collect()\n        parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n        out1 = run_script(\"train/train.py\", \"--data cifar10 --epoch 3 --model resnet18 --precision 16.16 --weight-avg-strategy mean --norm-type group \"\n                          \"--norm-num-groups 32 --optimizer sgd_combined --lr 0.1 --batch-size 2 --gradient-accumulation 32 --checkpoint-path restore_test_path_weight_avg \"\n                          \"--weight-avg-N 2 --dataloader-worker 4 --seed 0\")\n        os.remove(os.path.join(parent_dir, \"restore_test_path_weight_avg\", \"resnet18_cifar10_3_averaged.pt\"))\n        _ = run_script(\"train/weight_avg.py\", \"--checkpoint-path restore_test_path_weight_avg --weight-avg-strategy mean --weight-avg-N 2\")\n        out2 = run_script(\"train/validate.py\", \"--checkpoint-path restore_test_path_weight_avg/resnet18_cifar10_3_averaged.pt\")\n        acc1 = get_test_accuracy(out1)\n        # accuracy reported by the standalone validation of the averaged checkpoint\n        acc2 = get_test_accuracy(out2)\n        assert acc1 > 15\n        assert acc1 == acc2\n        shutil.rmtree(os.path.join(parent_dir, \"restore_test_path_weight_avg\"))\n\n    @pytest.mark.ipus(1)\n    def test_mixup_cutmix_validation_weight_avg(self):\n        # Only make sure that checkpoint loading works with mixup model wrapper.\n        gc.collect()\n        parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n        run_script(\"train/train.py\", f\"--mixup-alpha 0.1 --cutmix-lambda-low 0.2 --cutmix-lambda-high 0.8 --data generated --checkpoint-path test_mixup_cutmix_validation_weight_avg --weight-avg-strategy exponential --weight-avg-exp-decay 0.97 --model resnet18 --epoch 2 --validation-mode after --optimizer sgd_combined --batch-size 4 --dataloader-worker 1 --seed 0\")\n        shutil.rmtree(os.path.join(parent_dir, \"test_mixup_cutmix_validation_weight_avg\"))\n\n    @pytest.mark.ipus(1)\n    @pytest.mark.ipu_version(\"ipu2\")\n    def test_mixup_cutmix_restore_train(self):\n        # Only make sure that checkpoint loading works with mixup model wrapper.\n        gc.collect()\n        parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n        run_script(\"train/train.py\", f\"--mixup-alpha 0.1 --cutmix-lambda-low 0.5 --cutmix-lambda-high 0.5 --data generated --checkpoint-path test_mixup_cutmix_restore_train --model resnet18 --epoch 2 --validation-mode none --optimizer sgd_combined --batch-size 4 --dataloader-worker 1 --seed 0\")\n        run_script(\"train/restore.py\", \"--checkpoint-path test_mixup_cutmix_restore_train/resnet18_generated_1.pt\")\n        shutil.rmtree(os.path.join(parent_dir, 
\"test_mixup_cutmix_restore_train\"))\n","sub_path":"applications/pytorch/cnns/tests/test_train.py","file_name":"test_train.py","file_ext":"py","file_size_in_byte":15371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"569627574","text":"#! /usr/bin/env python\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport functools\nimport logging\nimport os\nimport sys\nfrom collections.abc import Iterable\nfrom io import BytesIO\nfrom typing import BinaryIO, List, Optional, TextIO, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torchvision.transforms.functional as F\nfrom torchvision.io import decode_image\n\nfrom ludwig.constants import CROP_OR_PAD, INTERPOLATE\nfrom ludwig.utils.data_utils import get_abs_path\nfrom ludwig.utils.fs_utils import is_http, open_file, path_exists, upgrade_http\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_gray_default_image(num_channels: int, height: int, width: int) -> np.ndarray:\n return np.full((num_channels, height, width), 128, dtype=np.uint8)\n\n\ndef get_average_image(image_lst: List[np.ndarray]) -> np.array:\n return np.mean([x for x in image_lst if x is not None], axis=(0))\n\n\n@functools.lru_cache(maxsize=32)\ndef get_image_from_http_bytes(img_entry) -> BytesIO:\n import requests\n\n data = requests.get(img_entry, stream=True)\n if data.status_code == 404:\n upgraded = upgrade_http(img_entry)\n if upgraded:\n logger.info(f\"reading image url {img_entry} failed. 
upgrading to https and retrying\")\n return get_image_from_http_bytes(upgraded)\n else:\n raise requests.exceptions.HTTPError(f\"reading image url {img_entry} failed and cannot be upgraded to https\")\n return BytesIO(data.raw.read())\n\n\ndef get_image_from_path(\n src_path: Union[str, torch.Tensor], img_entry: Union[str, bytes], ret_bytes: bool = False\n) -> Union[BytesIO, BinaryIO, TextIO, bytes, str]:\n if not isinstance(img_entry, str):\n return img_entry\n if is_http(img_entry):\n if ret_bytes:\n # Returns BytesIO.\n return get_image_from_http_bytes(img_entry)\n return img_entry\n if src_path or os.path.isabs(img_entry):\n return get_abs_path(src_path, img_entry)\n if path_exists(img_entry):\n with open_file(img_entry, \"rb\") as f:\n if ret_bytes:\n return f.read()\n return f\n else:\n return bytes(img_entry, \"utf-8\")\n\n\ndef is_image(src_path: str, img_entry: Union[bytes, str], column: str) -> bool:\n if not isinstance(img_entry, str):\n return False\n try:\n import imghdr\n\n img = get_image_from_path(src_path, img_entry, True)\n if isinstance(img, bytes):\n return imghdr.what(None, img) is not None\n return imghdr.what(img) is not None\n except Exception as e:\n logger.warning(f\"While assessing potential image in is_image() for column {column}, encountered exception: {e}\")\n return False\n\n\n# For image inference, want to bias towards both readable images, but also account for unreadable (i.e. expired) urls\n# with image extensions\ndef is_image_score(src_path, img_entry, column: str):\n if is_image(src_path, img_entry, column):\n return 1\n elif isinstance(img_entry, str) and img_entry.lower().endswith((\".png\", \".jpg\", \".jpeg\", \".tiff\", \".bmp\", \".gif\")):\n return 0.5\n return 0\n\n\n@functools.lru_cache(maxsize=32)\ndef read_image(img: Union[str, bytes, BytesIO, torch.Tensor], num_channels: Optional[int] = None) -> torch.Tensor:\n \"\"\"Returns a tensor with CHW format.\n\n If num_channels is not provided, the image is read in unchanged format. Returns None if the image could not be read.\n \"\"\"\n if isinstance(img, torch.Tensor):\n return img\n if isinstance(img, str):\n return read_image_from_str(img, num_channels)\n if isinstance(img, bytes):\n with BytesIO(img) as buffer:\n buffer_view = buffer.getbuffer()\n image_tensor = decode_image(torch.frombuffer(buffer_view, dtype=torch.uint8))\n del buffer_view\n return image_tensor\n if isinstance(img, BytesIO):\n buffer_view = img.getbuffer()\n try:\n image_tensor = decode_image(torch.frombuffer(buffer_view, dtype=torch.uint8))\n del buffer_view\n return image_tensor\n except RuntimeError as e:\n logger.warning(f\"Encountered torchvision error while reading {img}: {e}\")\n logger.warning(f\"Could not read image {img}, unsupported type {type(img)}\")\n\n\n@functools.lru_cache(maxsize=32)\ndef read_image_from_str(img: str, num_channels: Optional[int] = None) -> torch.Tensor:\n try:\n from torchvision.io import ImageReadMode, read_image\n except ImportError:\n logger.error(\n \" torchvision is not installed. 
\"\n \"In order to install all image feature dependencies run \"\n \"pip install ludwig[image]\"\n )\n sys.exit(-1)\n\n try:\n if num_channels == 1:\n return read_image(img, mode=ImageReadMode.GRAY)\n elif num_channels == 2:\n return read_image(img, mode=ImageReadMode.GRAY_ALPHA)\n elif num_channels == 3:\n return read_image(img, mode=ImageReadMode.RGB)\n elif num_channels == 4:\n return read_image(img, mode=ImageReadMode.RGB_ALPHA)\n else:\n return read_image(img)\n except Exception as e:\n upgraded = upgrade_http(img)\n if upgraded:\n logger.info(f\"reading image url {img} failed due to {e}. upgrading to https and retrying\")\n return read_image_from_str(upgraded, num_channels)\n logger.info(f\"reading image url {img} failed due to {e}\")\n return None\n\n\ndef pad(\n img: torch.Tensor,\n new_size: Union[int, Tuple[int, int]],\n) -> torch.Tensor:\n \"\"\"torchscript-compatible implementation of pad.\n\n Args:\n img (torch.Tensor): image with shape [..., height, width] to pad\n new_size (Union[int, Tuple[int, int]]): size to pad to. If int, resizes to square image of that size.\n\n Returns:\n torch.Tensor: padded image of size [..., size[0], size[1]] or [..., size, size] if size is int.\n \"\"\"\n new_size = to_tuple(new_size)\n old_size = img.shape[-2:]\n pad_size = (torch.tensor(new_size) - torch.tensor(old_size)) / 2\n padding = torch.cat((torch.floor(pad_size), torch.ceil(pad_size)))\n padding[padding < 0] = 0\n padding = [int(x) for x in padding]\n return F.pad(img, padding=padding, padding_mode=\"edge\")\n\n\ndef crop(\n img: torch.Tensor,\n new_size: Union[int, Tuple[int, int]],\n) -> torch.Tensor:\n \"\"\"torchscript-compatible implementation of crop.\n\n Args:\n img (torch.Tensor): image with shape [..., height, width] to crop\n size (Union[int, Tuple[int, int]]): size to crop to. If int, crops to square image of that size.\n\n Returns:\n torch.Tensor: cropped image of size [..., size[0], size[1]] or [..., size, size] if size is int.\n \"\"\"\n new_size = to_tuple(new_size)\n return F.center_crop(img, output_size=new_size)\n\n\ndef crop_or_pad(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]):\n \"\"\"torchscript-compatible implementation of resize using constants.CROP_OR_PAD.\n\n Args:\n img (torch.Tensor): image with shape [..., height, width] to resize\n new_size (Union[int, Tuple[int, int]]): size to resize to. If int, resizes to square image of that size.\n\n Returns:\n torch.Tensor: resized image of size [..., size[0], size[1]] or [..., size, size] if size is int.\n \"\"\"\n new_size = to_tuple(new_size)\n if list(new_size) == list(img.shape[-2:]):\n return img\n img = pad(img, new_size)\n img = crop(img, new_size)\n return img\n\n\ndef resize_image(\n img: torch.Tensor,\n new_size: Union[int, Tuple[int, int]],\n resize_method: str,\n crop_or_pad_constant: str = CROP_OR_PAD,\n interpolate_constant: str = INTERPOLATE,\n) -> torch.Tensor:\n \"\"\"torchscript-compatible implementation of resize.\n\n Args:\n img (torch.Tensor): image with shape [..., height, width] to resize\n new_size (Union[int, Tuple[int, int]]): size to resize to. If int, resizes to square image of that size.\n resize_method (str): method to use for resizing. 
Either constants.CROP_OR_PAD or constants.INTERPOLATE.\n\n    Returns:\n        torch.Tensor: resized image of size [..., size[0], size[1]] or [..., size, size] if size is int.\n    \"\"\"\n    new_size = to_tuple(new_size)\n    if list(img.shape[-2:]) != list(new_size):\n        if resize_method == crop_or_pad_constant:\n            return crop_or_pad(img, new_size)\n        elif resize_method == interpolate_constant:\n            return F.resize(img, new_size)\n        raise ValueError(f\"Invalid image resize method: {resize_method}\")\n    return img\n\n\ndef grayscale(img: torch.Tensor) -> torch.Tensor:\n    \"\"\"Grayscales RGB image.\"\"\"\n    return F.rgb_to_grayscale(img)\n\n\ndef num_channels_in_image(img: torch.Tensor):\n    \"\"\"Returns number of channels in image.\"\"\"\n    if img is None or img.ndim < 2:\n        raise ValueError(\"Invalid image data\")\n\n    if img.ndim == 2:\n        return 1\n    else:\n        return img.shape[0]\n\n\ndef to_tuple(v: Union[int, Tuple[int, int]]) -> Tuple[int, int]:\n    \"\"\"Converts int or tuple to tuple of ints.\"\"\"\n    if torch.jit.isinstance(v, int):\n        return v, v\n    else:\n        return v\n\n\ndef to_np_tuple(prop: Union[int, Iterable]) -> np.ndarray:\n    \"\"\"Creates a np array of length 2 from a Conv2D property.\n\n    E.g., stride=(2, 3) gets converted into np.array([2, 3]), where the\n    height_stride = 2 and width_stride = 3. stride=2 gets converted into\n    np.array([2, 2]).\n    \"\"\"\n    if type(prop) == int:\n        return np.ones(2).astype(int) * prop\n    elif isinstance(prop, Iterable) and len(prop) == 2:\n        return np.array(list(prop)).astype(int)\n    elif type(prop) == np.ndarray and prop.size == 2:\n        return prop.astype(int)\n    else:\n        raise TypeError(f\"prop must be int or iterable of length 2, but is {prop}.\")\n\n\ndef get_img_output_shape(\n    img_height: int,\n    img_width: int,\n    kernel_size: Union[int, Tuple[int]],\n    stride: Union[int, Tuple[int]],\n    padding: Union[int, Tuple[int], str],\n    dilation: Union[int, Tuple[int]],\n) -> Tuple[int]:\n    \"\"\"Returns the height and width of an image after a 2D img op.\n\n    Currently supported for Conv2D, MaxPool2D and AvgPool2d ops.\n    \"\"\"\n    if padding == \"same\":\n        return (img_height, img_width)\n    elif padding == \"valid\":\n        padding = np.zeros(2)\n    else:\n        padding = to_np_tuple(padding)\n\n    kernel_size = to_np_tuple(kernel_size)\n    stride = to_np_tuple(stride)\n    dilation = to_np_tuple(dilation)\n    shape = np.array([img_height, img_width])\n\n    out_shape = np.floor(((shape + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1)\n\n    return tuple(out_shape.astype(int))\n","sub_path":"ludwig/utils/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":11271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"101557391","text":"#!/usr/bin/python\n\nimport sys\ntry:\n\twith open(sys.argv[1]) as f:\n\t\tfor line in f:\n\t\t\tname, age, bs = line.split()\n\t\t\tage = int(age)\n\t\t\tbs = float(bs)\n\t\t\tprint(name, age, bs)\nexcept IOError:\n\tprint(\"Cannot open file\")\n\tsys.exit(1)\n","sub_path":"test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"384472944","text":"## Animal is-a object (yes, sort of confusing) look at the extra credit\nclass Animal(object):\n\n    def __init__(self, size):\n        self.size = size\n\n## Dog is-a Animal\nclass Dog(Animal):\n\n    def __init__(self, name):\n        ## dog has-a name attribute\n        self.name = name\n\n## Cat is-a Animal\nclass Cat(Animal):\n\n    def __init__(self, 
name):\n ## Cat has-a name attribute\n self.name = name\n\n## Person is-a Object\nclass Person(object):\n\n def __init__(self, name):\n ## Person has-a name attribute\n self.name = name\n\n ## Person has-a pet of some kind\n self.pet = None\n\n## Employee is-a Person\nclass Employee(Person):\n\n def __init__(self, name, salary):\n ## Employee has-a super attribute?\n super(Employee, self).__init__(name)\n ## Employee has a salary\n self.salary = salary\n\n## Fish is-a Object\nclass Fish(object):\n def __init__(self, color):\n self.color = color\n\n## Salmon is-a Fish\nclass Salmon(Fish):\n pass\n\n## Halibut is-a Fish\nclass Halibut(Fish):\n pass\n\n## rover is-a Dog\nrover = Dog(\"Rover\")\n\n## satan is-a Cat\nsatan = Cat(\"Satan\")\n\n## mary is a Person\nmary = Person(\"Mary\")\n\n## mary's pet is-a Satan\nmary.pet = satan\n\n## frank is-a Employee\nfrank = Employee(\"Frank\", 120000)\n\n## frank's pet is-a rover\nfrank.pet = rover\n\n## flipper is-a Fish\nflipper = Fish(\"White\")\n\n## crouse is-a Salmon\ncrouse = Salmon(\"White\")\n\n## harry is-a Halibut\nharry = Halibut(\"Iksnaphet\")\n\nclass Book(object):\n\n def __init__(self, cover):\n self.cover = cover\n\nclass Popscience(Book):\n\n def __init__(self, cover, chapters):\n self.chapters = chapters\n super(Popscience, self).__init__(cover)\n\nclass Stories(Book):\n\n def __init__(self, chapters):\n self.chapters = chapters\n\nsi_chapters = ([1, 2, 3])\n\nsocial_intelligence = Popscience(\"soft cover\", si_chapters)\n\nprint(social_intelligence.cover)\n\n#print(social_intelligence.chapters)\n\nms_chapters = ({'1': 'inleiding',\n '2': 'middenstuk',\n '3': 'slot'})\n\nmisdaad_en_straf = Stories(ms_chapters)\n\n#print(misdaad_en_straf.chapters)\n","sub_path":"ex42.py","file_name":"ex42.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"35696788","text":"from otree.api import (\n models,\n widgets,\n BaseConstants,\n BaseSubsession,\n BaseGroup,\n BasePlayer,\n Currency as c,\n currency_range,\n)\nfrom timeit import default_timer as timer\nimport numpy as np\nimport pandas as pd\nimport time\n\nauthor = 'Your name here'\n\ndoc = \"\"\"\nYour app description\n\"\"\"\n\ndef practice_generator(treatment):\n\n rewards = [12.3, 19.7, 8.55, 6.35, 16.6, 23.4]\n risks = [82, 58, 82, 41, 58, 41]\n certainty = [6.85, 15.67, 5.79, 1.36, 7.44, 14.11]\n display = [0, 1, 1, 1, 0, 1]\n\n if treatment == 'E':\n for i in range(len(rewards)):\n rewards[i] = round(rewards[i])\n\n for i in range(len(risks)):\n risks[i] = round(risks[i] / 10) * 10\n\n for i in range(len(certainty)):\n certainty[i] = round(certainty[i], 1)\n\n trial_list = []\n for i in range(len(rewards)):\n dum = [rewards[i], risks[i], certainty[i], display[i]]\n trial_list.append(dum)\n\n trial_table = pd.DataFrame(trial_list)\n\n columns = ['reward', 'risk', 'certainty', 'display']\n trial_table.columns = columns\n\n # np.random.seed(520)\n # trial_table = trial_table.sample(frac=1).reset_index(drop=True)\n\n return trial_table\n\ndef set_time():\n\n time_now = timer()\n\n return time_now\n\n\nclass Constants(BaseConstants):\n name_in_url = 'choice_practice'\n players_per_group = None\n num_rounds = 6 #should be 6\n\n\nclass Subsession(BaseSubsession):\n pass\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n treatment = models.StringField()\n\n choice = models.StringField()\n\n # decision time collected by JavaScript method\n jsdectime_start = models.FloatField()\n 
jsdectime_end = models.FloatField()\n    jsdectime = models.FloatField()\n    # decision time collected by Python method\n    pydectime = models.FloatField()\n\n    pyresttime = models.FloatField()\n\n    reward = models.FloatField()\n    risk = models.FloatField()\n    certainty = models.FloatField()\n    display = models.IntegerField()\n\n    lottery = models.IntegerField()\n","sub_path":"choice_practice/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"541014090","text":"from django.db import models\n\nfrom datetime import *\n\n# Create your models here.\n\n\n\t\t\t\nclass Donation(models.Model):\n\t\"\"\"docstring for ClassName\"\"\"\n\tnickname = models.CharField(max_length=50, verbose_name=\"Nickname\" )\n\temail = models.CharField(max_length=50, verbose_name=\"E-mail\" )\n\ttext = models.CharField(max_length=300,verbose_name=\"Text\")\n\tamount=models.FloatField(verbose_name=\"Amount\")\n\n\t\n\t\n\tclass Meta:\n\t\tverbose_name= \"Donation\"\n\t\tverbose_name_plural=\"Donations\"\n\tdef __str__(self):\n\t\treturn 'Donation %s' % self.nickname\n\n\nclass ReqToken(models.Model):\n\t\"\"\"docstring for ClassName\"\"\"\n\tDonationID=models.ForeignKey(Donation)\n\tkey = models.CharField(max_length=25, verbose_name=\"Key\" )\n\tisUsed= models.BooleanField( verbose_name=\"isUsed\" )\n\tclass Meta:\n\t\tverbose_name= \"ReqToken\"\n\t\tverbose_name_plural=\"ReqTokens\"\n\tdef __str__(self):\n\t\treturn 'ReqToken %s' % self.key\n\n\nclass Request(models.Model):\n\t\"\"\"docstring for ClassName\"\"\"\n\tDonationID=models.ForeignKey(Donation)\n\tTokenID=models.ForeignKey(ReqToken)\n\tword = models.CharField(max_length=30, verbose_name=\"Word\" )\n\timage=models.ImageField(max_length=400,verbose_name=\"Image\")\n\tpathArray = models.SlugField( verbose_name=\"PathArray\" )\n\tCurrentStatus = models.CharField(max_length=15, verbose_name=\"Current Status\" )\n\tclass Meta:\n\t\tverbose_name= \"Request\"\n\t\tverbose_name_plural=\"Requests\"\n\tdef __str__(self):\n\t\treturn 'Request %s' % self.word\n\nclass Winner(models.Model):\n\t\"\"\"docstring for ClassName\"\"\"\n\tUsername = models.CharField(max_length=50, verbose_name=\"Username\" )\n\tRequestID = models.ForeignKey(Request)\n\tclass Meta:\n\t\tverbose_name= \"Winner\"\n\t\tverbose_name_plural=\"Winners\"\n\tdef __str__(self):\n\t\treturn 'Winner %s' % self.Username\n","sub_path":"prog/polls/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"363098023","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nimport unittest, time\n\n\nclass lesson10Chrome(unittest.TestCase):\n    def setUp(self):\n        self.wd = webdriver.Chrome()\n        self.verificationErrors = []\n\n    def test_lesson10_Chrome(self):\n        wd = self.wd\n        # Get the properties of elements on the main page\n        wd.get(\"http://afl-test.test.aeroflot.ru/ru-ru\")\n        log = []\n        for l in wd.get_log(\"browser\"):\n            log.append(l)\n        print(len(log))\n        assert len(log) == 0\n        #wd.get(\"http://localhost/litecart/en/\")\n\n    def tearDown(self):\n        self.wd.quit()\n        self.assertEqual([], self.verificationErrors)\n\nif __name__ == \"__main__\":\n    unittest.main()\n\n","sub_path":"test_log2.py","file_name":"test_log2.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"481658918","text":"# Time: O(logn)\n# Space: O(1)\n\n# 540\n# Given a sorted array consisting of only integers\n# where every element appears twice except for one element\n# which appears once. Find this single element that appears only once.\n#\n# Example 1:\n# Input: [1,1,2,3,3,4,4,8,8]\n# Output: 2\n# Example 2:\n# Input: [3,3,7,7,10,11,11]\n# Output: 10\n# Note: Your solution should run in O(log n) time and O(1) space.\n\n\n# 对所有三种算法,即使数组没有经过排序,只要将同一元素放在一起,该算法仍然起作用\n# 顺序无关紧要,重要的是含有单个元素的子数组元素个数为奇数。\n\nclass Solution(object):\n # 对偶数索引进行二分搜索 O(logn)\n # 这是对方法3(线性搜索偶数索引)的优化。原数组是奇数长度;一对数应为偶-奇索引。\n # 取mid并调整为偶数索引,如果它与其后元素相同,则单个元素在其后;如果不同,则单个元素\n # 在mid或之前。一旦只剩一个元素,即为返回值。\n def singleNonDuplicate(self, nums): # USE THIS\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n lo, hi = 0, len(nums) - 1\n while lo < hi:\n mid = lo + (hi - lo) // 2\n if mid % 2 == 1:\n mid -= 1\n\n if nums[mid] == nums[mid + 1]:\n lo = mid + 2\n else:\n hi = mid\n return nums[lo]\n\n # 对所有索引进行二分搜索 O(logn),需要判断的情况较多\n def singleNonDuplicate2(self, nums):\n lo, hi = 0, len(nums) - 1\n while lo < hi:\n mid = lo + (hi - lo) // 2\n halves = hi - mid # mid后有几个数\n if nums[mid + 1] == nums[mid]:\n if halves % 2 == 0:\n lo = mid + 2\n else:\n hi = mid - 1\n elif nums[mid - 1] == nums[mid]:\n if halves % 2 == 0:\n hi = mid - 2\n else:\n lo = mid + 1\n else:\n return nums[mid]\n return nums[lo]\n\n\n # 对偶数索引进行线性搜索 Time O(n)\n def singleNonDuplicate3(self, nums):\n for i in range(0, len(nums) - 2, 2):\n if nums[i] != nums[i + 1]:\n return nums[i]\n return nums[-1]\n\nprint(Solution().singleNonDuplicate([1,1,2,3,3,4,4,8,8])) # 2","sub_path":"Python/single-element-in-a-sorted-array.py","file_name":"single-element-in-a-sorted-array.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"388587201","text":"# Import libraries and change working directory.\nimport os\nos.chdir('/home/pascal/MasterThesis/DownloadImages/code/')\n\n# Area parameters used in this script.\n\n# For Chicago:\n# area_name_tag = 'chicago'\n# highway_dict_tag = 'highwayTags2'\n# lng_start = 41.94388\n# lat_start = -87.81855\n# lng_dif = 0.0025\n# lat_dif = 0.0040\n# tiles_lng = 50\n# tiles_wdt = 50\n\n# For Zurich:\narea_name_tag = 'zurich'\nhighway_dict_tag = 'highwayTags1'\nlng_start = 47.422512\nlat_start = 8.504446\nlng_dif = 0.0025\nlat_dif = 0.0040\ntiles_lng = 15\ntiles_wdt = 15\n\n\n# List that stores output.\nout_list = []\n\n# Specify header of output list.\nfirst_entry = 'name,upperLeftLng,upperLeftLat,lowerRightLng,lowerRightLat,highwayDictEntry'\nout_list.append(first_entry)\n\n# Specify entries of output list.\nfor i in range(0, tiles_lng):\n for j in range(0, tiles_wdt):\n area_nr = i * tiles_wdt + j + 1\n new_entry = \\\n area_name_tag + str(area_nr) + \\\n ',' + str(lng_start - i * lng_dif) + \\\n ',' + str(lat_start + j * lat_dif) + \\\n ',' + str(lng_start - (i + 1) * lng_dif) + \\\n ',' + str(lat_start + (j + 1) * lat_dif) + \\\n ',' + highway_dict_tag\n out_list.append(new_entry)\n\n# Store list as txt file.\nout_file = open('coordinates_' + area_name_tag + '.txt','w')\nfor item in out_list:\n out_file.write(\"%s\\n\" % item)\nout_file.close()","sub_path":"DownloadImages_corners/chicago/code/calcTileCoordsForArea.py","file_name":"calcTileCoordsForArea.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"4098685","text":"from 
django.conf.urls import patterns, url\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom accounts import views\n\nURL_PART = 'api/users'\n\nurlpatterns = format_suffix_patterns(patterns('accounts.views',\n    url(r'^'+URL_PART+'$', views.UserList.as_view(), name='myuser-list'),\n    url(r'^'+URL_PART+'/(?P<pk>[0-9]+)$', views.UserDetail.as_view(), name='myuser-detail'),\n    url(r'^'+URL_PART+'/(?P<pk>\\d+)/tickets$', views.TicketsByUserList.as_view(), name='myuser-tickets'),\n    url(r'^'+URL_PART+'/login$', views.UserLogin.as_view(), name='myuser-login'),\n    url(r'^'+URL_PART+'/isAuthenticated$', views.UserIsAuthenticated.as_view(), name='myuser-isAuthenticated'),\n    url(r'^'+URL_PART+'/logout$', views.UserLogout.as_view(), name='myuser-logout'),\n))","sub_path":"ezdict_backend/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2963687","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import *\nfrom cart.forms import AddProductForm\nfrom .forms import *\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.shortcuts import redirect\n\ndef category(request, category_slug=None): # category page\n    current_category = None\n    categories = Category.objects.all()\n    products = Product.objects.filter(available_display=True)\n    results = Product.objects.filter(available_display=True)\n\n    if category_slug:\n        current_category = get_object_or_404(Category, slug=category_slug)\n        if current_category.is_child_node():\n            results = products.filter(categories=current_category)\n        else:\n            results = products.filter(categories=current_category)\n            for slug in current_category.get_descendants(include_self=True):\n                results = results | products.filter(categories=slug)\n\n    return render(request,'shop/list.html', {'current_category': current_category, 'categories': categories, 'products': results})\n\n\n\ndef product_detail(request, id, product_slug=None): # product detail view\n    categories = Category.objects.all()\n    product = get_object_or_404(Product, id=id, slug=product_slug)\n    add_to_cart = AddProductForm(initial={'quantity':1})\n    relative_products = Product.objects.filter(company=product.company).exclude(slug=product_slug)\n    \n    # comment section\n    #comments = Comment.objects.all()\n    if request.method == \"POST\":\n        comment_form = CommentForm(request.POST)\n        comment_form.instance.author_id = request.user.id\n        comment_form.instance.product_slug = product_slug\n        if comment_form.is_valid():\n            comment = comment_form.save()\n            # In models.py the document's related_name is set to comments.\n\n    comment_form = CommentForm()\n    comments = Comment.objects.filter(product=product)\n    print(comments)\n    return render(request, 'shop/detail.html', {'product':product, 'add_to_cart':add_to_cart, 'relative_products': relative_products,\n                                                'comments':comments, 'comment_form':comment_form})\n\n\ndef comment(request, id, product_slug=None):\n    product = get_object_or_404(Product, id=id, slug=product_slug)\n    comments = Comment.objects.filter(product=product)\n    if request.method == \"POST\":\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            comment = form.save(commit=False)\n            comment.product = Product.objects.get(id=id)\n            comment.save()\n            return redirect('shop:comment')\n    else:\n        form = CommentForm()\n\n    return render(request, 'shop/comment.html', {'form': form, 'comments':comments})\n\ndef home(request) :\n    categories = Category.objects.all()\n    current_category = None\n    
banners = Banner.objects.all()\n return render(request, 'shop/home.html', {'categories' : categories, 'current_category' : current_category, 'banners' : banners})\n \ndef search(request):\n print(\"here\")\n #products = Product.objects.filter(available_display=True)\n categories = Category.objects.all()\n products = Product.objects.all()\n search_term = ''\n if 'search' in request.GET:\n search_term = request.GET['search']\n products = products.filter(name__contains=search_term)\n return render(request, 'shop/search.html',\n {'products': products, 'search_term' : search_term, 'categories' : categories})\n","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"392641226","text":"import string\nimport sys\n\nwords = {}\nstrip = string.whitespace+string.punctuation+string.digits+\"\\\"'\"\nfor line in open(\"ex.txt\"):\n for word in line.lower().split():\n word = word.strip(strip)\n if len(word) > 2:\n words[word]= words.get(word,0)+1\nfor word in sorted(words):\n print(\"'{0}' occurs {1} times\".format(word,words[word]))\n","sub_path":"uniquewords1.py","file_name":"uniquewords1.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"419750868","text":"import a0\nimport time\n\na0.InitGlobalTopicManager('''{\n \"container\": \"localizer\"\n}''')\n\np = a0.Publisher('location')\nfor i in range(10):\n payload = 'here (ts={})'.format(i)\n print('publishing:', payload)\n p.pub(payload)\n time.sleep(1)\n\nprint('Done!')\n","sub_path":"examples/pub.py","file_name":"pub.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"16189682","text":"from pysketcher import *\nimport numpy as np\n\n# Define the grid lines (uniform spacing)\nx = np.linspace(0, 3, 4)\ny = np.linspace(0, 1, 3)\n\n# Define drawing area (add some space to each side of the grid)\nxmin = x[0] - 0.5\nxmax = x[-1] + 0.5\nymin = y[0] - 0.5\nymax = y[-1] + 0.5\n\ndrawing_tool.set_coordinate_system(xmin, xmax, ymin, ymax, axis=False)\ndrawing_tool.set_linecolor('black')\ndrawing_tool.set_linewidth(1)\n\n# Store each line in lists for the two directions\nx_lines = [Line((x[i], y[0]), (x[i], y[-1])) for i in range(len(x))]\ny_lines = [Line((x[0], y[i]), (x[-1], y[i])) for i in range(len(y))]\n# Transform lists to dicts since pysketcher works with dicts\nx_lines = {'x%d' % i: x_line for i, x_line in enumerate(x_lines)}\ny_lines = {'y%d' % i: y_line for i, y_line in enumerate(y_lines)}\n\n# Collect all lines in one grid object\ngrid = Composition(dict(x_lines=Composition(x_lines),\n y_lines=Composition(y_lines)))\n\n# Make text objects for the coordinates of the grid points\ncoordinates = {}\nd = point(0.02,0.02) # text displacement, 45 degrees up-right\nfor x_ in x:\n for y_ in y:\n coordinates['%s%s' % (x_, y_)] = Text(\n '(%g,%g)' % (x_, y_),\n point(x_, y_) + d,\n alignment='left',\n fontsize=8)\ncoordinates = Composition(dict(coordinates=Composition(coordinates)))\n\n# Draw grid with coordinates\ngrid.draw()\ncoordinates.draw()\ndrawing_tool.display()\ndrawing_tool.savefig('tmp1')\n\n# Make new drawing with indices instead of coordinates\ndrawing_tool.erase()\nindices = {}\nfor i, x_ in enumerate(x):\n for j, y_ in enumerate(y):\n indices['%s%s' % (i, j)] = Text(\n '(%g,%g)' % (i, j),\n point(x_, y_) + d,\n 
alignment='left',\n fontsize=8)\nindices = Composition(dict(indices=Composition(indices)))\n\ngrid.draw()\nindices.draw()\ndrawing_tool.display()\ndrawing_tool.savefig('tmp2')\n\nraw_input()\n","sub_path":"slides/plot/html/fig-plot/grid2D.py","file_name":"grid2D.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"304540495","text":"from pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sentiment import mainTest\nfrom PredictStock import predict\nfrom scrapper import scrape\nfrom news_scrapper import scrape as news_scrape\ntrend = {}\n\n@view_config(route_name='predictstock', renderer = 'json')\ndef predictstock(request):\n ticker = request.matchdict['ticker']\n std = request.matchdict['start_date']\n end = request.matchdict['end_date']\n std_p = request.matchdict['start_date_predict']\n end_p = request.matchdict['end_date_predict']\n \n delta = 0\n trend['ticker'] = ticker\n trend['start_date'] = std\n trend['end_date'] = end\n clf = predict.getTrainedClassifier(ticker, std, end)\n delta = predict.getTrendPredictions(clf, ticker, std_p, end_p)\n trend['delta'] = str(delta)\t\n return trend\n\n@view_config(route_name='getsentiment', renderer = 'json')\ndef getsentiment(request):\n source = request.matchdict['source']\n sentiment, news_list = mainTest.getNewsSentiment(source)\n trend['sentiment'] = sentiment\n trend['newslist'] = news_list\n return trend\n\n@view_config(route_name='livedata', renderer = 'json')\ndef livedata(request):\n\tticker = request.matchdict['ticker']\n\tdata = scrape.givelivedata(ticker)\n\treturn data\n\n@view_config(route_name='getstocknews', renderer = 'json')\ndef getstocknews(request):\n\tticker = request.matchdict['ticker']\n\tresults = news_scrape.SentimentNews(ticker)\n\treturn results\t\n\n@view_config(route_name='getstocksentiment', renderer = 'json')\ndef getstocksentiment(request):\t\n\tticker = request.matchdict['ticker']\n\treturn trend\n","sub_path":"Stock-Market-Analysis-REST-API/ProjectApi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"468665114","text":"from pages.drivers import Drivers\nfrom pages.online_banking_page import OnlineBankingPage\nfrom pages.base_tree import Root\nfrom pages.accounts_acttivity_table import Listings\n\nbrowser = Drivers('--start-maximized').chrome()\nobp = OnlineBankingPage(driver=browser)\n\n# test setup\nuser = 'Bruce Lee'\npw = 'password'\ncheck_mark = '✔'\namount = '150.0'\nclearence_date = '2017-01-04'\n\n# test start\n# login\nobp.go()\nobp.username.input_text(user)\nobp.password.input_text(pw)\nobp.login.click()\n\n# reset acc\nobp.reset_open.click()\nobp.reset.click()\n\n# pay a bill\nobp.pay_bill.click()\nobp.recipient.click()\nobp.recipient.arrow_down()\nobp.recipient.enter()\n\n# get recipient name for later validation\nrec_name = obp.recipient.text()\n\nobp.bill_date.click()\nobp.bill_day_third.click()\nobp.bill_amount.input_text(amount)\nobp.bill_memo.input_text('Thank you for everything')\nobp.bill_submit.click()\nalr = obp.alert.text()\nprint(alr)\nprint()\n\n# forward date by 4 days\nobp.day_date.click()\n\n# arrow down three times\ncount = 0\nwhile count < 3:\n obp.day_date.arrow_down()\n count += 1\nelse:\n obp.day_date.enter()\nobp.date_go.click()\n\n# switch to account activity\nobp.home.click()\nobp.acc_activity.click()\n\n# lxml setup\nhtml = 
browser.page_source\nlisting = Root(html, locator='.//tbody/tr')\nchecking_firs_row = listing.get_listings(Listings)[0]\nchecking_second_row = listing.get_listings(Listings)[1]\n\n# remove the currency symbols\nchecking_amount_sent = float(checking_firs_row.amount[2:])\nchecking_balance_after = float(checking_firs_row.balance[1:])\nchecking_balance_before = float(checking_second_row.balance[1:])\nname = checking_firs_row.name\n\n# assert correct recipient name\nassert name.lower() == rec_name.lower()\nprint(f'expected recipient name: {rec_name}')\nprint(f'recipient name: {name}', check_mark)\n\n# assert the correct amount left the checking acc\nassert float(amount) == checking_amount_sent\n\nprint(f'checking balance before bill: {checking_balance_before}')\nprint(f'transfer amount sent: {checking_amount_sent}')\nprint(f'checking balance after bill: {checking_balance_after}', check_mark)\ndifference_checking = round(checking_balance_before - checking_balance_after)\nassert difference_checking == checking_amount_sent\nprint('checking account ok')\nprint()\n\n# forward date by one month\nobp.month_date.click()\nobp.month_date.arrow_down()\nobp.month_date.enter()\nobp.date_go.click()\n\n# lxml setup get new listings\nhtml = browser.page_source\nlisting = Root(html, locator='.//tbody/tr')\nchecking_firs_row = listing.get_listings(Listings)[0]\n\nchecking_amount_sent = float(checking_firs_row.amount[2:])\n\n# assert correct recipient name\nprint('one month later')\nprint()\nname = checking_firs_row.name\nassert name.lower() == rec_name.lower()\nprint(f'expected recipient name: {rec_name}')\nprint(f'recipient name: {name}', check_mark)\n\n# assert the correct amount left the checking acc\nassert float(amount) == checking_amount_sent\nprint(f'transfer amount sent: {checking_amount_sent}')\nprint()\nbrowser.quit()\nprint('Test passed.')\n","sub_path":"test_pay_bill.py","file_name":"test_pay_bill.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"276912702","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nfrom time import strftime\n\nprint(\"Scraping Data From OverWatchTracker\")\noverwatchstats = \"https://overwatchtracker.com/profile/pc/global/ClasherMan-1365?update=1\"\n\nsession = requests.Session()\n\nsession.headers[\n \"User-Agent\"] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36\"\n\npage = session.get(overwatchstats)\ncontents = page.content\n\nsoup = BeautifulSoup(contents, 'lxml')\n\nvalues = []\n\nfor kd in soup.find_all('div', class_='value'):\n values.append(kd.string)\nnewlist = []\nfor value in values:\n for v in value:\n if v == '\\n':\n newval = value.replace(v, \"\")\n newlist.append(newval)\n# newlist[0] is level\n# newlist[1] is kd ratio\n# newlist[4] is eliminationsperminute\nlevelonline = newlist[0]\ndaytime = strftime(\"%m/%d/%Y\")\nstatsfile = open('OverwatchStats.txt', 'r')\nstats = statsfile.read()\nstats = stats.splitlines()\nstats.pop(0)\nrecentday = stats[-1]\nday, level, kdratio, epm, time = stats[-1].split(\" \")\ntime = time.replace(\"Date: \", \"\")\nday = day.replace(\"Day :\", \"\")\nif daytime == time:\n stats = open(\"OverwatchStats.txt\", 'r')\n allines = stats.readlines()\n del allines[-1]\n newstat = 'Day :' + day + \" \" + \"Level: \" + levelonline + \" \" + \"K/D Ratio : \" + newlist[\n 1] + \" \" + \"Eliminations Per Minute : \" + newlist[4] + \" \" + \"Date: \" + 
daytime\n allines.append(newstat)\n stats.close()\n stats = open(\"OverwatchStats.txt\", 'w')\n stats.writelines(allines)\n stats.close()\nif daytime != time:\n stats = open(\"OverwatchStats.txt\", 'a')\n newday = int(day) + 1\n newstat = 'Day :' + str(newday) + \" \" + \"Level: \" + levelonline + \" \" + \"K/D Ratio : \" + newlist[\n 1] + \" \" + \"Eliminations Per Minute : \" + newlist[4] + \" \" + \"Date: \" + daytime\n stats.write(\"\\n\" + newstat)\n stats.close()\n\nprint(\"If you see this then I have finished scraping the OverWatch data. You may now press enter to exit\")\ninput(\"Press enter to exit\")\n","sub_path":"WebScraping/OverwatchWebScrape.py","file_name":"OverwatchWebScrape.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"442672341","text":"import os\nimport json\nimport django\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"euro16.settings\")\n\nif django.VERSION >= (1, 7):\n django.setup()\n\n\ndef main():\n from euro16 import models\n f = file('stage.json')\n json_f = json.load(f)\n for part in json_f:\n if not models.Stage.objects.filter(number=part['number']).exists():\n s = models.Stage(number=part['number'], desc=part['desc'], start=part['start'], end=part['end'])\n s.save()\n else:\n models.Stage.objects.filter(number=part['number']).delete()\n s = models.Stage(number=part['number'], desc=part['desc'], start=part['start'], end=part['end'])\n s.save()\n f.close()\n\n\nif __name__ == \"__main__\":\n main()\n print('Done!')\n","sub_path":"euro16-server/euro16/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"599876830","text":"import urllib.request\nimport time\nimport json\nimport datetime\n\ng_Radiator = False\ng_Gas_Valve = False\ng_Balcony_Windows = False\ng_Door = False\ng_AI_Mode = False\n\naccess_key = \"\"\n\ndef get_request_url(url):\n req = urllib.request.Request(url)\n try:\n response = urllib.request.urlopen(req)\n if response.getcode() == 200:\n print(\"[%s] URL request Success\" %datetime.datetime.now())\n return response.read().decode('UTF-8')\n except Exception as e:\n print(e)\n print(\"[%s] Error for URL:%s\"%(datetime.datetime.now(),url))\n return None\n\ndef getForecastTimeDataResponse(base_date,base_time,nx,ny):\n end_point = \"http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastTimeData\"\n parameters = \"?base_date=\" + base_date\n parameters += \"&base_time=\" + base_time\n parameters += \"&nx=\" + nx\n parameters += \"&ny=\" + ny\n parameters += \"&_type=json&serviceKey=\" + access_key\n\n url = end_point+parameters\n\n retData = get_request_url(url)\n\n if(retData == None):\n return None\n else:\n return json.loads(retData)\n\ndef main():\n jsonResult = []\n base_date = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n base_time = time.strftime(\"%H%M\", time.localtime(time.time()))\n nx = \"89\"\n ny = \"91\"\n jsonData = getForecastTimeDataResponse(base_date,base_time,nx,ny)\n\n# HK Comment] JSON 데이터 분석하는 코드를 작성할 것\n\n print(\"%s_%s_Weather.json\" %(base_date,base_time))\n\n with open(\"%s_%s_Weather.json\" %(base_date,base_time),'w',encoding='utf8')as outfile:\n retJson = json.dumps(jsonResult, indent=4, sort_keys=True,ensure_ascii=False)\n outfile.write(retJson)\n\ndef print_main_menu():\n print(\"\\n1. 장비상태 확인\")\n print(\"2. 장비제어\")\n print(\"3. 스마트모드\")\n print(\"4. 
시뮬레이션 모드\")\n print(\"5. 프로그램 종료\")\n\ndef print_device_status(device_name,devcie_status):\n print(\"%s 상태: \"%device_name, end=\"\")\n if devcie_status == True : print(\"작동\") # HK Comment] 장비에 맞는 상태 메세지를 출력할 것\n # 힌트] 메세지와 관련된 파라메터 추가\n else: print(\"정지\")\n\ndef check_device_status():\n print_device_status('\\n난방기',g_Radiator)\n print_device_status('가스밸브', g_Gas_Valve)\n print_device_status('발코니(베란다) 창문', g_Balcony_Windows)\n print_device_status('출입문 상태', g_Door)\n\ndef print_device_menu():\n print(\"\\n상태 변경할 기기를 선택하세요.\")\n print(\"1. 난방기\")\n print(\"2. 가스밸브\")\n print(\"3. 발코니(베란다)창\")\n print(\"4. 출입문\")\n\ndef control_device():\n global g_Radiator, g_Gas_Valve, g_Balcony_Windows, g_Door\n check_device_status()\n print_device_menu()\n menu_num = int(input(\"번호를 입력하세요: \"))\n if menu_num == 1: g_Radiator = not g_Radiator\n if menu_num == 2: g_Gas_Valve = not g_Gas_Valve\n if menu_num == 3: g_Balcony_Windows = not g_Balcony_Windows\n if menu_num == 4: g_Door = not g_Door\n check_device_status()\n\ndef smart_mode():\n global g_AI_Mode\n print(\"1. 인공지능 모드 조회\")\n print(\"2. 인공지능 모드 상태 변경\")\n print(\"3. 실시간 기상정보 Update 신청하기\")\n print(\"4. 실시간 기상정보 Update 불러오기\")\n menu_num = int(input(\"메뉴를 선택하세요: \"))\n if menu_num == 1:\n print(\"현재 인공지능 모드: \", end='')\n if g_AI_Mode == True: print(\"작동\")\n else: print(\"정지\")\n\n elif menu_num == 2:\n g_AI_Mode = not g_AI_Mode\n print(\"현재 인공지능 모드: \", end='')\n if g_AI_Mode == True: print(\"작동\")\n else: print(\"정지\")\n\n elif menu_num == 3:\n print(\"실시간 기상정보 Update 신청하기\")\n get_realtime_weather_info()\n\n else:\n print(\"\\n실시간 기상정보 Update 불러오기\")\n print(\"=\" *50)\n get_weather_info()\n\ndef get_realtime_weather_info():\n if __name__ == '__main__':\n main()\n\ndef get_weather_info():\n base_date = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n base_time = time.strftime(\"%H%M\", time.localtime(time.time()))\n with open(\"%s_%s_Weather.json\" %(base_date,base_time), encoding='UTF8') as json_file:\n json_object = json.load(json_file)\n json_string = json.dumps(json_object)\n retJson = json.loads(json_string)\n for retJson in retJson:\n print(\"['baseDate'] = \" + str(retJson['baseDate']))\n print(\"['category'] = \" + str(retJson['category']))\n print(\"['fcstDate'] = \" + str(retJson['fcstDate']))\n print(\"['fcstTime'] = \" + str(retJson['fcstTime']))\n print(\"['fcstValue'] = \" + str(retJson['fcstValue']))\n print(\"['nx'] = \" + str(retJson['nx']))\n print(\"['ny'] = \" + str(retJson['ny']))\n print(\"=\" *50)\n\ndef simulation_mode():\n print(\"\\n1. Rain Day Simulation (발코니창 제어)\")\n print(\"2. Damp Day Simulation (제습기 제어)\")\n print(\"3. Dry Day Simulation (가습기 제어)\")\n print(\"4. 
Sunny Day Simulation (제습기/가습기 제어)\")\n menu_num = int(input(\"메뉴를 선택하세요: \"))\n if menu_num == 1:\n precipitation_forecast_simulation()\n\n elif menu_num == 2:\n dry_forecast_simulation()\n\n else:\n return None\n\ndef precipitation_forecast_simulation():\n global g_Balcony_Windows # 가상의 json 파일을 만들것\n# ex)[{\"baseDate\":현재날짜,\"baseTime\":\"현재시간\",\"category\":\"RN1\",\"fcstDate\":현재날짜\"fcstTime\":1000,\"fcstValue\":10,\"nx\":89,\"ny\":91}]\n# 장비 상태를 바꾼다.\n# 시뮬레이션 용 Json 파일을 저장한다.\n with open(\"Weather_info.json\", encoding='UTF8') as json_file:\n json_object = json.load(json_file)\n json_string = json.dumps(json_object)\n retJson = json.loads(json_string)\n for retJson in retJson:\n if (retJson['category']) == 'RN1':\n print(\"RN1//fcstValue = \"+str(retJson['fcstValue']))\n if retJson['fcstValue'] > 0:\n if g_Balcony_Windows == True:\n print(\"\\n***비가 올 예정이오니 열려 있는 창문을 닫습니다.***\")\n g_Balcony_Windows = not g_Balcony_Windows\n check_device_status()\n\ndef Damp_forecast_simulation():\n global g_Balcony_Windows\n with open(\"Weather_info.json\", encoding='UTF8') as json_file:\n json_object = json.load(json_file)\n json_string = json.dumps(json_object)\n retJson = json.loads(json_string)\n for retJson in retJson:\n if (retJson['category']) == 'REH':\n print(\"REH//fcstValue = \"+str(retJson['fcstValue']))\n if retJson['fcstValue'] < 0:\n if g_Balcony_Windows == True:\n print(\"\\n***습도가 높으므로 제습기를 가동합니다.***\")\n g_Balcony_Windows = not g_Balcony_Windows\n check_device_status()\n\ndef dry_forecast_simulation():\n global g_Balcony_Windows\n with open(\"Weather_info.json\", encoding='UTF8') as json_file:\n json_object = json.load(json_file)\n json_string = json.dumps(json_object)\n retJson = json.loads(json_string)\n for retJson in retJson:\n if (retJson['category']) == 'REH':\n print(\"REH//fcstValue = \"+str(retJson['fcstValue']))\n if retJson['fcstValue'] < 0:\n if g_Balcony_Windows == True:\n print(\"\\n***습도가 높으므로 제습기를 가동합니다.***\")\n g_Balcony_Windows = not g_Balcony_Windows\n check_device_status()\n\nwhile True:\n print_main_menu()\n menu_num = int(input(\"메뉴를 선택하세요: \"))\n\n if menu_num == 1:\n# check_device_status() HK Comment] 장비 상태 출력하는 함수를 작성할 것\n pass\n\n elif menu_num == 2:\n control_device()\n\n elif menu_num == 3:\n smart_mode()\n\n elif menu_num == 4:\n simulation_mode()\n\n else: break","sub_path":"03. Data_Science/1. 
Collection/OpenAPI/학생/김건홍/Ver1.py","file_name":"Ver1.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"456620458","text":"\n\nfrom xai.brain.wordbase.adjectives._resident import _RESIDENT\n\n#calss header\nclass _RESIDENTS(_RESIDENT, ):\n\tdef __init__(self,): \n\t\t_RESIDENT.__init__(self)\n\t\tself.name = \"RESIDENTS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"resident\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_residents.py","file_name":"_residents.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"542070107","text":"from step_sizes import make_safe, step_sizes\nimport pdb\n\ndef step(vector, direction, step_size):\n return [\n vector_component + direction_i * step_size\n for vector_component, direction_i\n in zip(vector, direction)\n ]\n\ndef minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):\n # use gradient descent to find theta that minimizes target function\n theta = theta_0\n safe_target_fn = make_safe(target_fn)\n value = safe_target_fn(theta)\n\n while True:\n # pdb.set_trace()\n gradient = gradient_fn(theta)\n\n next_thetas = [\n step(theta, gradient, -step_size) for step_size in step_sizes\n ]\n\n next_theta = min(next_thetas, key=safe_target_fn)\n if safe_target_fn(next_theta) == float('inf'):\n pdb.set_trace()\n next_value = safe_target_fn(next_theta)\n print('next tolerance: %s' % (abs(value - next_value)))\n if abs(value - next_value) < tolerance:\n return theta\n else:\n theta, value = next_theta, next_value\n","sub_path":"09-exercises/tie_together.py","file_name":"tie_together.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"58877723","text":"from scrapy.http import Request\nfrom bs4 import BeautifulSoup\nimport scrapy\nimport requests\n# from ..items import EbayFranceItem\nimport csv\n\n\nclass GetProduct(scrapy.Spider):\n name = 'getProduct'\n # allowed_domains = ['www.ebay.fr']\n start_url = \"http://www.ebay.fr/sch/allcategories/all-categories\"\n writer = csv.writer(open('E:\\\\Documents\\\\ebay\\\\ebay1.csv', 'a', encoding='utf-8'))\n class_name = ''\n\n def start_requests(self):\n # content = requests.get(self.start_url)\n # self.parse_catagory(content)\n yield Request(url=self.start_url,callback=self.parse_catagory)\n\n def parse_catagory(self,content):\n soup = BeautifulSoup(content.text,\"lxml\")\n all_a = soup.find_all('a',class_=\"ch\")\n print(\"一共有 \"+str(len(all_a))+\" 个分类\")\n # num = 0\n for a in all_a:\n class_name = a.get_text()\n href = a['href']\n for i in range(1,51):\n url = href+\"?_pgn=\"+str(i)\n # num = num + 1\n # print(className+\" \"+url)\n yield Request(url=url,callback=self.parsePage,meta={'class_name':class_name,'href':href})\n\n def parsePage(self,response):\n soup = BeautifulSoup(response.text, \"lxml\")\n all_li1 = soup.find_all('li', class_=\"sresult lvresult clearfix li shic\")\n all_li2 = soup.find_all('li', class_=\"sresult lvresult clearfix li\")\n all_li = all_li1 + all_li2\n num = len(all_li)\n # print(\"该页有 \" + str(len(all_li)) + \" 个商品\")\n # lists = []\n for li in all_li:\n title = li.find('a', class_=\"vip\").get_text().strip()\n url = li.find('a', class_=\"vip\")['href']\n price = li.find('span', class_=\"bold\").get_text().strip()\n list = []\n 
list.append(response.meta['class_name'])\n list.append(response.meta['href'])\n list.append(self.class_name)\n list.append(title)\n list.append(url)\n list.append(price)\n self.writer.writerow(list)\n # lists.append(list)\n # self.send_item(lists=lists)\n\n def send_item(self,list):\n # item = EbayFranceItem()\n # item['lists'] = lists\n # return item\n file_path = \"E:\\\\Documents\\\\ebay\\\\ebay1.csv\"\n file = open(file_path,'a',encoding='utf-8')\n file.write(list)\n\n\n\n\n\n","sub_path":"ebay/ebay_france/spiders/getProduct.py","file_name":"getProduct.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"307649521","text":"#!/usr/bin/env python\n\nimport options as opt,os,sys\n\n### MANUAL ###\n# import os,sys\n#tag = sys.argv[1] #nocorr,wN,wNR,wR\n# os.system(\"./findmf_mocks.py {0}\".format(tag))\n# os.system(\"./findpk_mocks.py {0}\".format(tag))\n\n### AUTOMATIC ###\nprint(\"\\nRunning `quick_mock.py`\\n\")\nprint(\"Correction tags:\\n[nocorr, wN, wNR,wR,wN_SN10,nocorr_n5000,wNR_n5000]\")\nprint(\"dz: {0}\".format(opt.dz))\n\nwhich_catalog = int(sys.argv[1])\nif which_catalog == 0:\n tag_list = [\"nocorr\",\"wN\",\"wN_SN10\"]\nif which_catalog == 1:\n tag_list = [\"wNR_n5000\",\"nocorr_n5000\"]\nprint(tag_list)\n\nfor tag in tag_list:#[\"nocorr\", \"wN\",\"wNR\", \"wR\",\"wN_SN10\"]:\n print(\"\\n\\n{0}\\n\\n\".format(tag))\n os.system(\"./findmf_mocks.py {0} {1}\".format(tag,which_catalog))\n os.system(\"./findpk_mocks.py {0} {1}\".format(tag,which_catalog))\n #os.system(\"./bootstrap.py {0} 1 1000\".format(tag))\nprint(\"\\nScript complete! :)\\n\")\n","sub_path":"summer2018/pipeline/mocks/quick_mock.py","file_name":"quick_mock.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"213014791","text":"import os\nimport json\nimport glob\n\nfrom avalon import io\nfrom avalon.pipeline import AVALON_CONTAINER_ID\n\nKEEP_VERSION = 5\nPREMIERE_JSON_DIR = r'{}\\{}\\.premiere_avalon'\n\nITEM_DATA = {}\n\n\ndef get_node_data():\n import pymiere\n\n project = pymiere.objects.app.project\n root_item = project.rootItem\n\n _get_children(root_item)\n\n return ITEM_DATA\n\n\ndef _get_children(item):\n for child in item.children:\n\n ITEM_DATA[child.nodeId] = {'item': child}\n\n if child.children:\n _get_children(child)\n\n\ndef ls_containers(file_id):\n \"\"\"\n Return containers from json file.\n The json file is created from loader plugin.\n\n :param file_id: (str) premiere file id\n :return: (dict)\n \"\"\"\n # Get project data\n proj_data = io.find_one(\n {\"type\": \"project\"}, projection={\"data.root\": True, \"name\": True})\n\n _json_dir = os.path.join(\n PREMIERE_JSON_DIR.format(proj_data['data']['root'], proj_data['name']),\n file_id\n )\n json_file = max(glob.iglob('{}/*'.format(_json_dir)), key=os.path.getctime)\n\n with open(json_file, \"r\") as file:\n json_data = json.load(file)\n\n return json_data\n\n\ndef read(container_data, item):\n container_data.update({\n 'item': item,\n 'objectName': item.treePath,\n 'namespace': item.name,\n 'id': AVALON_CONTAINER_ID,\n 'schema': 'avalon-core:container-2.0'\n # 'customIconPath': u'$REVERIES_PATH\\\\res\\\\icons\\\\container-01.png',\n })\n\n return 
container_data\n","sub_path":"avalon/premiere/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"201286766","text":"class ListNode(object):\n    def __init__(self, x, node=None):\n        self.val = x\n        self.next = node\n\n\nclass Solution(object):\n    def removeElements(self, head, val):\n        \"\"\"\n\n        :type head: ListNode\n        :type val: int\n        :rtype: ListNode\n        \"\"\"\n        curr = head\n        prev = head\n\n        while curr is not None:\n            if head.val == val:\n                head = head.next\n            elif curr.val == val:\n                prev.next = curr.next\n            else:\n                prev = curr\n            curr = curr.next\n\n        return head\n\n\ndef main():\n    solution = Solution()\n    # build the list 1 -> 2 -> 3 -> 1, then remove every node whose value is 1\n    head = ListNode(1, ListNode(2, ListNode(3, ListNode(1))))\n    ret = solution.removeElements(head, 1)\n    # print the surviving values\n    while ret is not None:\n        print(ret.val)\n        ret = ret.next\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"simple/203_remove_elements.py","file_name":"203_remove_elements.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"562487183","text":"# This is an extension of 4_consume_json.py and will therefore not run on its own\n\nfrom datetime import datetime\n\nwhile True:\n    messages = consumer.consume(num_messages=5, timeout=10)\n    print(f\"Total number of messages: {len(messages)}\")\n    # Only write a file if there's data to write\n    if len(messages) > 0:\n        # Use \"a\" here for append, if we use \"w\" then we risk overriding a file\n        with open(f\"batch_{datetime.utcnow().isoformat()}.json\", \"a\") as file:\n            for message in messages:\n                json.dump(json.loads(message.value()), file)\n                # Remember to end with a newline!\n                file.write(\"\\n\")\n","sub_path":"Part4/6_save_json.py","file_name":"6_save_json.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"652626547","text":"from celery import shared_task\n\nfrom games.models import (\n    SwitchGame,\n    SwitchGameUS,\n    SwitchGameEU,\n    SwitchGamePrice,\n    SwitchGameSale,\n)\n\nfrom games.tasks.update_utils import treated_request\n\nfrom games.serializers import (\n    SwitchGamePriceSerializer,\n    SwitchGameSaleSerializer,\n)\n\n\n@shared_task()\ndef update_switch_price():\n    print('Updating Switch games\\' prices...')\n\n    # Prices in the America region\n    for country in ['US', 'CA', 'MX']: # , 'AR', 'BR', 'CL']:\n        update_country(country, SwitchGameUS)\n\n    # Prices in the Europe region\n    for country in ['GB', 'DE', 'FR', 'ZA', 'RU']:\n        update_country(country, SwitchGameEU)\n\n    print('Finished updating Switch games\\' prices.')\n\n\ndef update_country(country, model):\n    url = 'https://api.ec.nintendo.com/v1/price'\n    count = model.objects.count()\n\n    found_price = 0\n    found_sales = 0\n\n    for offset in range(0, count, 50):\n        print('Updating {}\\'s price offset {}'.format(country, offset))\n\n        games = model.objects.all()[offset:offset+50].values('nsuid')\n        games = list(map(lambda game: game['nsuid'], games))\n        games = ','.join([nsuid for nsuid in games if nsuid != None])\n\n        params = {'lang': 'en', 'country': country, 'ids': games}\n        req = treated_request(url, params, 'US Switch price')\n\n        data = req.json()['prices']\n\n        for price_info in data:\n            if model.objects.filter(nsuid=price_info['title_id']).count() > 1:\n                print('Multiple games found for nsuid {}'.format(price_info['title_id']))\n\n            game = model.objects.filter(nsuid=price_info['title_id'])[0]\n\n            if 'regular_price' in price_info:\n                found_price = found_price + 1\n\n                if 
SwitchGamePrice.objects.filter(game=game.switchgame,\n                                               country=country).exists():\n                    price = SwitchGamePrice.objects.get(\n                        game=game.switchgame, country=country)\n\n                    serialized = SwitchGamePriceSerializer(\n                        data=price_info['regular_price'],\n                        context={'game': game.switchgame, 'country': country},\n                        instance=price)\n                else:\n                    serialized = SwitchGamePriceSerializer(\n                        data=price_info['regular_price'],\n                        context={'game': game.switchgame, 'country': country})\n\n                if serialized.is_valid():\n                    serialized.save()\n\n            if 'discount_price' in price_info:\n                found_sales = found_sales + 1\n\n                if SwitchGameSale.objects.filter(game=game.switchgame,\n                                                 country=country).exists():\n                    price = SwitchGameSale.objects.get(\n                        game=game.switchgame, country=country)\n\n                    serialized = SwitchGameSaleSerializer(\n                        data=price_info['discount_price'],\n                        instance=price,\n                        context={'game': game.switchgame, 'country': country})\n                else:\n                    serialized = SwitchGameSaleSerializer(\n                        data=price_info['discount_price'],\n                        context={'game': game.switchgame, 'country': country})\n\n                if serialized.is_valid():\n                    serialized.save()\n\n            else:\n                SwitchGameSale.objects \\\n                    .filter(game=game.switchgame, country=country).delete()\n\n    print('Found {} prices and {} sales for country {}'\n          .format(found_price, found_sales, country))\n","sub_path":"eshop-index-back/games/tasks/update_switch_price.py","file_name":"update_switch_price.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"386835203","text":"# Open the source file\nfile_read = open(\"README\")\n\n# Open the destination file\nfile_write = open(\"README[复件]\", \"w\")\n\nwhile True:\n\n    # Read and write\n    text_read = file_read.readline()\n\n    if not text_read:\n        break\n\n    file_write.write(text_read)\n\n# Close\nfile_read.close()\nfile_write.close()\n","sub_path":"文件/复制文件.py","file_name":"复制文件.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"227510053","text":"class Matriz:\n    def __init__(self,celdas=[]):\n        self.celdas=celdas\n\n    def __repr__(self):\n        s=\"\"\n        for i in range(len(self.celdas)):\n            for j in range(len(self.celdas)):\n                s+=\"{0: >5} \".format(self.celdas[i][j])\n            s+=\"\\n\"\n        return s\n\n    def __mul__(self, other):\n        m1 = self.celdas\n        m2 = other.celdas\n        mr = []\n        m2_trans = []\n        for i in range(len(m2)):\n            m2_trans.append([])\n        for fila in m2:\n            for j in range(len(fila)):\n                m2_trans[j].append(fila[j])\n        for fila in m1:\n            fila_nueva = []\n            for col in m2_trans:\n                fila_nueva.append(sum(fila[i]*col[i] for i in range(len(fila))))\n            mr.append(fila_nueva)\n        return Matriz(celdas=mr)\n\n\nif __name__ == \"__main__\":\n    a=Matriz([[1,2],[3,4]])\n    b=Matriz([[5,6],[7,8]])\n    r=a*b\n    print(r)","sub_path":"tema9_ej2/tema9_ej2_15634728.py","file_name":"tema9_ej2_15634728.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210006332","text":"import unittest\nimport os\nfrom libs.ddt import ddt, data\nfrom scripts.handle_excel import HandleExcel\nfrom scripts.handle_config import config_read_file\nfrom scripts.constant import DATA_DIR\nfrom scripts.handle_requests import HandleRequest\nfrom scripts.handle_log import logger\nfrom scripts.handle_context import HandleContext\n\n# Read the Excel file\ndo_excel = HandleExcel(os.path.join(DATA_DIR, config_read_file.get_value(\"excel\", \"case_excel\")), \"login\")\nall_cases = do_excel.get_cases()\n\n\n@ddt\nclass 
TestLogin(unittest.TestCase):\n    \"\"\"\n    定义一个测试登录的测试类\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.resp = HandleRequest()\n        logger.info(\"******************************************************\")\n        logger.info(\"{0}\".format(\"开始执行用例\"))\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.resp.close()\n        logger.info(\"{0}\".format(\"执行用例完成\"))\n        logger.info(\"******************************************************\")\n\n    @data(*all_cases)\n    def test_user_login(self, one_case):\n        logger.info(\"正在执行第{0}条用例:{1}\".format(one_case[\"case_id\"], one_case[\"title\"]))\n        url_new = config_read_file.get_value(\"api\", \"url\") + one_case[\"url\"]\n        new_data = HandleContext.invest_context(one_case[\"data\"])\n        logger.info(\"\\n请求url为{0}\\ndata为{1}\".format(url_new, new_data))\n        login = self.resp.sendRequests(method=one_case[\"method\"], url=url_new, data=eval(new_data)) # 返回登录响应对象\n        logger.info(\"\\n响应数据为{1}\".format(url_new, login.text))\n        try:\n            self.assertEqual(one_case[\"expected\"], login.text, msg=one_case[\"title\"])\n            logger.info(\"\\'{0}\\'用例执行成功\".format(one_case[\"title\"]))\n            result = \"True\"\n        except AssertionError as err:\n            logger.error(\"{0}用例执行失败,错误信息为: {1}\".format(one_case[\"title\"], err))\n            result = \"False\"\n            raise err\n        finally:\n            logger.info(\"写入结果开始\")\n\n            do_excel.write_result(row=one_case[\"case_id\"] + 1, column=8, result=result)\n            do_excel.write_result(row=one_case[\"case_id\"] + 1, column=7, result=login.text)\n\n            logger.info(\"写入结果结束\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n\n\n\n\n","sub_path":"cases/test_02_login.py","file_name":"test_02_login.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"517258692","text":"from flask import Flask, render_template, Response, request\nimport sys\n# Tornado web server\nfrom tornado.wsgi import WSGIContainer\nfrom tornado.httpserver import HTTPServer\nimport tornado.ioloop\nfrom tornado.ioloop import IOLoop\nfrom text2speech import T2S\nimport os\n\nlanguage = 'kr' \nt2s = T2S(language)\nsample_text = {\n    'kr' : '여기에 텍스트 입력',\n    'en' : 'Enter the text'\n}\n\n# Initialize Flask.\napp = Flask(__name__)\n\n@app.route('/tts', methods=['GET', 'POST'])\ndef texttospeech():\n    if request.method == 'POST':\n        result = request.form\n        lang = result['input_language']\n        text = result['input_text']\n        if lang == t2s.language:\n            audio = t2s.tts(text)\n        else:\n            audio = t2s.update_model(lang).tts(text)\n        print(audio)\n        return render_template('simple.html', voice=audio, sample_text=text, opt_lang=t2s.language)\n\n    \n#Route to render GUI\n@app.route('/')\ndef show_entries():\n    return render_template('simple.html', sample_text=sample_text.get(t2s.language), voice=None, opt_lang=t2s.language)\n\n#Route to stream music; the view needs the file name, so take it from the URL\n@app.route('/<voice>', methods=['GET'])\ndef streammp3(voice):\n    \n    def generate(): \n        with open(os.path.join('wavs',voice), \"rb\") as fwav:\n            data = fwav.read(1024)\n            while data:\n                yield data\n                data = fwav.read(1024)\n    \n    return Response(generate(), mimetype=\"audio/mp3\")\n\n#launch a Tornado server with HTTPServer.\nif __name__ == \"__main__\":\n    port = 5000\n    http_server = HTTPServer(WSGIContainer(app))\n    http_server.listen(port)\n    io_loop = tornado.ioloop.IOLoop.current()\n    io_loop.start()\n    \n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"570265282","text":"\"\"\" Clustering users with 
k-means algorithm\n\n:Author: Yassmine Chebaro \n:Date: 2019-09-25\n:License: MIT\n\"\"\"\n\n# Import Libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import mean_squared_error\nimport itertools\nfrom collections import Counter\n\n\n\n\nclass ClusteringUsersStep1(object):\n\t\"\"\" K-means clustering of users using book ratings \n\t\tThis is the first step of the clustering\n\n\t\"\"\"\n\n\tdef clustering_users_ratings(self, df_ratings):\n\t\t\"\"\" Cluster users books ratings matrix\n\t\tAfter evaluation of the optimal number of k for this sparse matrix, the k chosen at this \n\t\tstage is 25\n\t\n\t\tArgs:\n\t\t\tdf_ratings (:obj:`DataFrame`): pandas DataFrame of user books ratings\n\t\tReturns:\n\t\t\t:obj:`user_clustid`: pandas DataFrame with clustered users in 'group' column\n\t\t\"\"\"\n\t\tuser_book_ratings = pd.pivot_table(df_ratings, index='user_idx', columns='book_idx', values='rating')\n\n\t\t# Conversion to sparse csr matrix\n\t\tsparse_ratings = csr_matrix(pd.SparseDataFrame(user_book_ratings).to_coo())\n\t\t# 25 clusters\n\t\tpredictions = KMeans(n_clusters=25).fit_predict(sparse_ratings)\n\n\t\tclustered = pd.concat([user_book_ratings.reset_index(), pd.DataFrame({'group':predictions})], axis=1)\n\n\t\tuser_clustid = clustered.drop(clustered.columns[1:10269], axis=1)\n\n\t\treturn user_clustid\n\n\n\tdef draw_movies_heatmap(self, user_book_ratings, axis_labels=True):\n\t\t\n\t\tfig = plt.figure(figsize=(15,4))\n\t\tax = plt.gca()\n\t\n\t\t# Draw heatmap\n\t\theatmap = ax.imshow(user_book_ratings, interpolation='nearest', vmin=0, vmax=5, aspect='auto')\n\t\tif axis_labels:\n\t\t\tax.set_yticks(np.arange(user_book_ratings.shape[0]) , minor=False)\n\t\t\tax.set_xticks(np.arange(user_book_ratings.shape[1]) , minor=False)\n\t\t\tax.invert_yaxis()\n\t\t\tax.xaxis.tick_top()\n\t\t\tax.set_yticklabels(user_book_ratings.index, minor=False)\n\n\t\tax.set_ylabel('User idx')\n\t\t# Separate heatmap from color bar\n\t\tdivider = make_axes_locatable(ax)\n\t\tcax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n\t\t# Color bar\n\t\tcbar = fig.colorbar(heatmap, ticks=[5, 4, 3, 2, 1, 0], cax=cax)\n\t\tplt.show()\n\t\n\nclass ClusteringUsersStep2(object):\n\t\"\"\" K-means clustering of users using book publication year and most populated tag\n\t\tThis is the second step of the clustering for more specificity\n\n\t\"\"\"\n\n\tdef get_publication_year(self, user_clusters, book_year, ratings_books):\n\t\t\"\"\" Getting book publication and including it in the matrix (since initially I \n\t\tdid not consider the year, these entries are missing...)\n\t\n\t\tArgs:\n\t\t\tuser_clusters (:obj:`DataFrame`): pandas DataFrame of users clusters from Step1\n\t\t\tbook_year (:obj:`DataFrame`): pandas DataFrame of books year and most populated tags\n\t\t\tratings_books (:obj:`DataFrame`): pandas DataFrame of user book ratings\n\n\t\tReturns:\n\t\t\t:obj:`user_all`: pandas DataFrame with user_idx, group (cluster) number, publication year\n\t\t\tand most popular shelf appended to book ratings/users DataFrame\n\t\t\"\"\"\n\t\tlistbooks_ratings = list(set(ratings_books['book_id_gr']))\n\t\tlistbooks_ratings_idx = list(set(ratings_books['book_idx']))\n\t\tlistbooks_year = list(set(book_year['book_id']))\n\n\t\tbook_year.rename(columns={'book_id':'book_id_gr', 'publication_year':'pub_year'}, inplace=True)\n\t\tbook_year.drop(book_year.columns[[2, 3, 4, 7, 8]], axis=1, inplace=True)\n\n\t\t# Get books 
publication year from books_sci-fi, matching book_id from rated and reviewed data\n\t\tratings_book_year = ratings_books[ratings_books['book_id_gr'].isin(listbooks_year)]\n\t\tratings_book_year = pd.merge(ratings_books, book_year, on='book_id_gr')\n\t\tuser_all = pd.merge(user_clusters, ratings_book_year, on='user_idx')\n\t\tuser_all.dropna(inplace=True)\n\n\t\treturn user_all\n\n\tdef get_cluster_group(self, dfin, clustid):\n\t\t\"\"\" Getting cluster group matrix with publication years and tag\n\t\n\t\tArgs:\n\t\t\tdfin (:obj:`DataFrame`): pandas DataFrame with user group, publication year and popular shelve \n\t\t\tappended to book ratings/user DataFrame\n\t\t\tclustid (:obj:`int`): number of the cluster from Step 1\n\n\t\tReturns:\n\t\t\t:obj:`tag_year`: pandas DataFrame pivotted with tag and shelve for desired cluster\n\t\t\"\"\"\n\t\tdf_clust = dfin[dfin['group'] == clustid]\n\t\tdf_clust_filtered = df_clust.drop(df_clust.columns[[1, 2, 3, 4, 5, 6, 7, 8, 10]], axis=1) \n\t\t\n\t\tcount_user_year = df_clust_filtered.groupby(['user_idx', 'pub_year']).size().reset_index(name ='year_count')\n\t\tuser_year_pivot = pd.pivot_table(count_user_year, index='user_idx', columns= 'pub_year', values='year_count')\n\t\n\t\tcount_user_tag = df_clust_filtered.groupby(['user_idx', 'popular_shelves']).size().reset_index(name ='tag_count')\n\t\tlisttags = Counter(count_user_tag['popular_shelves'])\n\t\tlisttags_freq = [k for k,v in listtags.items() if v>=10]\n\t\tdf_u_tags = count_user_tag[count_user_tag['popular_shelves'].isin(listtags_freq)]\n\t\tuser_tag_pivot = pd.pivot_table(df_u_tags, index='user_idx', columns= 'popular_shelves', values='tag_count')\n\n\t\ttag_year = pd.merge(user_year_pivot, user_tag_pivot, on='user_idx')\n\t\ttag_year.fillna(0, inplace=True)\n\n\t\treturn tag_year\n\n\tdef compute_clusters(self, tag_year, k):\n\t\t\"\"\" Compute clusters of users matrix with publication years and tag\n\t\n\t\tArgs:\n\t\t\ttag_year (:obj:`DataFrame`): pandas DataFrame pivotted with tag and shelve for desired cluster from Step 1 \n\t\t\tk (:obj:`int`): number of k clusters for k-means\n\n\t\tReturns:\n\t\t\t:obj:`clustered`: pandas DataFrame of user_idx and resulting cluster number\n\t\t\"\"\"\t\t\t\n\n\t\tpredictions = KMeans(n_clusters= k).fit_predict(tag_year)\n\t\tclustered = pd.concat([tag_year.reset_index(), pd.DataFrame({'group':predictions})], axis=1)\n\n\t\treturn clustered\n\nclass GetAllClusters(object):\n\t\"\"\" In this step clusters from Step 1 which are preserved and not used in Step 2 because of their\n\tappropriate size are combined with clusters from Step 2.\n\tThe group numbers are reassigned so as to maintain a consecutive and non-redundant count\n\n\t\"\"\"\n\n\tdef remap_clusters_step1(self, user_clusters):\n\t\t\"\"\" Remap cluster group number from Step 1\n\t\n\t\tArgs:\n\t\t\tusers_clusters (:obj:`DataFrame`): pandas DataFrame of users clusters from Step1\n\n\t\tReturns:\n\t\t\t:obj:`clust1`: pandas DataFrame of user_idx and group number from cluster 1\n\t\t\t:obj:`clust2`: pandas DataFrame of user_idx and group number from cluster 2\n\t\t\t:obj:`clust3`: pandas DataFrame of user_idx and group number from cluster 3\n\t\t\t:obj:`clust4`: pandas DataFrame of user_idx and group number from cluster 4\n\t\t\"\"\"\t\t\t\n\n\t\tclust1 = user_clusters[user_clusters['group'] == 2]\n\t\tclust1['group'] = clust1['group'].map({2: 1})\n\t\tclust2 = user_clusters[user_clusters['group'] == 10]\n\t\tclust2['group'] = clust2['group'].map({10: 2})\n\t\tclust3 = 
user_clusters[user_clusters['group'] == 13]\n\t\tclust3['group'] = clust3['group'].map({13: 3})\n\t\tclust4 = user_clusters[user_clusters['group'] == 20]\n\t\tclust4['group'] = clust4['group'].map({20: 4})\n\n\t\treturn clust1, clust2, clust3, clust4\n\n\tdef get_clusters_prop(self, dfcluster, start):\n\t\t\"\"\" Extract clusters from Step 2\n\t\n\t\tArgs:\n\t\t\tdfcluster (:obj:`DataFrame`): pandas DataFrame of cluster from Step 2\n\n\t\tReturns:\n\t\t\t:obj:`dfclean`: pandas DataFrame of user_idx and group number for subcluster\n\t\t\"\"\"\t\t\t\n\t\t# copy just in case :)\n\t\tdfclean = dfcluster.copy()\n\t\n\t\t# count the number of groups\n\t\tcount = len(dfclean.groupby('group').count())\n\t\n\t\t# renumber group id according to start \n\t\tfor i in range(count):\n\t\t\tdfclean['new_group'] = dfclean['group']+start\n\n\t\tdfclean['new_group'] = dfclean['new_group'].astype(int)\n\t\t\n\t\t# do some cleaning\n\t\tdfclean.drop('group', axis=1, inplace=True)\n\t\tdfclean.rename(columns={'user_idx': 'user_idx', 'new_group': 'group'}, inplace=True)\n\t\t\n\t\treturn dfclean\n\n\tdef get_merged_clusters(self, clust1, clust2, clust3, clust4, clustered_gp4, clustered_gp11,\n\t\tclustered_gp12, clustered_gp14):\n\t\t\"\"\" Merge all clusters and subclusters\n\t\n\t\tArgs:\n\t\t\tclust1 (:obj:`DataFrame`): pandas DataFrame of user_idx and group number from cluster 1\n\t\t\tclust2 (:obj:`DataFrame`): pandas DataFrame of user_idx and group number from cluster 2\n\t\t\tclust3 (:obj:`DataFrame`): pandas DataFrame of user_idx and group number from cluster 3\n\t\t\tclust4 (:obj:`DataFrame`): pandas DataFrame of user_idx and group number from cluster 4\n\n\t\tReturns:\n\t\t\t:obj:`final_sup10`: pandas DataFrame of user_idx and group number for all\n\t\t\"\"\"\t\t\t\n\t\tclust5 = self.get_clusters_prop(clustered_gp4.loc[:, ['user_idx','group']], 5)\n\t\tclust6 = self.get_clusters_prop(clustered_gp11.loc[:, ['user_idx','group']], 17)\n\t\tclust7 = self.get_clusters_prop(clustered_gp12.loc[:, ['user_idx','group']], 19)\n\t\tclust8 = self.get_clusters_prop(clustered_gp14.loc[:, ['user_idx','group']], 26)\n\n\t\tfinal_clusters = pd.concat([clust1, clust2, clust3, clust4, clust5, clust6, \n\t\t\tclust7, clust8], ignore_index=True)\n\n\t\tcount_user_clust_final = final_clusters.groupby(['group']).size().reset_index(name ='gp_count')\n\t\tsup10 = count_user_clust_final[count_user_clust_final['gp_count'] >= 10]\n\t\tlistsup10 = sup10['group'].to_list()\n\t\tfinal_sup10 = final_clusters[final_clusters['group'].isin(listsup10)]\n\n\t\treturn final_sup10\n\n\ndef main():\n\n\tbook_year = pd.read_csv('book_year_1tag-5-10.csv')\n\tratings_books = pd.read_csv('ratings_books_u80_b10.csv')\n\n\tuser_clusters = ClusteringUsersStep1().clustering_users_ratings(ratings_books)\n\n\t#user_clusters = pd.read_csv('user_clustid_k25.csv')\n\n\tuser_all = ClusteringUsersStep2().get_publication_year(user_clusters, book_year, ratings_books)\n\n\t#user_all.to_csv(\"testusersall.csv\",index=False)\n\ttag_year_gp4 = ClusteringUsersStep2().get_cluster_group(user_all, 4)\n\ttag_year_gp11 = ClusteringUsersStep2().get_cluster_group(user_all, 11)\n\ttag_year_gp12 = ClusteringUsersStep2().get_cluster_group(user_all, 12)\n\ttag_year_gp14 = ClusteringUsersStep2().get_cluster_group(user_all, 14)\n\n\tclustered_gp4 = ClusteringUsersStep2().compute_clusters(tag_year_gp4, 12)\n\tclustered_gp11 = ClusteringUsersStep2().compute_clusters(tag_year_gp11, 2)\n\tclustered_gp12 = ClusteringUsersStep2().compute_clusters(tag_year_gp12, 
7)\n\tclustered_gp14 = ClusteringUsersStep2().compute_clusters(tag_year_gp14, 12)\n\n\n\tclust1, clust2, clust3, clust4 = GetAllClusters().remap_clusters_step1(user_clusters)\n\tfinal = GetAllClusters().get_merged_clusters(clust1, clust2, clust3, clust4, clustered_gp4, clustered_gp11,\n\t\tclustered_gp12, clustered_gp14)\n\n\tfinal.to_csv('users-clustered-final.csv', index=False)\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"scificrew/clustering_users.py","file_name":"clustering_users.py","file_ext":"py","file_size_in_byte":10369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"251011213","text":"'''\nEinstein program for CS 108\nCreated Spring 2019\nLab 02\n@author: Ethan Walters (emw45)\n'''\n\n\n# Prompt user for required integers\nnumber = int(input('Please enter a 3 digit number where the first and last digits differ by at least 2: '))\n\n# Store the entered digits\ndigit1 = ((number//100)%10)\ndigit2 = ((number//10)%10)\ndigit3 = ((number//1)%10)\n\n# Reverse integer order\nrev_number = (digit3 * 100 + digit2 * 10 + digit1 * 1)\n\n# Calculate difference\ndifference = abs(number - rev_number)\n\n# Store difference integers\ndiff_digit1 = (difference//100)%10\ndiff_digit2 = (difference//10)%10\ndiff_digit3 = (difference//1)%10\n\n# Reverse difference integers\nrev_diff = (diff_digit3 * 100 + diff_digit2 * 10 + diff_digit1 * 1)\n\n# Print sum of difference and rev_diff\nprint(difference + rev_diff)\n\n\n\n\n","sub_path":"lab02/einstein.py","file_name":"einstein.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"542766990","text":"# solar.py\n\n# Dependencies\n\nimport matplotlib.pyplot as plt # for plotting\nimport numpy as np # numpy arrays for calculations and matplotlib plots\nfrom scipy import constants\n\n# Intra-package references\nimport planets\nimport plotter\n\npi = planets.pi # so we don't have to type np.pi all the time\nsigma = planets.sigma\n\n# Moon climate parameters\n\nS_E = planets.Moon.S\nalbedo = planets.Moon.albedo\nP_moon = planets.Moon.day\nepsilon = planets.Moon.emissivity\nq_c = planets.q_c\nq_g = planets.Moon.Qb\n\nSabs = S_E * (1.0 - albedo)\n\n# estimate mean equatorial soil heat flux from ballpark nighttime temp of 100 K\nq_soil = epsilon*sigma*(100**4) - q_c - q_g\nprint('q_soil: {:.2f} W m-2'.format(q_soil))\n\n# Model parameters\nmodelruntime = 2*P_moon\ndt = 1.0 * 3600. 
# timestep in seconds (1.0*3600*24 = 1 earth day)\nNt = int(round(modelruntime/dt)) # Calculate number of timesteps (rounded and converted to integer)\n\n# Create numpy arrays for calculations and plotting\nt = np.zeros(Nt) # start the clock at zero\nh = np.zeros(Nt) # hour angle\npsi = np.zeros(Nt) # clipping function\nsolar = np.zeros(Nt) # array of insolation values to be calculated \nT_s = np.zeros(Nt) # time array of surface temperatures\n\n# Create numpy array for lunar hour\n\nlunarhour = np.zeros(Nt)\n\n#print(int(P_moon/2*dt), Nt)\n# Time loop, goes until modelruntime\nfor n in range(0, Nt):\n\tt[n] = n*dt # Calculate the model time based on the current timestep number\n\th[n] = 2*pi*(t[n]/P_moon) # Calculate hour angle\n\tpsi[n] = 0.5 * ( np.cos(h[n]) + np.abs(np.cos(h[n]))) # Calculate clipping function\n\tsolar[n] = Sabs * psi[n] # Calculate solar flux\n\n\t#T_s[n] = ((1-albedo)*solar[n]/epsilon/sigma)**(1/4) # Calculate surface temperature given solar flux\n\t#T_s[n] = (((1-albedo)*solar[n] + q_c)/epsilon/sigma)**(1/4) # Calculate surface temperature given solar flux and cosmic flux\n\t#T_s[n] = (((1-albedo)*solar[n] + q_c + q_g)/epsilon/sigma)**(1/4) # Calculate surface temperature given solar, cosmic, and geothermal flux\n\tT_s[n] = (((1-albedo)*solar[n] + q_c + q_g + q_soil)/epsilon/sigma)**(1/4) # Calculate surface temperature given solar, cosmic, and geothermal flux\n\n\tlunarhour[n] = t[n]/P_moon*24 - 12 # Calculate lunar hour (for plotting only)\n\n\n\n\n# Plot arrays using local plotter module\n\nplot_title = 'Model lunar equatorial sfc temp with: S, q_c, q_g, q_soil'\nplotter.plot(x=lunarhour, y=T_s, title = plot_title)\n\n","sub_path":"sandbox/solar.py","file_name":"solar.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"190935529","text":"\"\"\"\nHere be production settings\n\"\"\"\n\nimport os\nfrom .base import *\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nSECRET_KEY = os.getenv('SECRET_KEY', SECRET_KEY)\n\nCSRF_COOKIE_SECURE = True\nSESSION_COOKIE_SECURE = True\n\n# Parse database configuration from $DATABASE_URL\n# import dj_database_url\n# DATABASES['default'] = dj_database_url.config()\n\n# Enable Connection Pooling\n# DATABASES['default']['ENGINE'] = 'django_postgrespool'\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# Allow all host headers\nALLOWED_HOSTS = ['*']\n","sub_path":"settings/heroku.py","file_name":"heroku.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255118619","text":"from django.db import models\n\n# Create your models here.\n\nfrom django.db import models\nfrom multiselectfield import MultiSelectField\nfrom django.utils.safestring import mark_safe\n\n\n# from rbac.models import User\n\nclass UserInfo(models.Model):\n \"\"\"\n 用户信息表\n \"\"\"\n username=models.CharField(max_length=16)\n password=models.CharField(max_length=32)\n telephone=models.CharField(max_length=11)\n email=models.EmailField()\n is_active=models.BooleanField(default=True) # 当前员工是否还是我的员工 辞职改为Fasle\n depart=models.ForeignKey('Department', on_delete=models.CASCADE, null=True, blank=True)\n\n def __str__(self):\n return self.username\n\n class Meta:\n ordering=[\"id\"]\n verbose_name=\"用户信息表\"\n verbose_name_plural=\"用户信息表\"\n\n\n# Create your models here.\n\ncourse_choices=(('LinuxL', 
'Linux中高级'),\n ('PythonFullStack', 'Python高级全栈开发'),)\n\nclass_type_choices=(('fulltime', '脱产班',),\n ('online', '网络班'),\n ('weekend', '周末班',),)\n\nsource_type=(('qq', \"qq群\"),\n ('referral', \"内部转介绍\"),\n ('website', \"官方网站\"),\n ('baidu_ads', \"百度推广\"),\n ('office_direct', \"直接上门\"),\n ('WoM', \"口碑\"),\n ('public_class', \"公开课\"),\n ('website_luffy', \"路飞官网\"),\n ('others', \"其它\"),)\n\nenroll_status_choices=(('signed', \"已报名\"),\n ('unregistered', \"未报名\"),\n ('studying', '学习中'),\n ('paid_in_full', \"学费已交齐\"))\n\nseek_status_choices=(('A', '近期无报名计划'), ('B', '1个月内报名'), ('C', '2周内报名'), ('D', '1周内报名'),\n ('E', '定金'), ('F', '到班'), ('G', '全款'), ('H', '无效'),)\npay_type_choices=(('deposit', \"订金/报名费\"),\n ('tuition', \"学费\"),\n ('transfer', \"转班\"),\n ('dropout', \"退学\"),\n ('refund', \"退款\"),)\n\nattendance_choices=(('checked', \"已签到\"),\n ('vacate', \"请假\"),\n ('late', \"迟到\"),\n ('absence', \"缺勤\"),\n ('leave_early', \"早退\"),)\n\nscore_choices=((100, 'A+'),\n (90, 'A'),\n (85, 'B+'),\n (80, 'B'),\n (70, 'B-'),\n (60, 'C+'),\n (50, 'C'),\n (40, 'C-'),\n (0, ' D'),\n (-1, 'N/A'),\n (-100, 'COPY'),\n (-1000, 'FAIL'),)\n\n\n# class UserInfo(User):\n# \"\"\"\n# 用户表:销售\\讲师、班主任\n# \"\"\"\n# username = models.CharField(max_length=16)\n# password = models.CharField(max_length=32)\n# email = models.EmailField()\n# telephone = models.CharField(max_length=16)\n# is_active = models.BooleanField(default=True)\n# depart = models.ForeignKey('Department',on_delete=models.CASCADE,null=True,blank=True)\n# def __str__(self):\n# return self.username\n\nclass Department(models.Model):\n \"\"\"\n 部门表\n \"\"\"\n name=models.CharField(max_length=32)\n count=models.IntegerField()\n\n class Meta:\n ordering=[\"id\"]\n verbose_name=\"部门表\"\n verbose_name_plural=\"部门表\"\n\n def __str__(self):\n return self.name\n\n\nclass Customer(models.Model):\n \"\"\"\n 客户表(最开始的时候大家都是客户,销售就不停的撩你,你还没交钱就是个客户)\n \"\"\"\n qq=models.CharField(verbose_name='QQ', max_length=64, unique=True, help_text='QQ号必须唯一')\n qq_name=models.CharField('QQ昵称', max_length=64, blank=True, null=True) # requierd:False\n name=models.CharField('姓名', max_length=32, blank=True, null=True, help_text='学员报名后,请改为真实姓名') # 可为空,有些人就不愿意给自己的真实姓名\n sex_type=(('male', '男'), ('female', '女')) #\n sex=models.CharField(\"性别\", choices=sex_type, max_length=16, default='male', blank=True,\n null=True) # 存的是male或者female,字符串\n birthday=models.DateField('出生日期', default=None, help_text=\"格式yyyy-mm-dd\", blank=True, null=True)\n phone=models.BigIntegerField('手机号', blank=True, null=True) # 手机号改成字符串的,不然不好搜索\n # phone = models.CharField('手机号', blank=True, null=True)\n source=models.CharField('客户来源', max_length=64, choices=source_type, default='qq')\n\n introduce_from=models.ForeignKey('self', verbose_name=\"转介绍自学员\", blank=True, null=True,\n on_delete=models.CASCADE) # self指的就是自己这个表,和下面写法是一样的效果\n # '''\n # id name introduce_from\n # 1 dz None\n # 2 xf 1\n # 3 cg 1\n #\n # '''\n\n # introduce_from = models.ForeignKey('Customer', verbose_name=\"转介绍自学员\", blank=True, null=True,on_delete=models.CASCADE)\n course=MultiSelectField(\"咨询课程\", choices=course_choices) # 多选,并且存成一个列表的格式,通过modelform来用的时候,会成为一个多选框\n\n # course = models.CharField(\"咨询课程\", choices=course_choices) #如果你不想用上面的多选功能,可以使用Charfield来存\n class_type=models.CharField(\"班级类型\", max_length=64, choices=class_type_choices, default='fulltime')\n customer_note=models.TextField(\"客户备注\", blank=True, null=True, )\n status=models.CharField(\"状态\", choices=enroll_status_choices, max_length=64, default=\"unregistered\",\n 
help_text=\"选择客户此时的状态\") # help_text这种参数基本都是针对admin应用里面用的\n\n date=models.DateTimeField(\"咨询日期\", auto_now_add=True) # 这个没啥用昂,我问销售,销售说是为了一周年的时候给客户发一个祝福信息啥的\n last_consult_date=models.DateField(\"最后跟进日期\", auto_now_add=True) # 考核销售的跟进情况,如果多天没有跟进,会影响销售的绩效等\n next_date=models.DateField(\"预计再次跟进时间\", blank=True, null=True) # 销售自己大概记录一下自己下一次会什么时候跟进,也没啥用\n\n # 用户表中存放的是自己公司的所有员工。\n consultant=models.ForeignKey('UserInfo', verbose_name=\"销售\", related_name='customers', blank=True, null=True,\n on_delete=models.CASCADE)\n\n # 一个客户可以报多个班,报个脱产班,再报个周末班等,所以是多对多。\n class_list=models.ManyToManyField('ClassList', verbose_name=\"已报班级\", blank=True)\n\n # 成单日期,统计成单日期的时候会用到\n deal_date=models.DateField(null=True, blank=True)\n\n # delete_status = models.BooleanField(default=True,blank=True)\n class Meta:\n ordering=['id', ]\n verbose_name='客户信息表'\n verbose_name_plural='客户信息表'\n\n def __str__(self):\n return self.name + \":\" + self.qq\n\n enroll_status_choices=(('signed', \"已报名\"),\n ('unregistered', \"未报名\"),\n ('studying', '学习中'),\n ('paid_in_full', \"学费已交齐\"))\n\n def status_show(self):\n status_color={\n 'paid_in_full': 'green',\n 'unregistered': 'red',\n 'studying': 'lightblue',\n 'signed': 'yellow',\n }\n\n return mark_safe(\"{1}\".format(status_color[self.status],\n self.get_status_display()))\n\n def get_classlist(self): # 当我们通过self.get_classlist的时候,就拿到了所有的班级信息,前端显示的时候用\n\n l=[]\n for cls in self.class_list.all():\n l.append(str(cls))\n return mark_safe(\",\".join(l)) # 纯文本,不用mark_safe也可以昂\n\n\nclass Campuses(models.Model):\n \"\"\"\n 校区表\n \"\"\"\n name=models.CharField(verbose_name='校区', max_length=64)\n address=models.CharField(verbose_name='详细地址', max_length=512, blank=True, null=True)\n\n class Meta:\n ordering=['id', ]\n verbose_name='校区表'\n verbose_name_plural='校区表'\n\n def __str__(self):\n return self.name\n\n\nclass ClassList(models.Model):\n \"\"\"\n 班级表\n \"\"\"\n course=models.CharField(\"课程名称\", max_length=64, choices=course_choices)\n semester=models.IntegerField(\"学期\") # python20期等\n campuses=models.ForeignKey('Campuses', verbose_name=\"校区\", on_delete=models.CASCADE)\n price=models.IntegerField(\"学费\", default=10000)\n memo=models.CharField('说明', blank=True, null=True, max_length=100)\n start_date=models.DateField(\"开班日期\")\n graduate_date=models.DateField(\"结业日期\", blank=True, null=True) # 不一定什么时候结业,哈哈,所以可为空\n\n # contract = models.ForeignKey('ContractTemplate', verbose_name=\"选择合同模版\", blank=True, null=True,on_delete=models.CASCADE)\n teachers=models.ManyToManyField('UserInfo',\n verbose_name=\"老师\") # 对了,还有一点,如果你用的django2版本的,那么外键字段都需要自行写上on_delete=models.CASCADE\n\n class_type=models.CharField(choices=class_type_choices, max_length=64, verbose_name='班额及类型', blank=True, null=True)\n\n class Meta:\n unique_together=(\"course\", \"semester\", 'campuses')\n ordering=['id', ]\n verbose_name='班级表'\n verbose_name_plural='班级表'\n\n def __str__(self):\n return \"{}{}({})\".format(self.get_course_display(), self.semester, self.campuses)\n\n\n############################下面的表以后再说#################################\n\n# class ContractTemplate(models.Model):\n# \"\"\"\n# 合同模板表\n# \"\"\"\n# name = models.CharField(\"合同名称\", max_length=128, unique=True)\n# content = models.TextField(\"合同内容\")\n# date = models.DateField(auto_now=True)\n#\n#\n\nclass ConsultRecord(models.Model):\n \"\"\"\n 跟进记录表\n \"\"\"\n customer=models.ForeignKey('Customer', verbose_name=\"所咨询客户\")\n note=models.TextField(verbose_name=\"跟进内容...\")\n status=models.CharField(\"跟进状态\", max_length=8, choices=seek_status_choices, 
help_text=\"选择客户此时的状态\")\n consultant=models.ForeignKey(\"UserInfo\", verbose_name=\"跟进人\", related_name='records')\n date=models.DateTimeField(\"跟进日期\", auto_now_add=True)\n delete_status=models.BooleanField(verbose_name='删除状态', default=False)\n\n def __str__(self):\n return self.customer.name +\"<-- \" + self.consultant.username\n\n class Meta:\n ordering=['id', ]\n verbose_name='跟进记录表'\n verbose_name_plural='跟进记录表'\n\n\nclass Enrollment(models.Model):\n \"\"\"\n 报名表\n \"\"\"\n why_us=models.TextField(\"为什么报名\", max_length=1024, default=None, blank=True, null=True)\n your_expectation=models.TextField(\"学完想达到的具体期望\", max_length=1024, blank=True, null=True)\n # contract_agreed = models.BooleanField(\"我已认真阅读完培训协议并同意全部协议内容\", default=False)\n contract_approved=models.BooleanField(\"审批通过\", help_text=\"在审阅完学员的资料无误后勾选此项,合同即生效\", default=False)\n enrolled_date=models.DateTimeField(auto_now_add=True, verbose_name=\"报名日期\")\n memo=models.TextField('备注', blank=True, null=True)\n delete_status=models.BooleanField(verbose_name='删除状态', default=False)\n customer=models.ForeignKey('Customer', verbose_name='客户名称')\n school=models.ForeignKey('Campuses')\n enrolment_class=models.ForeignKey(\"ClassList\", verbose_name=\"所报班级\")\n\n class Meta:\n unique_together=('enrolment_class', 'customer')\n\n ordering=['id', ]\n verbose_name='报名表'\n verbose_name_plural='报名表'\n\n def __str__(self):\n return self.customer.name\n\n\n# class PaymentRecord(models.Model):\n# \"\"\"\n# 缴费记录表\n# \"\"\"\n# pay_type = models.CharField(\"费用类型\", choices=pay_type_choices, max_length=64, default=\"deposit\")\n# paid_fee = models.IntegerField(\"费用数额\", default=0)\n# note = models.TextField(\"备注\", blank=True, null=True)\n# date = models.DateTimeField(\"交款日期\", auto_now_add=True)\n# course = models.CharField(\"课程名\", choices=course_choices, max_length=64, blank=True, null=True, default='N/A')\n# class_type = models.CharField(\"班级类型\", choices=class_type_choices, max_length=64, blank=True, null=True,\n# default='N/A')\n# enrolment_class = models.ForeignKey('ClassList', verbose_name='所报班级', blank=True, null=True)\n# customer = models.ForeignKey('Customer', verbose_name=\"客户\")\n# consultant = models.ForeignKey('UserProfile', verbose_name=\"销售\")\n# delete_status = models.BooleanField(verbose_name='删除状态', default=False)\n#\n# status_choices = (\n# (1, '未审核'),\n# (2, '已审核'),\n# )\n# status = models.IntegerField(verbose_name='审核', default=1, choices=status_choices)\n#\n# confirm_date = models.DateTimeField(verbose_name=\"确认日期\", null=True, blank=True)\n# confirm_user = models.ForeignKey(verbose_name=\"确认人\", to='UserProfile', related_name='confirms', null=True,\n# blank=True)\n#\n#\n\n# course_record_id student 考勤 本节成绩 homework_note\n# # 1 74 crm 跟进记录操作 1 迟到 60 写的很好\n# # 1 74 crm 跟进记录操作 2 签到 80 写的非常好\n# #\n# # id 节次 本节课程标题 本节课程内容\n# # 1 74 crm 跟进记录操作\n\n\nclass CourseRecord(models.Model):\n \"\"\"课程记录表\"\"\"\n day_num=models.IntegerField(\"节次\", help_text=\"此处填写第几节课或第几天课程...,必须为数字\")\n date=models.DateField(auto_now_add=True, verbose_name=\"上课日期\")\n course_title=models.CharField('本节课程标题', max_length=64, blank=True, null=True)\n course_memo=models.TextField('本节课程内容', max_length=300, blank=True, null=True)\n has_homework=models.BooleanField(default=True, verbose_name=\"本节有作业\")\n homework_title=models.CharField('本节作业标题', max_length=64, blank=True, null=True)\n homework_memo=models.TextField('作业描述', max_length=500, blank=True, null=True)\n scoring_point=models.TextField('得分点', max_length=300, blank=True, null=True)\n\n 
re_class=models.ForeignKey('ClassList', verbose_name=\"班级\")\n    teacher=models.ForeignKey('UserInfo', verbose_name=\"讲师\")\n\n    class Meta:\n        unique_together=('re_class', 'day_num')\n        ordering=[\"id\"]\n        verbose_name=\"课程记录表\"\n        verbose_name_plural=\"课程记录表\"\n\n    def __str__(self):\n        return str(self.day_num)\n\n\nclass StudyRecord(models.Model):\n    \"\"\"\n    学习记录\n    \"\"\"\n    attendance=models.CharField(\"考勤\", choices=attendance_choices, default=\"checked\", max_length=64)\n    score=models.IntegerField(\"本节成绩\", choices=score_choices, default=-1)\n    homework_note=models.CharField(max_length=255, verbose_name='作业批语', blank=True, null=True)\n    date=models.DateTimeField(auto_now_add=True)\n    note=models.CharField(\"备注\", max_length=255, blank=True, null=True)\n    homework=models.FileField(verbose_name='作业文件', blank=True, null=True, default=None)\n    course_record=models.ForeignKey('CourseRecord', verbose_name=\"某节课程\")\n    student=models.ForeignKey('Customer', verbose_name=\"学员\")\n\n    class Meta:\n        unique_together=('course_record', 'student')\n\n        ordering=[\"id\"]\n        verbose_name=\"学习记录\"\n        verbose_name_plural=\"学习记录\"\n\n    def __str__(self):\n        return self.student.name + ':' + str(self.course_record.day_num)\n","sub_path":"app01/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123845516","text":"#!/usr/bin/env python\n# Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file\n# for details. All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\n\nimport argparse\nimport os\nimport subprocess\nimport sys\nimport utils\n\nusage = \"\"\"patch_sdk.py [options]\"\"\"\n\ndef BuildArguments():\n  result = argparse.ArgumentParser(usage=usage)\n  result.add_argument(\"--dart-executable\", help=\"dart executable\", default=None)\n  return result\n\ndef main():\n  # Parse the options.\n  parser = BuildArguments()\n  (options, args) = parser.parse_known_args()\n  if options.dart_executable is not None:\n    options.dart_executable = os.path.abspath(options.dart_executable)\n  else:\n    options.dart_executable = os.path.join(utils.CheckedInSdkPath(), 'bin', 'dart')\n  dart_file = os.path.join(os.path.dirname(__file__), 'patch_sdk.dart')\n  subprocess.check_call([options.dart_executable, dart_file] + args)\n\nif __name__ == '__main__':\n  main()\n","sub_path":"tools/patch_sdk.py","file_name":"patch_sdk.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"529531581","text":"from django.shortcuts import render_to_response, render\nfrom grid_profiles.forms import UserProfileForm\nfrom grid_profiles.models import Userprofile\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\n\n@login_required\ndef profile(request):\n\tcontext = {}\n\t# the view is login_required, so use the authenticated user from the request\n\tuser_profile = request.user\n\tuserprofileobj, crt = Userprofile.objects.get_or_create(user=user_profile)\n\tif request.method=='POST':\n\t\tform = UserProfileForm(request.POST, request.FILES, instance=userprofileobj)\n\t\tif form.is_valid():\n\t\t\tup = form.save(commit=False)\n\t\t\tup.user = user_profile\n\t\t\tup.save()\n\n\t\t\treturn HttpResponseRedirect('/')\n\t\telse:\n\t\t\tform.errors\n\telse:\n\t\tform = 
UserProfileForm(instance=userprofileobj)\n\tcontext['form'] = form\n\treturn render(request, \"profile.html\", context)\n \n\n","sub_path":"grid_profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"115655880","text":"from pytest import mark, raises\n\nfrom evaluator import evaluate, global_environment\nimport errors\n\n\ndef test_evaluate_number():\n got = evaluate(7)\n assert 7 == got\n\n\n@mark.parametrize(\"ast, value\", [\n (['+', 1, 2], 3),\n (['*', 6, ['+', 3, 4]], 42),\n (['/', ['*', ['-', 100, 32], 5], 9], 37)\n])\ndef test_expression(ast, value):\n got = evaluate(ast)\n assert value == got\n\n\ndef test_evaluate_undefined_variable():\n ast = 'x'\n with raises(errors.UndefinedVariable) as excinfo:\n evaluate(ast)\n assert \"Undefined variable: 'x'.\" == str(excinfo.value)\n\n\ndef test_set():\n ast = ['set', 'test_set_var', ['/', 6, 2]]\n want_name = 'test_set_var'\n want_value = 3\n got = evaluate(ast)\n assert want_value == got\n assert want_name in global_environment\n assert want_value == global_environment[want_name]\n del global_environment[want_name]\n","sub_path":"plain/CalcWithVars/evaluator_test.py","file_name":"evaluator_test.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"601759434","text":"import requests\nimport sys\n\n# Insert GitHub API token here, in place of *TOKEN*.\nheaders = {\"Authorization\": \"token *TOKEN*\"}\n\n# Constants & language argument.\nNUM_REPOS = 10_000\nMIN_STARS = 50\nLANGUAGE = \"java\" if len(sys.argv) <= 1 else sys.argv[1]\n\n\ndef main():\n\trepositories = set() # Use a set to avoid duplicate entries across pages.\n\tmax_stars = 1_000_000_000 # Initialize at a very high value.\n\twhile len(repositories) < NUM_REPOS:\n\t\tnew_repositories = run_query(max_stars)\n\t\tmax_stars = min([stars for _, stars in new_repositories])\n\t\t# If a query returns no new repositories, drop it.\n\t\tif len(repositories | new_repositories) == len(repositories):\n\t\t\tbreak\n\t\trepositories.update(new_repositories)\n\t\tprint(f'Collected {len(repositories):,} repositories so far; lowest number of stars: {max_stars:,}')\n\t\n\twith open(f'{LANGUAGE}-top-repos.txt', 'w') as f:\n\t\tfor repository, _ in sorted(repositories, key=lambda e: e[1], reverse=True):\n\t\t\tf.write(f'{repository}\\n')\n\n\ndef run_query(max_stars):\n\tend_cursor = None # Used to track pagination.\n\trepositories = set()\n\n\twhile end_cursor != \"\":\n\t\t# Extracts non-fork, recently active repositories in the provided language, in groups of 100.\n\t\t# Leaves placeholders for maximum stars and page cursor. The former allows us to retrieve more than 1,000 repositories\n\t\t# by repeatedly lowering the bar.\n\t\tquery = f\"\"\"\n\t\t{{\n\t\t search(query: \"language:{LANGUAGE} fork:false pushed:>2020-01-01 sort:stars stars:<{max_stars}\", type: REPOSITORY, first: 100 {', after: \"' + end_cursor + '\"' if end_cursor else ''}) {{\n\t\t\tedges {{\n\t\t\t node {{\n\t\t\t\t... 
on Repository {{\n\t\t\t\t url\n\t\t\t\t isPrivate\n\t\t\t\t isDisabled\n\t\t\t\t isLocked\n\t\t\t\t stargazers {{\n\t\t\t\t\ttotalCount\n\t\t\t\t }}\n\t\t\t\t}}\n\t\t\t }}\n\t\t\t}}\n\t\t\tpageInfo {{\n\t\t\t hasNextPage\n\t\t\t endCursor\n\t\t\t}}\n\t\t }}\n\t\t}}\n\t\t\"\"\"\n\t\tprint(f' Retrieving next page; {len(repositories)} repositories in this batch so far.')\n\t\trequest = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)\n\t\tcontent = request.json()\n\t\tend_cursor = get_end_cursor(content)\n\t\trepositories.update(get_repositories(content))\n\t\tif len(repositories) > NUM_REPOS:\n\t\t\tbreak\n\treturn repositories\n\n\ndef get_end_cursor(content):\n\tpage_info = content['data']['search']['pageInfo']\n\thas_next_page = page_info['hasNextPage']\n\tif has_next_page:\n\t\treturn page_info['endCursor']\n\treturn \"\"\n\n\ndef get_repositories(content):\n\tedges = content['data']['search']['edges']\n\trepositories_with_stars = []\n\tfor edge in edges:\n\t\tif edge['node']['isPrivate'] is False and edge['node']['isDisabled'] is False and edge['node']['isLocked'] is False:\n\t\t\trepository = edge['node']['url']\n\t\t\tstar_count = edge['node']['stargazers']['totalCount']\n\t\t\trepositories_with_stars.append((repository, star_count))\n\treturn repositories_with_stars\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"github-scraping/01_metadata_by_github_api.py","file_name":"01_metadata_by_github_api.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"571868441","text":"'''this script retrieves the url of each movie script and \nwrites a text file with the urls needed to collect movie scripts\n'''\n\n#read html code\nimport urllib2\n\n# for retreiving information from websites \nfrom bs4 import BeautifulSoup as bs\n\n#to add randomised waiting time\nimport time, random\n\n#get lower case characters\nimport string\n\n\nurl = 'http://www.springfieldspringfield.co.uk'\n\n#get url for each letter\nupper = string.ascii_uppercase\nalpha_url = 'http://www.springfieldspringfield.co.uk/movie_scripts.php?order='\nalphabet_links = [alpha_url + char for char in upper]\n\ndef index(find_links, lower):\n \"\"\"This function finds the number of pages each link contains.\n Input: find_links a list of all links on the page, and \n lower the lower case alphabet character of that main link.\n Output: page index (int)\n \"\"\"\n page_num = [page_num for page_num in find_links if 'page=' in page_num][-1]\n \n if lower in \"quxyz\":\n page_index = page_num[-1]\n else:\n page_index = page_num[-2:]\n \n return int(page_index)\n\n\nscript_links = []\nlower = string.ascii_lowercase\n\nfor num,upper_link in enumerate(alphabet_links):\n first_delay = random.randint(1,2)\n #pause execution for 1-2 seconds\n time.sleep(first_delay)\n #get html content\n open_link = urllib2.urlopen(upper_link).read()\n #parse html\n soup = bs(open_link, \"html.parser\")\n #get all links on page\n find_links = [link.get(\"href\") for link in soup.find_all('a')]\n #get page index\n page_index = index(find_links, lower[num])\n \n for iter in range(1, page_index+1):\n second_delay = random.randint(1,2)\n #pause execution for 1-3 seconds\n time.sleep(second_delay)\n #open next page\n lowercase_link = urllib2.urlopen(upper_link+'&page='+str(iter))\n lowercase_soup = bs(lowercase_link, \"html.parser\")\n #get all links on page\n get_links = [links.get(\"href\") for links in 
lowercase_soup.find_all('a')]\n #get movie links:\n movie_links = [ml for ml in get_links if \"movie=\" in ml and ml[1]==\"m\"]\n #convert to string\n links = [url + str(ml) for ml in movie_links]\n\n #populate script_links list\n for full_links in links:\n script_links.append(full_links)\n\n#write to text file\nwith open(\"links.txt\", \"w\") as text_file:\n text_file.write('\\n'.join(script_links))\n","sub_path":"Web Scraper/link_scraper.py","file_name":"link_scraper.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"159386104","text":"import sys\nfrom io import BytesIO\nfrom math import sqrt\n\nimport requests\nfrom PIL import Image\nfrom choose_size import *\n\ntoponym_to_find = \" \".join(sys.argv[1:])\n\ngeocoder_api_server = \"http://geocode-maps.yandex.ru/1.x/\"\n\ngeocoder_params = {\n \"apikey\": \"40d1649f-0493-4b70-98ba-98533de7710b\",\n \"geocode\": toponym_to_find,\n \"format\": \"json\"}\n\nresponse_toponym = requests.get(geocoder_api_server, params=geocoder_params)\n\nif not response_toponym:\n print(\"toponym_error\")\n print(response_toponym.status_code)\n print(response_toponym.content)\n exit(-1)\n\n# Преобразуем ответ в json-объект\njson_response_toponym = response_toponym.json()\n\ntoponym_longitude, toponym_lattitude, toponym_delta = map(float,\n choose_size_geocoder(\n json_response_toponym))\n\nsearch_api_server = \"https://search-maps.yandex.ru/v1/\"\napi_key = \"dda3ddba-c9ea-4ead-9010-f43fbc15c6e3\"\n\naddress_ll = f\"{toponym_longitude},{toponym_lattitude}\"\n\nsearch_params = {\n \"ll\": address_ll,\n # \"spn\": \"1,1\",\n \"text\": \"аптека\",\n \"type\": \"biz\",\n \"apikey\": api_key,\n \"lang\": \"ru_RU\"\n}\n\nresponse = requests.get(search_api_server, params=search_params)\nif not response:\n print(response.status_code)\n print(response.content)\n exit(-1)\n\njson_response = response.json()\npoints = [address_ll + \",comma\"]\n\n\nfor i in range(len(json_response[\"features\"])):\n # Получаем первую найденную организацию.\n organization = json_response[\"features\"][i]\n # Название организации.\n org_name = organization[\"properties\"][\"CompanyMetaData\"][\"name\"]\n # Адрес организации.\n org_address = organization[\"properties\"][\"CompanyMetaData\"][\"address\"]\n # Часы работы\n org_hours = organization[\"properties\"][\"CompanyMetaData\"][\"Hours\"][\"text\"]\n\n # Получаем координаты ответа.\n org_longitude, org_lattitude, org_delta = choose_size_search_api(organization)\n org_coords = \"{},{},pmwts{}\".format(org_longitude, org_lattitude, i + 1)\n print(org_coords)\n points.append(org_coords)\n\n# Получаем первую найденную организацию.\norganization = json_response[\"features\"][0]\norg_longitude, org_lattitude, org_delta = choose_size_search_api(organization)\n\ndelta = str(2 * max(abs(org_longitude - float(toponym_longitude)), abs(org_lattitude -\n float(toponym_lattitude))))\n\n# Собираем параметры для запроса к StaticMapsAPI:\nmap_params = {\n # позиционируем карту центром на наш исходный адрес\n \"ll\": f\"{toponym_longitude},{toponym_lattitude}\",\n \"spn\": \",\".join([delta, delta]),\n \"l\": \"map\",\n # добавим точку, чтобы указать найденную аптеку\n \"pt\": '~'.join(points)\n}\n\ndistance = sqrt((toponym_longitude - org_longitude) ** 2 + (toponym_lattitude - org_lattitude)\n ** 2)\nCONST_METERS_IN_DEGREE = float(pow(10, 5)) * 0.5\n\n# Формирование сниппета\nsnippet = [org_name, org_address, org_hours, int(CONST_METERS_IN_DEGREE * 
distance)]\nsnippet = list(map(str, snippet))\nsnippet[3] += \" м\"\n\n# Печать сниппетов1\nprint('\\n'.join(snippet))\n\nmap_api_server = \"http://static-maps.yandex.ru/1.x/\"\nresponse = requests.get(map_api_server, params=map_params)\n\nif response:\n Image.open(BytesIO(response.content)).show()\nelse:\n print(\"code:\", response.status_code)\n print(response.content)","sub_path":"Web 6/find-drugstore-v2/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"3509699","text":"from typing import Tuple\n\nimport torch\nfrom ignite.engine import Engine\nfrom torch.cuda.amp import GradScaler, autocast\n\n\ndef create_trainer(config, model, optimizer, loss_fn, device):\n\n use_amp = config.use_amp\n scaler = GradScaler(enabled=use_amp)\n\n def train_step(engine, batch):\n\n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n token_type_ids = batch[\"token_type_ids\"]\n labels = batch[\"label\"].view(-1, 1)\n\n if input_ids.device != device:\n input_ids = input_ids.to(device, non_blocking=True, dtype=torch.long)\n attention_mask = attention_mask.to(device, non_blocking=True, dtype=torch.long)\n token_type_ids = token_type_ids.to(device, non_blocking=True, dtype=torch.long)\n labels = labels.to(device, non_blocking=True, dtype=torch.float)\n\n model.train()\n\n with autocast(enabled=use_amp):\n y_pred = model(input_ids, attention_mask, token_type_ids)\n loss = loss_fn(y_pred, labels)\n\n optimizer.zero_grad()\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n loss_value = loss.item()\n engine.state.metrics = {\"epoch\": engine.state.epoch, \"train_loss\": loss_value}\n return loss_value\n\n trainer = Engine(train_step)\n return trainer\n\n\ndef create_evaluator(config, model, loss_fn, device):\n use_amp = config.use_amp\n\n @torch.no_grad()\n def evaluate_step(engine, batch):\n model.eval()\n input_ids = batch[\"input_ids\"]\n attention_mask = batch[\"attention_mask\"]\n token_type_ids = batch[\"token_type_ids\"]\n labels = batch[\"label\"].view(-1, 1)\n\n if input_ids.device != device:\n input_ids = input_ids.to(device, non_blocking=True, dtype=torch.long)\n attention_mask = attention_mask.to(device, non_blocking=True, dtype=torch.long)\n token_type_ids = token_type_ids.to(device, non_blocking=True, dtype=torch.long)\n labels = labels.to(device, non_blocking=True, dtype=torch.float)\n\n with autocast(enabled=use_amp):\n outputs = model(input_ids, attention_mask, token_type_ids)\n loss = loss_fn(outputs, labels)\n\n loss_value = loss.item()\n engine.state.metrics = {\"eval_loss\": loss_value}\n return outputs, labels\n\n evaluator = Engine(evaluate_step)\n return evaluator\n\n\n# function for creating engines which will be used in main.py\n# any necessary arguments can be provided.\ndef create_trainers(config, model, optimizer, loss_fn, device) -> Tuple[Engine, Engine]:\n \"\"\"Create Engines for training and evaluation.\n\n Returns\n -------\n trainer, evaluator\n \"\"\"\n trainer = create_trainer(config, model, optimizer, loss_fn, device)\n evaluator = create_evaluator(config, model, loss_fn, device)\n return trainer, evaluator\n","sub_path":"templates/text_classification/trainers.py","file_name":"trainers.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"530796243","text":"\"\"\"Define support for Wyze Camera 
cameras\"\"\"\nimport asyncio\nimport logging\nimport hashlib\nfrom random import SystemRandom\nfrom enum import Enum\n\nfrom .wyzeapi.wyzeapi import WyzeApi\nfrom . import DOMAIN\n\n\nfrom haffmpeg.camera import CameraMjpeg\nfrom haffmpeg.tools import ImageFrame, IMAGE_JPEG\n\nfrom homeassistant.components.camera import SUPPORT_ON_OFF, SUPPORT_STREAM, Camera\nfrom homeassistant.components.ffmpeg import DATA_FFMPEG\nfrom homeassistant.const import ATTR_ATTRIBUTION\nfrom homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream\n\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_HARDWARE_VERSION = \"hardware_version\"\nATTR_SERIAL = \"serial_number\"\nATTR_SOFTWARE_VERSION = \"software_version\"\n\nDEFAULT_ATTRIBUTION = \"Data provided by Wyze\"\nDEFAULT_FFMPEG_ARGUMENTS = \"-pred 1 -vcodec copy -fflags +genpts+discardcorrupt\"\nDEFAULT_FFMPEG_ARGUMENTS_IMAGE = \"-vframes 1\"\n\n_RND = SystemRandom()\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up the Wyze camera platform.\"\"\"\n _LOGGER.debug(\"\"\"Creating new WyzeApi camera component\"\"\")\n async_add_entities(WyzeCamera(hass, camera) for camera in await hass.data[DOMAIN][\"wyzeapi_account\"].async_list_camera())\n\n\nclass WyzeCamera(Camera):\n \"\"\"Define a Wyze Camera.\"\"\"\n\n def __init__(self, hass, camera):\n \"\"\"Initialize.\"\"\"\n super().__init__()\n\n self._camera = camera\n self._name = camera._friendly_name\n self._state = camera._state\n self._ssid = camera._ssid\n self._local_ip = camera._ip\n self._ssid = camera._ssid\n self._device_mac = camera._device_mac\n self._device_model = camera._device_model\n self._username = \"admin\"\n self._password = \"admin\"\n self.is_streaming = False\n self._ffmpeg = hass.data[DATA_FFMPEG]\n self._ffmpeg_arguments = DEFAULT_FFMPEG_ARGUMENTS\n self._ffmpeg_arguments_image = DEFAULT_FFMPEG_ARGUMENTS_IMAGE\n self._ffmpeg_image_frame = ImageFrame(self._ffmpeg.binary, loop=hass.loop)\n self._ffmpeg_stream = CameraMjpeg(self._ffmpeg.binary, loop=hass.loop)\n self._last_image = None\n self._last_image_url = None\n self._local_rtsp_port = camera._local_rtsp_port\n self._stream_url = f\"rtsp://{self._username}:{self._password}@{self._local_ip}:{self._local_rtsp_port}/live\"\n self.access_tokens = self.update_tokens()\n\n\n def update_tokens(self):\n \"\"\"Update the used token.\"\"\"\n token = hashlib.sha256(_RND.getrandbits(256).to_bytes(32, \"little\")).hexdigest()\n return token\n\n @property\n def brand(self):\n \"\"\"Return the camera brand.\"\"\"\n return \"Wyze\"\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n attributes = {\n ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,\n ATTR_HARDWARE_VERSION: self._device_model,\n ATTR_SERIAL: self._device_mac,\n ATTR_SOFTWARE_VERSION: \"1.0.0\",\n \"access_token\": self.access_tokens,\n }\n return attributes\n\n @property\n def model(self):\n \"\"\"Return the name of this camera.\"\"\"\n return self._device_model\n\n @property\n def name(self):\n \"\"\"Return the name of this camera.\"\"\"\n return self._name\n\n @property\n def should_poll(self):\n \"\"\"Return False, updates are controlled via the hub.\"\"\"\n return False\n\n @property\n def supported_features(self):\n \"\"\"Return supported features.\"\"\"\n return SUPPORT_ON_OFF | SUPPORT_STREAM\n\n @property\n def unique_id(self):\n \"\"\"Return a unique ID.\"\"\"\n return self._device_mac\n\n async def async_camera_image(self):\n \"\"\"Return a frame from the camera 
stream.\"\"\"\n self._last_image = await asyncio.shield(self._ffmpeg_image_frame.get_image(self._stream_url, output_format=IMAGE_JPEG,extra_cmd=self._ffmpeg_arguments_image,)\n )\n _LOGGER.debug(\"Camera %s source image: %s\", self._device_mac, self._stream_url)\n return self._last_image\n\n async def async_disable_motion_detection(self):\n \"\"\"Disable doorbell's motion detection\"\"\"\n\n async def async_enable_motion_detection(self):\n \"\"\"Enable doorbell's motion detection\"\"\"\n\n async def async_turn_off(self):\n await self._camera.async_turn_off()\n\n async def async_turn_on(self):\n \"\"\"Turn on the RTSP stream.\"\"\"\n await self._camera.async_turn_on()\n\n async def stream_source(self):\n \"\"\"Return the stream source.\"\"\"\n if self._local_rtsp_port:\n rtsp_stream_source = (\n f\"rtsp://{self._username}:{self._password}@\"\n f\"{self._local_ip}:{self._local_rtsp_port}/live\"\n )\n _LOGGER.debug(\"Camera %s source stream: %s\", self._device_mac, rtsp_stream_source)\n self._rtsp_stream = rtsp_stream_source\n self.is_streaming = True\n return rtsp_stream_source\n return None\n\n\n async def handle_async_mjpeg_stream(self, request):\n \"\"\"Generate an HTTP MJPEG stream from the camera.\"\"\"\n #if not self._stream_url:\n #return await self.async_camera_image()\n\n await self._ffmpeg_stream.open_camera(self._stream_url, extra_cmd=self._ffmpeg_arguments)\n\n try:\n stream_reader = await self._ffmpeg_stream.get_reader()\n _LOGGER.debug(\"Camera %s mjpg stream: %s\", self._device_mac, rtsp_stream_source)\n return await async_aiohttp_proxy_stream(self.hass,request,stream_reader,self._ffmpeg.ffmpeg_stream_content_type,)\n finally:\n await self._ffmpeg_stream.close()","sub_path":"custom_components/wyzeapi/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"646041344","text":"import pandas as pd\nimport pickle\nfrom matplotlib import pyplot as plt\nimport statsmodels.api as sm\nimport numpy as np\nimport math\nfrom ft_platform.utils import utils_calculation as uc\nfrom utils_func import query_data\n\n\ndata_pat = 'E:/FT_Users/LihaiYang/Files/factor_comb_data/fac_meaning/all_cluster'\nnew_f = pd.read_pickle(data_pat + '/fac_reshape.pkl')\n\n\ntrade_days = query_data.get_trade_days('d', 'SSE', '2017-01-01', '2021-02-28') # 记得修改\ntrade_days = [pd.to_datetime(i) for i in trade_days]\n# 每天的截面回归估计因子暴露\ncoef = {}\nR_sq = {}\nfor date in trade_days:\n sub_data = new_f.loc[date,]\n model = sm.OLS(sub_data.iloc[:, -1], sm.add_constant(sub_data.iloc[:, 0:-1]), missing='drop').fit()\n coef[date] = model.params\n R_sq[date] = model.rsquared_adj\n print(date)\ncoef_param = pd.concat(coef.values(), axis=1, keys=coef.keys())\ncoef_param = pd.DataFrame(coef_param.values.T, index=coef_param.columns, columns=coef_param.index) # 转置\nr2_param = pd.DataFrame(R_sq.values(), index=R_sq.keys(), columns=['R_square_adj'])\ncoef_param = uc.ts_delay(coef_param, 2) # 2天后才能用估计出的参数\nr2_param = uc.ts_delay(r2_param, 2) # 2天后才能用估计出的参数\ncoef_param = coef_param.groupby(coef_param.index.strftime('%Y-%m')).mean() # 每个月更新一次权重\nr2_param = r2_param.groupby(r2_param.index.strftime('%Y-%m')).mean() # 每个月更新一次权重\nplt.figure()\nplt.plot(r2_param.index, r2_param['R_square_adj'])\nplt.show()\ncoef_param.to_csv(data_pat + '/linear_regress_m/coef_param.csv',encoding='gbk')\nr2_param.to_csv(data_pat + '/linear_regress_m/r2_param.csv',encoding='gbk')\n\n\n# 画出因子暴露时间序列\nle = np.size(coef_param, 0)\nla = 
math.ceil(4*(le/100)**(2/9))\nfor coef_name in coef_param.columns:\n plt.figure()\n plt.plot(coef_param.index, coef_param[coef_name])\n plt.title(coef_name, fontproperties=\"SimSun\")\n plt.show()\n model = sm.OLS(coef_param[coef_name], [1 for i in range(le)]).fit(cov_type='HAC', cov_kwds={'maxlags': la})\n print(model.summary()) # 有些因子的系数显著为负?多因子回归的影响\n\n\"\"\"\n# 求收益率预测值(过去一个月估计的权重用于下个月)\ncoef_param = pd.DataFrame({i: uc.ts_delay(coef_param, 1).loc[i.strftime('%Y-%m')] for i in trade_days}).T # 整个月都用上个月算出的权重\nfac = {}\ncoef_param3 = pd.concat([new_f.reset_index(level=1).iloc[:, 0], coef_param], axis=1)\ncoef_param3 = coef_param3.set_index([coef_param3.index, 'level_1'])\npred2 = (coef_param3 * new_f).sum(axis=1, min_count=2) # 至少包含一个变量和一个const\npred2 = pred2.unstack()\npred2 = pred2.dropna(how='all')\nfac['1m_1m'] = pred2\nf = open(data_pat + '/linear_regress_m/1m_1m/fac.pkl', 'wb') # 记得修改\npickle.dump(fac, f, -1)\nf.close()\n\"\"\"\n\"\"\"\n# 求收益率预测值(过去三个月估计的权重用于下个月)\ncoef_param = pd.DataFrame({i: uc.ts_delay(coef_param.rolling(3).mean(), 1).loc[i.strftime('%Y-%m')] for i in trade_days}).T # 整个月都用前三个月算出的权重\nfac = {}\ncoef_param3 = pd.concat([new_f.reset_index(level=1).iloc[:, 0], coef_param], axis=1)\ncoef_param3 = coef_param3.set_index([coef_param3.index, 'level_1'])\npred2 = (coef_param3 * new_f).sum(axis=1, min_count=2) # 至少包含一个变量和一个const\npred2 = pred2.unstack()\npred2 = pred2.dropna(how='all')\nfac['3m_1m'] = pred2\nf = open(data_pat + '/linear_regress_m/3m_1m/fac.pkl', 'wb') # 记得修改\npickle.dump(fac, f, -1)\nf.close()\n\"\"\"\n\"\"\"\n# 求收益率预测值(过去六个月估计的权重用于下个月)\ncoef_param = pd.DataFrame({i: uc.ts_delay(coef_param.rolling(6).mean(), 1).loc[i.strftime('%Y-%m')] for i in trade_days}).T # 整个月都用前六个月算出的权重\nfac = {}\ncoef_param3 = pd.concat([new_f.reset_index(level=1).iloc[:, 0], coef_param], axis=1)\ncoef_param3 = coef_param3.set_index([coef_param3.index, 'level_1'])\npred2 = (coef_param3 * new_f).sum(axis=1, min_count=2) # 至少包含一个变量和一个const\npred2 = pred2.unstack()\npred2 = pred2.dropna(how='all')\nfac['6m_1m'] = pred2\nf = open(data_pat + '/linear_regress_m/6m_1m/fac.pkl', 'wb') # 记得修改\npickle.dump(fac, f, -1)\nf.close()\n\"\"\"\n# \"\"\"\n# 求收益率预测值(过去十二个月估计的权重用于下个月)\ncoef_param = pd.DataFrame({i: uc.ts_delay(coef_param.rolling(12).mean(), 1).loc[i.strftime('%Y-%m')] for i in trade_days}).T # 整个月都用前十二个月算出的权重\nfac = {}\ncoef_param3 = pd.concat([new_f.reset_index(level=1).iloc[:, 0], coef_param], axis=1)\ncoef_param3 = coef_param3.set_index([coef_param3.index, 'level_1'])\npred2 = (coef_param3 * new_f).sum(axis=1, min_count=2) # 至少包含一个变量和一个const\npred2 = pred2.unstack()\npred2 = pred2.dropna(how='all')\nfac['12m_1m'] = pred2\nf = open(data_pat + '/linear_regress_m/12m_1m/fac.pkl', 'wb') # 记得修改\npickle.dump(fac, f, -1)\nf.close()\n# \"\"\"\n","sub_path":"fac_meaning_cluster_linearregress_m.py","file_name":"fac_meaning_cluster_linearregress_m.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"13327857","text":"import tkinter as tk\nimport os\n\n\nclass Launcher(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self.master = master\n self.pack()\n self.create_widgets()\n\n def create_widgets(self):\n self.modButton = tk.Button(self)\n self.modButton['text'] = \"Modded\"\n self.modButton['font'] = (\"Purisa\", 20)\n self.modButton['pady'] = 12\n self.modButton['width'] = 15\n self.modButton['activebackground'] = '#afafaf'\n self.modButton['bd'] = 1\n 
{"seq_id":"13327857","text":"import tkinter as tk\nimport os\n\n\nclass Launcher(tk.Frame):\n    def __init__(self, master=None):\n        super().__init__(master)\n        self.master = master\n        self.pack()\n        self.create_widgets()\n\n    def create_widgets(self):\n        self.modButton = tk.Button(self)\n        self.modButton['text'] = \"Modded\"\n        self.modButton['font'] = (\"Purisa\", 20)\n        self.modButton['pady'] = 12\n        self.modButton['width'] = 15\n        self.modButton['activebackground'] = '#afafaf'\n        self.modButton['bd'] = 1\n        self.modButton['highlightcolor'] = '#afafaf'\n        self.modButton['relief'] = 'solid'\n        self.modButton['command'] = self.modButtonClick\n        self.modButton.pack()\n\n        self.vanillaButton = tk.Button(self)\n        self.vanillaButton['text'] = \"Vanilla\"\n        self.vanillaButton['font'] = (\"Purisa\", 20)\n        self.vanillaButton['pady'] = 12\n        self.vanillaButton['width'] = 15\n        self.vanillaButton['activebackground'] = '#afafaf'\n        self.vanillaButton['bd'] = 1\n        self.vanillaButton['highlightcolor'] = '#afafaf'\n        self.vanillaButton['relief'] = 'solid'\n        self.vanillaButton['command'] = self.vanillaButtonClick\n        self.vanillaButton.pack()\n\n        self.quitButton = tk.Button(self)\n        self.quitButton['text'] = \"QUIT\"\n        self.quitButton['font'] = (\"Purisa\", 20)\n        self.quitButton['pady'] = 12\n        self.quitButton['width'] = 15\n        self.quitButton['activebackground'] = '#afafaf'\n        self.quitButton['bd'] = 1\n        self.quitButton['highlightcolor'] = '#afafaf'\n        self.quitButton['relief'] = 'solid'\n        self.quitButton['command'] = self.quitButtonClick\n        self.quitButton.pack()\n\n    def modButtonClick(self):\n        self.master.destroy()\n        os.system('picomc instance mod launch')\n\n    def vanillaButtonClick(self):\n        self.master.destroy()\n        os.system('picomc instance instancename launch')\n\n    def quitButtonClick(self):\n        self.master.destroy()\n\n\nroot = tk.Tk()\nroot.title('Minecraft Launcher')\nroot.iconbitmap('C:\\\\Users\\\\matus\\\\OneDrive\\\\Počítač\\\\all\\\\icons\\\\MineCraft.ico')\napp = Launcher(master=root)\napp.mainloop()\n","sub_path":"projects/other/minecraftLauncher.py","file_name":"minecraftLauncher.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"26098063","text":"from wagglevision.datasets import CloudDataset\nimport matplotlib.pyplot as plt\n\n# test datasets\ntrain_data = CloudDataset('data', image_set='train', download=True)\nval_data = CloudDataset('data', image_set='val', download=False)\n\n# show a few examples\nfig, ax = plt.subplots(ncols=3, nrows=4, sharex=True, sharey=True)\n\nfor i in range(4):\n    image, label = train_data[i]\n    ax[i, 0].imshow(image)\n    ax[i, 1].imshow(label, cmap='jet', interpolation='none')\n    ax[i, 2].imshow(image)\n    ax[i, 2].imshow(label, cmap='jet', interpolation='none', alpha=0.5)\n\nplt.show()\n","sub_path":"dataset_example.py","file_name":"dataset_example.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"513767281","text":"#\n# Copyright (C) 2001-2009,2011-2016 Andreas Lang-Nevyjel, init.at\n#\n# this file is part of python-modules-base\n#\n# Send feedback to: \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License Version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n\n\"\"\" checks installed servers on system \"\"\"\n\nfrom __future__ import unicode_literals, print_function\n\nimport datetime\nimport os\nimport sys\nimport time\n\nfrom initat.icsw.icsw_tools import ICSW_DEBUG_MODE\nfrom initat.tools import logging_tools\nfrom . import container\nfrom . import instance\nfrom . import transition\nfrom .constants import STATE_DICT, LIC_STATE_DICT, CONF_STATE_DICT\nfrom .tools import query_local_meta_server\nfrom .. import icsw_logging\n\n\ndef show_form_list(form_list):\n    # color strings (green / blue / red / normal)\n    d_map = {\n        \"ok\": \"\\033[1;32m{}\\033[m\\017\",\n        \"warning\": \"\\033[1;34m{}\\033[m\\017\",\n        \"critical\": \"\\033[1;31m{}\\033[m\\017\",\n    }\n    print(datetime.datetime.now().strftime(\"%a, %d. %b %Y %d %H:%M:%S\"))\n    form_list.display_attribute_map = d_map\n    print(unicode(form_list))\n\n\ndef _state_overview(opt_ns, result):\n    _instances = result.xpath(\".//ns:instances/ns:instance\")\n    print(\"instances reported: {}\".format(logging_tools.get_plural(\"instance\", len(_instances))))\n    for _inst in _instances:\n        _states = []\n        last_states = None\n        for _src_state in result.xpath(\".//ns:state\", start_el=_inst):\n            # todo: remove duplicates\n            _states.append(_src_state)\n        _actions = result.xpath(\".//ns:action\", start_el=_inst)\n        print(\n            \"{:<30s}, target state is {:<20s} [{}, {}], {} / {} in the last 24 hours\".format(\n                _inst.get(\"name\"),\n                {\n                    0: \"stopped\",\n                    1: \"started\"\n                }[int(_inst.attrib[\"target_state\"])],\n                \"active\" if int(_inst.attrib[\"active\"]) else \"inactive\",\n                \"ignored\" if int(_inst.attrib[\"ignore\"]) else \"watched\",\n                logging_tools.get_plural(\"state\", len(_states)),\n                logging_tools.get_plural(\"action\", len(_actions)),\n            )\n        )\n        if opt_ns.state:\n            for _cur_s in _states:\n                print(\n                    \"    {} pstate={}, cstate={}, license_state={} [{}]\".format(\n                        time.ctime(int(_cur_s.attrib[\"created\"])),\n                        STATE_DICT[int(_cur_s.attrib[\"pstate\"])],\n                        CONF_STATE_DICT[int(_cur_s.attrib[\"cstate\"])],\n                        LIC_STATE_DICT[int(_cur_s.attrib[\"license_state\"])],\n                        _cur_s.attrib[\"proc_info_str\"],\n                    )\n                )\n        if opt_ns.action:\n            for _cur_a in _actions:\n                print(\n                    \"    {} action={}, runtime={} [{} / {}]\".format(\n                        time.ctime(int(_cur_a.attrib[\"created\"])),\n                        _cur_a.attrib[\"action\"],\n                        _cur_a.attrib[\"runtime\"],\n                        _cur_a.attrib[\"finished\"],\n                        _cur_a.attrib[\"success\"],\n                    )\n                )\n\n\ndef version_command(opt_ns):\n    from initat.cluster.backbone.models import ICSWVersion, VERSION_NAME_LIST\n    from django.conf import settings\n    print(\"ICSW Version info\")\n    _vers = {\n        \"db\": ICSWVersion.get_latest_db_dict(),\n        \"sys\": settings.ICSW_VERSION_DICT,\n    }\n    for _key in sorted(_vers.keys()):\n        if _vers[_key]:\n            for _vn in VERSION_NAME_LIST:\n                print(\n                    \"  {:<3} {:<10}: {}\".format(\n                        _key,\n                        _vn,\n                        _vers[_key][_vn],\n                    )\n                )\n        else:\n            print(\n                \"* {:<3} {:<10}: missing\".format(\n                    _key,\n                    \"\",\n                )\n            )\n\n\ndef main(opt_ns):\n    log_com = icsw_logging.get_logger(\"service\", opt_ns, all=True if opt_ns.childcom in [\"state\"] else False)\n    if os.getuid():\n        log_com(\"Not running as root, information may be incomplete, disabling display of memory\", logging_tools.LOG_LEVEL_ERROR)\n        opt_ns.memory = False\n    inst_xml = instance.InstanceXML(log_com)\n    cur_c = container.ServiceContainer(log_com)\n    META_COMS = [\"disable\", \"enable\", \"ignore\", \"monitor\", \"overview\"]\n    if opt_ns.childcom == \"version\":\n        version_command(opt_ns)\n    elif opt_ns.childcom == \"status\":\n        if ICSW_DEBUG_MODE:\n            # activate debug mode\n            from django.conf import settings\n            settings.DEBUG = True\n        if opt_ns.interactive:\n            from . import console\n            console.main(opt_ns, cur_c, inst_xml)\n        else:\n            cur_c.check_system(opt_ns, inst_xml)\n            if ICSW_DEBUG_MODE:\n                from django.db import connection\n                _time = 0.0\n                for line in connection.queries:\n                    print(\"{} : {}\".format(line[\"time\"], line[\"sql\"]))\n                    _time += float(line[\"time\"])\n                print()\n                print(\"performed {:d} queries in {:.3f}\".format(len(connection.queries), _time))\n                print()\n            form_list = cur_c.instance_to_form_list(opt_ns, inst_xml.tree)\n            show_form_list(form_list)\n            _res = inst_xml.tree.findall(\".//result\")\n            if len(_res) == 1:\n                # set return state to single-state result\n                _state = int(_res[0].find(\"process_state_info\").get(\"state\"))\n                sys.exit(_state)\n    elif opt_ns.childcom in [\"start\", \"stop\", \"restart\", \"debug\", \"reload\"]:\n        if opt_ns.childcom == \"debug\":\n            debug_args = opt_ns.debug_args\n            if opt_ns.debug_flag:\n                debug_args.append(\"--debug-flag\")\n        else:\n            debug_args = None\n        cur_t = transition.ServiceTransition(\n            opt_ns.childcom,\n            opt_ns.service,\n            cur_c,\n            inst_xml,\n            log_com,\n            debug_args=debug_args,\n        )\n        while True:\n            _left = cur_t.step(cur_c)\n            if _left:\n                time.sleep(1)\n            else:\n                break\n    elif opt_ns.childcom in META_COMS:\n        # contact meta-server at localhost\n        _result = query_local_meta_server(inst_xml, opt_ns.childcom, services=opt_ns.service)\n        if _result is None:\n            log_com(\"Got no result from meta-server\")\n            sys.exit(1)\n        if _result.get_log_tuple()[1] > logging_tools.LOG_LEVEL_WARN:\n            log_com(*_result.get_log_tuple())\n            sys.exit(1)\n        if opt_ns.childcom == \"overview\":\n            _state_overview(opt_ns, _result)\n        elif opt_ns.childcom in [\"disable\", \"enable\", \"ignore\", \"monitor\"]:\n            log_com(*_result.get_log_tuple())\n    else:\n        log_com(\n            \"unknown childcom '{}'\".format(\n                opt_ns.childcom\n            ),\n            logging_tools.LOG_LEVEL_ERROR\n        )\n","sub_path":"initat/icsw/service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"462270898","text":"from PyQt5.QtCore import *\nfrom PyDesignModel.PyDesignIcons import *\nfrom PyDesignData.PyDesignObject import *\nfrom PyDesignModel.PyDesignModelItem import PyDesignModelItem\nfrom PyDesignModel.PyDesignAnalysisItem import PyDesignAnalysisItem\n\n\n__author__ = 'magnus'\n\n\nclass PyDesignReportsItem(PyDesignModelItem):\n    def __init__(self, py_design_document, parent):\n        PyDesignModelItem.__init__(self, parent, parent.model)\n        self._data_object = py_design_document\n        self._data_dict[PyDesignNamedObject.NAME] = self.data_name\n        self._icon = get_icon(\"reports\")\n        self._type = \"PyDesignReportsModelItem\"\n        for i in range(0, py_design_document.report_count):\n            analysis = py_design_document.get_report(i)\n            self._children.append(PyDesignReportItem(self, analysis))\n\n    def data_name(self, int_role):\n        if int_role == Qt.DisplayRole:\n            return \"Reports\"\n        elif int_role == Qt.DecorationRole:\n            return self._icon\n        else:\n            return None\n","sub_path":"PyDesignModel/PyDesignReportsItem.py","file_name":"PyDesignReportsItem.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"53752368","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom torch import nn\n\nclass StarSpace(nn.Module):\n    def __init__(self, d_embed, vocabulary, input_embedding = None, k_neg = 3, max_norm=20):\n        super(StarSpace, self).__init__()\n\n        self.n_input = len(vocabulary)\n        self.vocab = vocabulary\n        self.k_neg = k_neg\n        self.d_embed = d_embed\n        \n        if input_embedding is None:\n            self.embeddings = nn.Embedding(self.n_input, self.d_embed, max_norm=max_norm)\n        else:\n            self.embeddings = input_embedding\n        \n        if torch.cuda.is_available():\n            self.device = 'cuda'\n        else:\n            self.device = 'cpu'\n        \n        self.embeddings.to(self.device)\n    \n    def get_positions(self,train):\n        \"\"\" Get the positions of every word. Return list of list of tensors \"\"\"\n        train_pos = []\n        for i,doc in enumerate(train): \n            # For each document\n            sentences = doc.split('\\t')\n            doc_positions = []\n            for s in sentences: \n                # For each sentence\n                positions = []\n                s = s.split()\n                \n                for tok in s: \n                    # For each word\n                    try:\n                        positions.append(self.vocab[tok])\n                    except KeyError:\n                        pass\n                \n                #TEMPORARY FIX- a totally neutral but not too common word\n                if len(positions) < 1:\n                    positions.append(self.vocab['able'])\n                \n                doc_positions.append(torch.LongTensor(positions))\n\n            train_pos.append(doc_positions)\n        \n        return np.array(train_pos)\n    \n    def embed_doc(self,d,normalize=True):\n        \"\"\" Takes a tensor of positions and embeds it \"\"\"\n        output = torch.sum(self.embeddings(d),dim=0)\n        #output[output != output] = 0 #necessary for documents with all unseen vocabs\n        \n        output.to(self.device)\n        \n        if normalize:\n            output = output / output.norm()\n        return output\n    \n    def forward(self, docs): \n        l_batch = []\n        r_batch = []\n        neg_batch = []\n        \n        for i,s in enumerate(docs):\n            #Positive similarity between sentences\n            if (type(s) == str) or (len(s) <= 1): #only one sentence in s\n                a = s[0]\n                b = s[0]\n            else:\n                choices = np.random.choice(len(s), 2, False)\n                a = s[choices[0]]\n                b = s[choices[1]]\n\n            a_emb = self.embed_doc(a)\n            b_emb = self.embed_doc(b)\n\n            l_batch.append(a_emb)\n            r_batch.append(b_emb)\n\n            #Negative similarity\n            negs = []\n            num_negs = 0\n            while num_negs < self.k_neg:\n                index = np.random.choice(len(docs))\n                \n                if index != i: #if it's not from the same document\n                    neg_doc = docs[index]\n                    neg_choice = np.random.choice(len(neg_doc),1)[0]\n                    \n                    c = neg_doc[neg_choice]\n                    c_emb = self.embed_doc(c)\n                    \n                    negs.append(c_emb)\n                    num_negs += 1\n\n            neg_batch.append(torch.stack(negs))\n        \n        l_batch = torch.stack(l_batch)\n        r_batch = torch.stack(r_batch)\n        neg_batch = torch.stack(neg_batch)\n        \n        l_batch = l_batch.unsqueeze(1)\n        r_batch = r_batch.unsqueeze(1)\n        \n        return l_batch, r_batch, neg_batch\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"202149779","text":"\"\"\"\nA more user-friendly compilation of any\npotentially useful function, along with a\nfew more tools such as frequency comparisons, IC,\netc.\nUses cipher_core, cipher_scoring, cipher_solvers,\ndecipher\nThis is a work in progress.\n\"\"\"\n\nfrom math import log10\nimport cipher_core as core\nimport cipher_scoring as scoring\nimport cipher_solvers as solvers\nimport decipher\n\n\n# The main menu\nTITLE_MAIN = \"Working Aid 2.0 Main Menu\"\nCONTENTS_MAIN = [\n    \"General analysis of message\",\n    \"Convert a binary message to English characters\",\n    \"Space an already solved message\",\n    \"Change file\",\n    \"Exit Program\",\n    ]\n\n# Opens some files that may be used later.\nmonogram_d = core.ngram_create(r\"Text_Files\\monogram.txt\")\nquadgram_d = core.ngram_create(r\"Text_Files\\quadgram.txt\")\ndiction = core.diction_create(r\"Text_Files\\dictionary_with_counts.txt\")\ndiction_di = core.diction_create(r\"Text_Files\\dictionary_diword.txt\")\n\n# Always outputs the file contents of the file that has been opened.\nif input(\"Would you like to load the cipher from a file? Y/N \").lower().startswith(\"y\"):\n    raw_str = core.file_input(None, False)\n    print (\"\\nFile contents:\")\n    print (raw_str)\nelse:\n    raw_str = input(\"Copy your string here: \")\ntest_str = core.eng_filter(raw_str, False, True)\n\n\nwhile True:\n    # This creates some initial variables about the basic file, such as length, spaces, etc.\n    upper_alph = [chr(x+65) for x in range(0,26)]\n    lower_alph = [chr(x+97) for x in range(0,26)]\n    file_length = len(raw_str)\n    file_length_char = len([x for x in raw_str if x!=\" \"])\n    upper_case_length = len([x for x in raw_str if x in upper_alph])\n    lower_case_length = len([x for x in raw_str if x in lower_alph])\n\n    # This creates the scoring variables to tell the user about the file's match to English.\n    ic_value = scoring.index_c(test_str, True)\n    monogram_chi = core.dec_place(scoring.chi_score(test_str, monogram_d), 3, True)\n    base_score = core.dec_place(scoring.ngram_score(test_str, quadgram_d, 4), 3, True)\n\n    # This estimated score is to help the user to compare with the base score, which is meaningless without comparison.\n    est_score = 0\n    for k in quadgram_d:\n        est_score += file_length_char*quadgram_d.get(k,1e-99)*log10(quadgram_d.get(k,1e-99))\n    est_score = core.dec_place(est_score, 3, True)\n    \n    c_type = scoring.recognise(test_str, monogram_d)\n\n    menu_main = core.menu(TITLE_MAIN, CONTENTS_MAIN, 1)\n\n\n    if menu_main == 1:\n        # This prints off some of the initial variables created above.\n        print (\"\\n\\nInitial Analysis:\\n\")\n        print (\"File Length (with spaces): %s\" % (file_length))\n        print (\"File Length (no spaces): %s\" % (file_length_char))\n        print (\"Upper Case Characters: %s\" % (upper_case_length))\n        print (\"Lower Case Characters: %s\" % (lower_case_length))\n\n        print (\"\\nIndex of Coincedence: %s\" % (ic_value))\n        print (\"Chi-Squared for Monogram Frequencies: %s\" % (monogram_chi))\n        print (\"Relative score from Quadgram Frequencies: %s\" % (base_score))\n        print (\"Expected Relative score from Quadgram Frequencies: %s\" % (est_score))\n\n        print (\"\\nLetter Frequency Comparisons:\")\n        print (\"=== Letter === Raw Text === Expected ===\")\n\n        letters = list(monogram_d.keys())\n        letters.sort()\n        for char in letters:\n            print (\"  |    %s:    |   %s   |   %s   |\" % (char,core.dec_place(test_str.count(char)/len(test_str),4,True),core.dec_place(monogram_d.get(char),4,True)))\n\n        print (\"\\nGeneral Estimated Cipher Type: %s\" % (c_type))\n\n    elif menu_main == 2:\n        new_str = \"\"\n\n        # This decides on the number of bits per character.\n        bit_lengths = [str(x) for x in range(5,17) if file_length%x == 0]\n        bit_len = 0\n        while bit_len not in bit_lengths:\n            print (\"Valid bit lengths: %s\" % (\", \".join(bit_lengths)))\n            bit_len = input(\"Please enter the number of bits per letter (5-16): \")\n\n        bit_len = int(bit_len)\n\n        # This converts the actual text.\n        for bit in range(0, file_length, bit_len):\n            new_str += chr((int(raw_str[bit:bit+bit_len],2)%26)+65)\n\n        print (\"\\nConverted string:\")\n        print (new_str)\n\n        if input(\"Would you like to save this converted string to a new file? Y/N \").lower().startswith(\"y\"):\n            core.file_output(new_str, None)\n            print (\"Done!\")\n\n    elif menu_main == 3:\n        # This applies the word_spacing function from cipher_scoring.\n        spaced_str = scoring.word_spacing(raw_str, diction, diction_di, False, 80)\n        print (\"\\nSpaced string:\")\n        print (spaced_str)\n\n        if input(\"Would you like to save this converted string to a new file? Y/N \").lower().startswith(\"y\"):\n            core.file_output(spaced_str, None)\n            print (\"Done!\")\n\n    elif menu_main == 4:\n        # This changes the file, so that if you had, say, written to file with the binary conversion you could re-open it.\n        if input(\"Would you like to load the cipher from a file? Y/N \").lower().startswith(\"y\"):\n            raw_str = core.file_input(None, False)\n            print (\"\\nFile contents:\")\n            print (raw_str)\n        else:\n            raw_str = input(\"Copy your string here: \")\n        test_str = core.eng_filter(raw_str, False, True)\n\n    elif menu_main == 5:\n        # Exits the program.\n        break\n\n","sub_path":"Working Aid 2.0.py","file_name":"Working Aid 2.0.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"480346558","text":"import logging\nimport os\nimport sqlite3\nimport sys\n\nfrom flask import Flask, json, render_template, request, url_for, redirect, flash, has_request_context, session\nfrom logging.config import dictConfig\n\n\n# Function to get a database connection.\n# This function connects to database with the name `database.db`\ndef get_db_connection():\n    if 'dbstate' not in session:\n        session['dbstate'] = ''\n    if os.path.isfile('database.db') is False:\n        session['dbstate'] = '\\nDatabase is not initialized!'\n        app.logger.error('Database is not initialized!')\n        return False\n    session['dbstate'] = 'Database initialized'\n    connection = sqlite3.connect('database.db')\n    connection.row_factory = sqlite3.Row\n    if 'dbconnections' in session:\n        session['dbconnections'] += 1\n    else:\n        session['dbconnections'] = 0\n    return connection\n\n\n# Function to get a post using its ID\ndef get_post(post_id):\n    connection = get_db_connection()\n    if connection is False:\n        return False\n    try:\n        post = connection.execute('SELECT * FROM posts WHERE id = ?',\n                                  (post_id,)).fetchone()\n        connection.close()\n        return post\n    except Exception:\n        app.logger.exception('An exception occurred when getting a post. %s', session['dbstate'])\n        return False\n\n\n# Function to get all posts\ndef get_all_posts():\n    connection = get_db_connection()\n    if connection is False:\n        return False\n    try:\n        posts = connection.execute('SELECT * FROM posts').fetchall()\n        connection.close()\n        return posts\n    except Exception as err:\n        app.logger.exception('An exception occurred %s. %s', str(err), session['dbstate'])\n        return False\n\n\n# Function to get number of posts in database\ndef get_posts_count():\n    connection = get_db_connection()\n    if connection is False:\n        return False\n    try:\n        posts = connection.execute('SELECT COUNT(*) AS count FROM posts').fetchone()\n        connection.close()\n        return posts['count']\n    except Exception:\n        app.logger.exception('An exception occurred when getting posts count. %s', session['dbstate'])\n        return False\n\n\nclass RequestFormatter(logging.Formatter):\n    def format(self, record):\n        record.url = ''\n        record.remote_addr = ''\n        if has_request_context():\n            record.url = request.url\n            record.remote_addr = request.remote_addr\n        return super().format(record)\n\n\ndictConfig({\n    'version': 1,\n    'formatters': {\n        'default': {\n            'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n        }\n    },\n    'handlers': {\n        'wsgi': {\n            'class': 'logging.StreamHandler',\n            'stream': 'ext://sys.stderr',\n            'formatter': 'default'\n        }\n    },\n    'root': {\n        'level': os.getenv('LOGLEVEL', 'DEBUG'),\n        'handlers': ['wsgi']\n    }\n})\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'your secret key'\n\n\n# Define the main route of the web application\n@app.route('/')\ndef index():\n    posts = get_all_posts()\n    if posts is not False:\n        app.logger.info('Home page was read!')\n        return render_template('index.html', posts=posts)\n    else:\n        return 'Database error when trying to get posts'\n\n\n# Define how each individual article is rendered\n# If the post ID is not found a 404 page is shown\n@app.route('/<int:post_id>')\ndef post(post_id):\n    post = get_post(post_id)\n    if post is False:\n        return 'Database error when trying to get post'\n    if post is None:\n        app.logger.error('Post with id %s does not exist', post_id)\n        return render_template('404.html'), 404\n    else:\n        app.logger.info('Post with id %s with title \"%s\" was read!', post_id, post['title'])\n        return render_template('post.html', post=post)\n\n\n# Define the About Us page\n@app.route('/about')\ndef about():\n    app.logger.info('About page was read!')\n    return render_template('about.html')\n\n\n# Define the post creation functionality\n@app.route('/create', methods=('GET', 'POST'))\ndef create():\n    if request.method == 'POST':\n        title = request.form['title']\n        content = request.form['content']\n\n        if not title:\n            flash('Title is required!')\n        else:\n            try:\n                connection = get_db_connection()\n                connection.execute('INSERT INTO posts (title, content) VALUES (?, ?)',\n                                   (title, content))\n                connection.commit()\n                connection.close()\n                app.logger.info('A new article with title \"%s\" was created', title)\n                return redirect(url_for('index'))\n            except Exception as err:\n                app.logger.exception('An exception occurred %s. %s', str(err), session['dbstate'])\n                return 'Database error when trying to insert post'\n\n    return render_template('create.html')\n\n\n@app.route('/healthz')\ndef healthcheck():\n    connection = get_db_connection()\n    count = get_posts_count()\n    response = app.response_class(\n        response=json.dumps({\"result\": \"ERROR - unhealthy\"}),\n        status=500,\n        mimetype='application/json'\n    )\n    if connection and count:\n        app.logger.info('Healthz page showed a healthy status!')\n        response = app.response_class(\n            response=json.dumps({\"result\": \"OK - healthy\"}),\n            status=200,\n            mimetype='application/json'\n        )\n    else:\n        app.logger.error('Healthz page showed an unhealthy status!')\n    return response\n\n\n@app.route('/metrics')\ndef metrics():\n    count = get_posts_count()\n    if 'dbconnections' in session:\n        dbconnections = session['dbconnections']\n    else:\n        dbconnections = 0\n    app.logger.info('Metrics were read, showing %s connections', dbconnections)\n    response = app.response_class(\n        response=json.dumps({\"post_count\": count, \"db_connections\": dbconnections}),\n        status=200,\n        mimetype='application/json'\n    )\n    return response\n\n\n# start the application on port 3111\nif __name__ == \"__main__\":\n    formatter = RequestFormatter(\n        '[%(asctime)s] %(remote_addr)s - %(url)s %(levelname)s in %(module)s: %(message)s'\n    )\n\n    LOGLEVEL = os.environ.get('LOGLEVEL', 'DEBUG').upper()\n\n    root = logging.getLogger()\n    errHandler = logging.StreamHandler(sys.stdout)\n    errHandler.setFormatter(formatter)\n    fileHandler = logging.FileHandler(\"app.log\")\n    fileHandler.setFormatter(formatter)\n    root.addHandler(fileHandler)\n    root.addHandler(errHandler)\n    fileHandler.setLevel(LOGLEVEL)\n    errHandler.setLevel(LOGLEVEL)\n    app.run(host='0.0.0.0', port='3111')\n","sub_path":"project/techtrends/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"331182601","text":"import unittest\nfrom final_proj import *\nimport json\nimport requests\n\n\n\nclass TestDatabase(unittest.TestCase):\n    def setUp(self):\n        # get_movies(\"horror\")\n        create_csv(get_movies(\"horror\"))\n        init_db(DBNAME)\n        insert_csv_data(MOVIESCSV)\n        insert_csv_data2(MOVIESCSV)\n\n\n    def test_movies_table(self):\n        conn = sqlite3.connect(DBNAME)\n        cur = conn.cursor()\n\n        sql = 'SELECT MovieName FROM Movies'\n        results = cur.execute(sql)\n        result_list = results.fetchall()\n        self.assertIn(('Get Out',), result_list)\n        self.assertEqual(len(result_list), 100)\n\n        sql = '''\n            SELECT MovieName, Genre, Rating,\n                   MovieLength, Director\n            FROM Movies\n            WHERE Genre=\" Classics\"\n            ORDER BY Rating DESC\n        '''\n        results = cur.execute(sql)\n        result_list = results.fetchall()\n        #print(result_list)\n        self.assertEqual(len(result_list), 17)\n        self.assertEqual(result_list[0][3], 109.0)\n\n        conn.close()\n\n    def test_critics_table(self):\n        conn = sqlite3.connect(DBNAME)\n        cur = conn.cursor()\n\n        sql = '''\n            SELECT ReleaseYear\n            FROM Critics\n            WHERE NumberReviews=\"60\"\n        '''\n        results = cur.execute(sql)\n        result_list = results.fetchall()\n        self.assertIn((1965,), result_list)\n        self.assertEqual(len(result_list), 2)\n\n        sql = '''\n            SELECT COUNT(*)\n            FROM Critics\n        '''\n        results = cur.execute(sql)\n        count = results.fetchone()[0]\n        self.assertEqual(count, 100)\n\n        conn.close()\n\nclass TestMoviesSearch(unittest.TestCase):\n    def setUp(self):\n        create_csv(get_movies(\"horror\"))\n        init_db(DBNAME)\n        insert_csv_data(MOVIESCSV)\n        insert_csv_data2(MOVIESCSV)\n\n    def test_command_search(self):\n        results = movies_command('movies top=1')\n        self.assertEqual(results[0][2], ' Drama')\n\n        results = genres_command('genres number_reviews')\n        self.assertEqual(results[1][2], 1984)\n\n\n        results = compare_command('compare directors top=10')\n        self.assertEqual(results[1][2], \" Trey Edward Shults\")\n        self.assertEqual(len(results), 10)\n        #\n        results = studio_command('studios number_reviews top=5')\n        self.assertEqual(results[3][1], 2009)\n        self.assertEqual(results[1][0], \" Warner-Bros.-Pictures\")\n\n\nclass TestMovieInstance(unittest.TestCase):\n    def setUp(self):\n        create_csv(get_movies(\"horror\"))\n        init_db(DBNAME)\n        insert_csv_data(MOVIESCSV)\n        insert_csv_data2(MOVIESCSV)\n\n    def testConstructor(self):\n\n        m1 = Movie(\"Delusional Love\", \"PG\", \"Romance\", \"Idaghe\", \"2018\", \"120\", \"Chicago-20th\", \"100\")\n        self.assertEqual(m1.name, \"Delusional Love\")\n        self.assertEqual(m1.rating, \"PG\")\n        self.assertEqual(m1.genre, \"Romance\")\n        self.assertEqual(m1.director, \"Idaghe\")\n        self.assertEqual(m1.release_year, \"2018\")\n        self.assertEqual(m1.runtime, \"120\")\n        self.assertEqual(m1.studio, \"Chicago-20th\")\n        self.assertEqual(m1.reviews, \"100\")\n\n\n    def test_MovieString(self):\n        movies = Movie(\"Delusional Love\", \"PG\", \"Romance\", \"Idaghe\", \"2018\", \"120\", \"Chicago-20th\", \"100\")\n        self.assertEqual(movies.__str__(), \"Delusional Love rated PG (2018) directed by Idaghe\")\n\n\nunittest.main()\n","sub_path":"final_projtest.py","file_name":"final_projtest.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"435258670","text":"from platform_data.mod9.v2_client import RemeetingClient\nfrom platform_data.incontact.client import InContactClient\nfrom augment_dao.models import Chat\nfrom augment_dao.models import Message\nfrom augment_dao.models import Agent\nfrom augment_dao.model_files.Message import MessageSource\n\nfrom augment_dao.config import BaseConfig\nfrom augment_dao.config import CoreDevConfig\nfrom augment_dao.config import CoreStagingConfig\nfrom augment_dao.config import CoreProductionConfig\nfrom augment_dao.models import db\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom uuid import UUID\nfrom uuid import uuid4\nfrom flask import Flask\n\nfrom dateutil import parser\nimport datetime\n\nINCONTACT_BASIC_TOKEN = 'TklDRWluQ29udGFjdERFVm9uZTRATklDRWluQ29udGFjdCBJbmMuOk5EYzJNVGMxWW1RdE1tRXhOaTAwT0RjeExUZzNNelF0TldRMk9UUTBNVEZqT0dGaQ=='\nINCONTACT_USERNAME = 'augmenthq@careerstep.com'\nINCONTACT_PASSWORD = 'e7t6m44#hRF$6l0T'\n\nREMEETING_BASIC_TOKEN = 'UDTp6BjJ6jyqAXcvTBRDHbNk'\n\n#WAV_JOB_MAP = [{\"contact_id\": \"42962523581\", \"recognition_id\": \"3f59\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"121260232666\", \"recognition_id\": \"a9cb\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"43172731223\", \"recognition_id\": \"d097\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"43861002079\", \"recognition_id\": \"66a5\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"43845729567\", \"recognition_id\": \"4862\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"43734584996\", \"recognition_id\": \"3166\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"43099937362\", \"recognition_id\": \"f381\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"43546832401\", \"recognition_id\": \"1aec\"}]\n#WAV_JOB_MAP = [{\"contact_id\": \"43845836456\", \"recognition_id\": \"2962\"}]\nWAV_JOB_MAP = [{\"contact_id\": \"43019760979\", \"recognition_id\": \"5728\"}]\n\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"42962523581\", \"agent\": \"0\", \"customer\": \"1\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"121260232666\", \"agent\": \"0\", \"customer\": \"1\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"43172731223\", \"agent\": \"0\", \"customer\": \"1\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"43861002079\", \"agent\": \"0\", \"customer\": \"1\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"43845729567\", \"agent\": \"1\", \"customer\": \"0\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"43734584996\", \"agent\": \"0\", \"customer\": \"1\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"43099937362\", \"agent\": \"0\", \"customer\": \"1\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"43546832401\", \"agent\": \"0\", \"customer\": \"1\"}]\n#SPEAKER_JOB_MAP = [{\"contact_id\": \"43845836456\", \"agent\": \"0\", \"customer\": \"1\"}]\nSPEAKER_JOB_MAP = [{\"contact_id\": \"43019760979\", \"agent\": \"0\", \"customer\": \"1\"}]\n\nSTART_TIMESTAMP_OVERRIDE = '2018-10-30T10:41:09.360Z'\nDEFAULT_CREATED_BY_UUID = \"00000000-0000-0000-0000-000000000001\"\nORG_UUID = \"00000000-0000-0000-0080-000000000000\"\n\n\n\ndef main():\n    # initialize needed clients\n    incontact_client = InContactClient(INCONTACT_BASIC_TOKEN, INCONTACT_USERNAME, INCONTACT_PASSWORD)\n    incontact_client.auth(InContactClient.REPORTING_SCOPE)\n    remeeting_client = RemeetingClient(REMEETING_BASIC_TOKEN)\n\n    # Retrieve hash of wav file to job ids\n    # Unneeded, static for now\n\n    # cycle through the WAV FILES TO PROCESS\n    for wjm in WAV_JOB_MAP:\n\n        # retrieve the contact info for each contact\n        print('working with contact id ' + wjm['contact_id'])\n        call_results = incontact_client.get_single_call(wjm['contact_id'])\n        for c in call_results:\n            # print for visibility\n            print(c.data)\n\n        # retrieve the wav file recognition\n        recognitions = remeeting_client.get_recognitions(wjm['recognition_id'])\n        for r in recognitions:\n            # print for visibility\n            print(r.data)\n\n        # build the chat, messages, and needed agent and persist\n        create_chat(wjm, c, recognitions)\n\n\ndef create_chat(wjm, call_result, recognitions):\n    init_db_session()\n\n    created_by = UUID(DEFAULT_CREATED_BY_UUID)\n    organization_id = UUID(ORG_UUID)\n\n    # look for this agent\n    agent = Agent.query \\\n        .filter(\n            Agent.platform_agent_id == call_result.platform_agent_id,\n            Agent.organization_id == organization_id\n        ).first()\n\n    if agent is not None:\n        print('agent found uuid ' + str(agent.id))\n    else:\n        # if not exists, make him\n        print('agent is none')\n        agent = Agent(\n            organization_id=organization_id,\n            created_by=created_by,\n            id=uuid4(),\n            platform_agent_id=call_result.platform_agent_id,\n            display_name=call_result.first_name + ' ' + call_result.first_name,\n            chat_name=None,\n            email=None,\n            include=True,\n            widgets_include=True,\n        )\n        db.session.add(agent)\n        db.session.commit()\n\n    # create this chat\n    if(START_TIMESTAMP_OVERRIDE == ''):\n        start_timestamp = parser.parse(call_result.contact_start)\n    else:\n        start_timestamp = parser.parse(START_TIMESTAMP_OVERRIDE)\n\n    end_timestamp = start_timestamp + datetime.timedelta(seconds=int(call_result.duration))\n    c = Chat(\n        organization_id=organization_id,\n        created_by=created_by,\n        id=uuid4(),\n        agent_id=agent.id,\n        platform_chat_id=call_result.platform_chat_id,  # get the value from the API lookup\n        pipeline_chat_id=call_result.platform_chat_id,  # get the value from the API lookup\n        day=start_timestamp.strftime(\"%Y-%m-%d\"),  # get the value from the API lookup\n        start_time=start_timestamp,  # get the value from the API lookup\n        end_time=end_timestamp,\n        duration=call_result.duration\n    )\n    print('Chat creating with uuid ' + str(c.id))\n\n    # create the messages\n    messages = []\n    auto_message_id = 0\n    for r in recognitions:\n        message_timestamp = start_timestamp + datetime.timedelta(seconds=float(r.timestamp))\n        source = map_to_source(r.speaker_label, call_result.platform_chat_id)\n\n        message = Message(\n            organization_id=organization_id,\n            created_by=created_by,\n            id=uuid4(),\n            chat_id=c.id,\n            agent_id=agent.id,\n            platform_message_id=call_result.platform_chat_id + ':' + str(auto_message_id),  # auto_increment\n            source=source,  # get the value from the API lookup / mapping\n            text=r.transcript,  # get the value from the API lookup\n            platform_chat_id=call_result.platform_chat_id,  # get the value from the API lookup\n            platform_timestamp=message_timestamp.isoformat()  # get the value from the API lookup\n        )\n\n        auto_message_id = auto_message_id + 1\n\n        print('Message creating with uuid ' + str(message.id) + ', timestamp ' + str(message_timestamp) +\n              ', source ' + str(source) + ' utterance ' + r.transcript)\n\n        # add to messages array\n        messages.append(message)\n\n    # set to newly created messages\n    c.messages = messages\n\n    db.session.add(c)\n\n    db.session.commit()\n\n# example SPEAKER_JOB_MAP = [{\"contact_id\": \"42962523581\", \"agent\": \"0\", \"customer\": \"1\"}]\ndef map_to_source(speaker_label, contact_id):\n    # default to customer in case they have background noise splitting audio into multi channel\n    source_value = MessageSource.Customer\n\n    for mapping in SPEAKER_JOB_MAP:\n        if mapping['contact_id'] == contact_id:\n            if mapping['agent'] == speaker_label:\n                source_value = MessageSource.Agent\n            if mapping['customer'] == speaker_label:\n                source_value = MessageSource.Customer\n\n    return source_value\n\n\ndef init_db_session():\n    env_map = {\n        'dev': CoreDevConfig,\n        'staging': CoreStagingConfig,\n        'prod': CoreProductionConfig\n    }\n\n    conf = env_map['staging']\n\n    app = Flask(__name__)\n    app.config.from_object(conf)\n    db.init_app(app)\n    app.app_context().push()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"cli/__insights_population_main__.py","file_name":"__insights_population_main__.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"612504822","text":"#-*- coding: utf-8 -*-\r\n\r\nfrom menu import CMenu as CCustomMenu\r\nfrom object import *\r\nimport gvars\r\n\r\nclass CMenu(CCustomMenu):\r\n\tm_Name\t=\"Pause\"\r\n\tm_SID\t= 2\r\n\tm_Items\t= {\r\n\t\t1\t:{\"text\":\"Continue Game\",},\r\n\t\t2\t:{\"text\":\"Return to Main Menu\"},\r\n\t\t3\t:{\"text\":\"Quit Game\"},\r\n\t\t4\t:{\"text\":\"Edit Mode\"},\r\n\t}\r\n\r\n\tdef Click(self,i,dItemInfo):\r\n\t\tif i==1:\r\n\t\t\tgvars.Event.dispatch_event(\"resume\")\r\n\t\t\treturn\r\n\t\tif i==2:\r\n\t\t\treturn\r\n\t\tif i==3:\r\n\t\t\treturn\r\n\t\tif i==4:\r\n\t\t\tif gvars.Get(\"EditMode\"):\r\n\t\t\t\treturn\r\n\t\t\tgvars.Set(\"EditMode\",True)\r\n\t\t\tobj=gvars.Master\r\n\t\t\toCtrl=CreateCtrl(2)\r\n\t\t\tobj.stop()\r\n\t\t\tobj.do(oCtrl)\r\n\t\t\tgvars.Event.dispatch_event(\"resume\")\r\n\t\t\treturn\r\n\r\n","sub_path":"script/menu/m0002.py","file_name":"m0002.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"107924172","text":"import random\nimport brain_games.cli\n\n\nDESCRIPTION = 'Answer \"yes\" if the number is even, otherwise answer \"no\".'\n\n\ndef get_question_and_answer():\n    number = random.randint(0, 1000)\n\n    if number % 2 == 0:\n        right_answer = 'yes'\n    else:\n        right_answer = 'no'\n\n    question = str(number)\n    return question, right_answer\n\n\ndef start_game():\n    brain_games.cli.play(get_question_and_answer, DESCRIPTION)\n","sub_path":"brain_games/games/brain_even.py","file_name":"brain_even.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"316464156","text":"import requests\n\ndef downImg():\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'\n    }\n    curPage=1  # current page\n    maxPage=301\n    while curPage<=maxPage:\n        url = \"https://book.yunzhan365.com/byos/fsrl/files/mobile/%d.jpg\" % curPage\n        r=requests.get(url=url,headers=headers)\n        with open(\"D:\\\\image\\\\%d.jpg\"%curPage,\"wb\") as f:\n            f.write(r.content)\n        print(\"%d images downloaded\"%curPage)\n        curPage+=1\ndownImg()","sub_path":"张懿妈妈书/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"129836683","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import griddata\n\n# Import the data\nleaseBoundary = pd.read_csv('leaseBoundary.csv')\n\ninacP = pd.read_csv('Data\\\\inactiveWellPressures_clean.csv')\ninacP['Top perf, ft'] = inacP['Top perf, ft'].astype('float')\ninacP['Well ID'] = inacP['Well ID'].astype('str')\ninacP = inacP.set_index('Well ID')\n\n# Generate a mesh of points spanning the location of the wells. The values will be\n# interpolated at these points.\nlatBnds = [inacP['Latitude'].min(), inacP['Latitude'].max()]\nlonBnds = [inacP['Longitude'].min(), inacP['Longitude'].max()]\nlonGrid, latGrid = np.meshgrid(np.arange(*lonBnds, step=1e-4),\n\t\t\t\t\t\t\t   np.arange(*latBnds, step=1e-4))\n\n# Interpolate on the mesh\nPi = griddata((inacP['Longitude'], inacP['Latitude']),\n\t\t\t  inacP['Pressure @ datum, psi'], (lonGrid.flatten(),\n\t\t\t\t\t\t\t\t\t\t\t   latGrid.flatten()))\n\n# Plot the interpolated pressure.\nfig, ax = plt.subplots()\n\ncont1 = ax.contourf(lonGrid, latGrid, Pi.reshape(latGrid.shape))\nplt.colorbar(cont1)\nax.scatter(inacP['Longitude'], inacP['Latitude'], color='black')\nax.set_aspect('equal', adjustable='box')\nax.plot(leaseBoundary.loc[:, 'longitude'], leaseBoundary.loc[:, 'latitude'], '-k')\n\n# Set title and axis' titles\nax.set_title('CRU Static reservoir pressure survey, psi (March 2020)')\nax.title.set_fontsize(16)\nax.set_xlabel('Longitude')\nax.set_ylabel('Latitude')\n\n# Annotate\nii = 0\nfor wellID in inacP.index:\n\tax.annotate(wellID,\n\t\t\t\t(inacP.loc[wellID, 'Longitude'], inacP.loc[wellID, 'Latitude']))\n\tii += 1\n","sub_path":"inacPress.py","file_name":"inacPress.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"415914439","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nplot_exp1.py\n\nPlot experiment 1 from Keramati et al., 2011\nTake as input a folder containing the data\nEach run is in the form of exp1run_.pickle\nCopyright (c) 2013 Guillaume VIEJO. All rights reserved.\n\"\"\"\n\nimport sys\nimport os\nfrom optparse import OptionParser\nimport numpy as np\nsys.path.append(\"../../src\")\nfrom fonctions import *\nimport subprocess\nfrom pylab import plot, figure, show, subplot, legend, ylim, axvline\n# -----------------------------------\n# ARGUMENT MANAGER\n# -----------------------------------\n#if not sys.argv[1:]:\n#    sys.stdout.write(\"Sorry: you must specify at least 1 argument\")\n#    sys.stdout.write(\"More help available with -h or --help option\")\n#    sys.exit(0)\nparser = OptionParser()\nparser.add_option(\"-i\", \"--input\", action=\"store\", help=\"The name of the input folder to load the data\", default=False)\n#parser.add_option(\"-o\", \"--output\", action=\"store\", help=\"The name of the output file to store the data\", default=False)\n(options, args) = parser.parse_args() \n# -----------------------------------\n\n# -----------------------------------\n# PARAMETERS\n# -----------------------------------\neta = 0.0001     # variance of evolution noise v\nvar_obs = 0.05   # variance of observation noise n\nbeta = 1.0       # rate of exploration\ngamma = 0.95     # discount factor\nsigma = 0.02     # updating rate of the average reward\nrau = 0.1        # update rate of the reward function\ntau = 0.08       # time step for graph exploration\n\nphi = 0.1        # update rate of the transition function\ndepth = 3        # depth of search when computing the goal value\ninit_cov = 1.1   # initialisation of covariance matrice\nkappa = 0.1      # unscentered transform parameters\n\nnb_iter_test = 500\n\nnb_iter_mod = 100\ndeval_mod_time = 40\nnb_iter_ext = 350\ndeval_ext_time = 240\n\nstates = ['s0', 's1']\nactions = ['pl', 'em']\n\nvalues = createQValuesDict(states, actions)\n# -----------------------------------\n# Loading data\n# -----------------------------------\n\ndata = dict({'data':dict({'values':dict({'mean':[],\n                                         'var':[]}),\n                          'h':dict({'mean':[],\n                                    'var':[]}),\n                          'r':dict({'mean':[],\n                                    'var':[]}),\n                          'vpi':dict({'mean':[],\n                                      'var':[]}),\n                          'p':dict({'mean':[],\n                                    'var':[]})}),\n             'data2':dict({'values':dict({'mean':[],\n                                          'var':[]}),\n                           'h':dict({'mean':[],\n                                     'var':[]}),\n                           'r':dict({'mean':[],\n                                     'var':[]}),\n                           'vpi':dict({'mean':[],\n                                       'var':[]}),\n                           'p':dict({'mean':[],\n                                     'var':[]})})})\n\ntmp = dict({'data': dict({'values':dict({0:[],1:[],2:[],3:[]}), \n                          'h':dict({0:[],1:[],2:[],3:[]}), \n                          'vpi':dict({0:[], 1:[]}), \n                          'r':[],\n                          'p':dict({0:[],1:[],2:[],3:[]})}),\n            'data2': dict({'values':dict({0:[],1:[],2:[],3:[]}), \n                           'h':dict({0:[],1:[],2:[],3:[]}), \n                           'vpi':dict({0:[], 1:[]}), \n                           'r':[],\n                           'p':dict({0:[],1:[],2:[],3:[]})})})\n\nprocess = subprocess.Popen(\"ls \"+options.input+\" | grep exp1\", shell = True, stdout=subprocess.PIPE)\nlist_data = process.communicate()[0].split(\"\\n\")\n\nfor i in list_data[0:-1]:\n    d = loadData(options.input+\"/\"+i)\n    for j in d.iterkeys():\n        for k in d[j].iterkeys():\n            if len(d[j][k].shape) == 2:\n                for c in range(d[j][k].shape[1]):\n                    tmp[j][k][c].append(d[j][k][:,c].copy())\n            else :\n                tmp[j][k].append(d[j][k].copy())\n\nfor i in tmp.iterkeys():\n    for j in tmp[i].iterkeys():\n        if type(tmp[i][j]) == list:\n            data[i][j]['mean'] = np.mean(tmp[i][j], 0)\n            data[i][j]['var'] = np.var(tmp[i][j], 0)\n        else:\n            for k in tmp[i][j]:\n                data[i][j]['mean'].append(np.mean(tmp[i][j][k], 0))\n                data[i][j]['var'].append(np.var(tmp[i][j][k], 0))\n            data[i][j]['mean'] = np.array(data[i][j]['mean'])\n            data[i][j]['var'] = np.array(data[i][j]['var'])\n\ndelib = dict({'data':dict({'mean':[],\n                           'var':[]}),\n              'data2':dict({'mean':[],\n                            'var':[]})})\n    \nn = len(tmp['data']['vpi'][0])\nfor i in delib.iterkeys():\n    diff = []\n    for k in range(n):\n        a = np.mean([tmp[i]['vpi'][0][k], tmp[i]['vpi'][1][k]], 0)\n        diff.append(a-tmp[i]['r'][k])\n    delib[i]['mean'] = np.mean(diff, 0)\n    delib[i]['var'] = np.var(diff, 0)\n    \n# -----------------------------------\n# Plot\n# -----------------------------------\ncolors = {('s0','pl'):'green',('s0','em'):'red',('s1','pl'):'cyan',('s1','em'):'purple'}\nfigure()\nsubplot(521)\n#for s in states:\nfor s in ['s0']:\n    for a in actions:\n        plot(data['data']['vpi']['mean'][values[(s,a)]], 'o-', color = colors[(s,a)], label = \"VPI(\"+s+\",\"+a+\")\")\nplot(data['data']['r']['mean'], 'o-', color = 'blue', label = \"R*tau\")\naxvline(deval_mod_time-1, color='black')\nlegend()\nylim(0,0.1)\nsubplot(522)\n#for s in states:\nfor s in ['s0']:\n    for a in actions:\n        plot(data['data2']['vpi']['mean'][values[(s,a)]], 'o-', color = colors[(s,a)], label = \"VPI(\"+s+\",\"+a+\")\")\nplot(data['data2']['r']['mean'], 'o-', color = 'blue', label = \"R*tau\")\naxvline(deval_ext_time-1, color='black')\nlegend()\nylim(0,0.1)\nsubplot(523)\nfor s in ['s0']:\n    for a in actions:\n        plot(data['data']['p']['mean'][values[(s,a)]], 'o-', color = colors[(s,a)], label = \"p(\"+s+\",\"+a)\naxvline(deval_mod_time-1, color='black')\nylim(0.3,0.7)\nlegend()\nsubplot(524)\nfor s in ['s0']:\n    for a in actions:\n        plot(data['data2']['p']['mean'][values[(s,a)]], 'o-', color = colors[(s,a)], label = \"p(\"+s+\",\"+a)\naxvline(deval_ext_time-1, color='black')\nylim(0.3,0.7)\nlegend()\nsubplot(525)\nplot(delib['data']['mean']>0, color = 'blue', label = 'deliberation time')\naxvline(deval_mod_time-1, color='black')\nylim(0, 1.5)\nlegend()\nsubplot(526)\nplot(delib['data2']['mean']>0, color = 'blue', label = 'deliberation time')\naxvline(deval_ext_time-1, color='black')\nylim(0, 1.5)\nlegend()\nsubplot(527)\nplot(data['data']['h']['mean'][0]-data['data']['h']['mean'][1], label = \"Qh(s0, pl)-Qh(s0, em)\")\naxvline(deval_mod_time-1, color='black')\nlegend()\nylim(0,0.5)\nsubplot(528)\nplot(data['data2']['h']['mean'][0]-data['data2']['h']['mean'][1], label = \"Qh(s0, pl)-Qh(s0, em)\")\naxvline(deval_ext_time-1, color='black')\nylim(0,0.5)\nlegend()\n\nshow()\n# -----------------------------------\n","sub_path":"run/Keramati/Plot/plot_exp1.py","file_name":"plot_exp1.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"83014460","text":"candyList = [\"Snickers\", \"Kit Kat\", \"Sour Patch Kids\", \"Juicy Fruit\", \"Swedish Fish\",\n             \"Skittles\", \"Hershey Bar\", \"Skittles\", \"Starbursts\", \"M&Ms\"]\n\ncandyCart = []\n\nfor candy in candyList:\n    print(\"[\" + str(candyList.index(candy)) + \"] \" + candy)\n\nwantCandy = \"y\"\nwhile wantCandy == 'y':\n    userChoice = int(input(\"Which candy would you like? \"))\n    candyCart.append(candyList[userChoice])\n    wantCandy = input(\"Do you still want candy? y/n? \")\n\nfor candy in candyCart:\n    print(\"You bought \" + candy)\n","sub_path":"Python/Day2/03-Stu_KidInCandyStore/kid_in_candy_store_Bonus_MySol.py","file_name":"kid_in_candy_store_Bonus_MySol.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"211189268","text":"from screenshot import save_images\nfrom upload import upload_image\nimport os\nimport logging\nlogging.basicConfig(format='%(asctime)s %(module)s.%(funcName)s:%(levelname)s:%(message)s',\n                    datefmt='%m/%d/%Y %I_%M_%S %p',\n                    filename='log_file',\n                    level=logging.INFO)\n\n\ndef main():\n    url='https://twitter.com/neilkakkar/status/1224738075940675584'\n    photo_list = save_images(url)\n    album_title = 'TweetBot'\n    auth_file_name = None\n    # auth_file_name='/Users/lavishsaluja/credentials/photos_cred.json'\n    upload_image(photo_list, album_title, auth_file_name)\n    # os.remove(filename+'.jpg')\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"185692930","text":"import numpy as np\r\nimport cv2, sys\r\n\r\nA=cv2.imread(sys.argv[1])\r\n\r\nif A is None:\r\n\tprint (\" Error loading image \")\t\r\n\tsys.exit()\r\n\r\nr=cv2.imshow('image',A)\r\n\r\ninfo=A.shape\r\nalto = info[1]\r\nancho = info[0]\r\nbandas = info[2]\r\nB=np.zeros((alto, ancho, bandas), np.uint8)\r\n\r\nfor i in range(0, alto, 1):\r\n\tfor j in range(0, ancho, 1):\r\n\t\tB.itemset((j,-i,0),A.item(i,j,0))\r\n\t\tB.itemset((j,-i,1),A.item(i,j,1))\r\n\t\tB.itemset((j,-i,2),A.item(i,j,2))\r\n\r\n\r\ns=cv2.imshow('rotated',B)\r\ncv2.imwrite(sys.argv[2], B) \r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"Ejercicio1.py","file_name":"Ejercicio1.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"419642558","text":"#!/usr/bin/env python3\n# coding: utf-8\n# dependencies: pandas, numpy\n\n#===================================================================================#\n#=================================IMPORT MODULES====================================#\n#===================================================================================#\n\nimport random as rand\nimport time\nimport sys\nimport copy\nimport csv\nimport os\nimport glob\nimport shutil\nfrom datetime import datetime, timedelta\nfrom operator import *\n\nimport unicodedata\n\n#===================================================================================#\n#===================================DEFINITIONS=====================================#\n#===================================================================================#\n\nimport pandas as pd\nimport numpy as np\nfrom functions import *\nfrom classes import *\nfrom global_settings import *\n\nlow_to_high = ['open','near-open','open-mid','mid','close-mid','near-close','close']\nfront_to_back = ['front','near-front','central','near-back','back']\nsonorous_to_non = ['vowel','glide','liquid','nasal','obstruent']\n\ndef find_diacritics(string):\n\tunicode_letters = []\n\tunicode_diacritics = []\n\n\t# iterate over the string argument (the body previously referenced an undefined name)\n\tfor key in range(0,len(string)):\n\t\ti = string[key]\n\t\tif 'COMBINING' in unicodedata.name(i) or 'MODIFIER' in unicodedata.name(i) or 'SUPERSCRIPT' in unicodedata.name(i) or 'SUBSCRIPT' in unicodedata.name(i):\n\t\t\tunicode_diacritics.append(i)\n\t\telif i.isalpha():\n\t\t\tif i not in unicode_letters:\n\t\t\t\tunicode_letters.append(i)\n\t\telse:\n\t\t\tif i not in unicode_letters:\n\t\t\t\tunicode_diacritics.append(i)\n\n\tunicode_letters = list(set(unicode_letters))\n\tunicode_diacritics = list(set(unicode_diacritics))\n\t\n\twith open('letters_and_diacritics.csv', 'w', newline='', encoding='utf-8') as f:\n\t\twriter = csv.writer(f)\n\t\twriter.writerows(unicode_letters)\n\t\twriter.writerow(\"=====================\")\n\t\twriter.writerows(unicode_diacritics)\n\n\treturn(unicode_letters,unicode_diacritics)\n\ndef get_consonant(phone_attributes):\n\tquery = ipa\n\tquery = query[query['voicing'] == phone_attributes[2]]\n\tquery = query[query['subplace'] == phone_attributes[4]]\n\tquery = query[query['submanner'] == phone_attributes[6]]\n\ttry:\n\t\treturn(query.index[0])\n\texcept:\n\t\tpass\n\ndef get_vowel(phone_attributes):\n\tquery = ipa\n\tquery = query[query['roundedness'] == phone_attributes[11]]\n\tquery = query[query['height'] == phone_attributes[12]]\n\tquery = query[query['backness'] == phone_attributes[13]]\n\ttry:\n\t\treturn(query.index[0])\n\texcept:\n\t\tpass\n\ndef change_phone(phone,change_attr,change_to):\n\tx = copy.deepcopy(Phone(phone))\n\tx.dictionary[change_attr] = change_to\n\tphone_attributes = list()\n\tfor key in x.dictionary.keys():\n\t\tphone_attributes.append(x.dictionary[key])\n\tif ipa_dict[phone].type == 'vowel':\n\t\tchanged_phone = get_vowel(phone_attributes)\n\telif ipa_dict[phone].type == 'consonant':\n\t\tchanged_phone = get_consonant(phone_attributes)\n\treturn(changed_phone)\n\n#===================================================================================#\n#==================================VOWEL FUNCTIONS==================================#\n#===================================================================================#\n\ndef raise_vowel(vowel):\n\tcycle = True\n\tscale = 1\n\twhile cycle:\n\t\theight_index = low_to_high.index(Phone(vowel).height)\n\t\tif height_index == len(low_to_high)-1:\n\t\t\t#cannot make vowel higher\n\t\t\treturn(vowel)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tto_height = low_to_high[height_index+scale]\n\t\t\texcept:\n\t\t\t\treturn(vowel)\n\t\t\tnew_vowel = change_phone(vowel,'height',to_height)\n\t\t\tif new_vowel == None:\n\t\t\t\tscale = scale+1\n\t\t\telse:\n\t\t\t\tcycle = False\n\treturn(new_vowel)\n\ndef lower_vowel(vowel):\n\tcycle = True\n\tscale = 1\n\twhile cycle:\n\t\theight_index = low_to_high.index(Phone(vowel).height)\n\t\tif height_index == 0:\n\t\t\t#cannot make vowel lower\n\t\t\treturn(vowel)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tto_height = low_to_high[height_index-scale]\n\t\t\texcept:\n\t\t\t\treturn(vowel)\n\t\t\tnew_vowel = change_phone(vowel,'height',to_height)\n\t\t\tif new_vowel == None:\n\t\t\t\tscale = scale+1\n\t\t\telse:\n\t\t\t\tcycle = False\n\treturn(new_vowel)\n\ndef round_vowel(vowel):\n\tif Phone(vowel).roundedness == 'rounded':\n\t\treturn(vowel)\n\telse:\n\t\tnew_vowel = change_phone(vowel,'roundedness','rounded')\n\t\tif new_vowel == None:\n\t\t\treturn(vowel)\n\t\telse:\n\t\t\treturn(new_vowel)\n\ndef unround_vowel(vowel):\n\tif Phone(vowel).roundedness == 'unrounded':\n\t\treturn(vowel)\n\telse:\n\t\tnew_vowel = change_phone(vowel,'roundedness','unrounded')\n\t\tif new_vowel == None:\n\t\t\treturn(vowel)\n\t\telse:\n\t\t\treturn(new_vowel)\n\ndef push_vowel(vowel):\n\tcycle = True\n\tscale = 1\n\twhile cycle:\n\t\tbackness_index = front_to_back.index(Phone(vowel).backness)\n\t\t# index against front_to_back, not low_to_high (backness, not height)\n\t\tif backness_index == len(front_to_back)-1:\n\t\t\t#cannot push vowel further\n\t\t\treturn(vowel)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tto_distance = front_to_back[backness_index+scale]\n\t\t\texcept:\n\t\t\t\treturn(vowel)\n\t\t\tnew_vowel = change_phone(vowel,'backness',to_distance)\n\t\t\tif new_vowel == None:\n\t\t\t\tscale = scale+1\n\t\t\telse:\n\t\t\t\tcycle = False\n\treturn(new_vowel)\n\ndef pull_vowel(vowel):\n\tcycle = True\n\tscale = 1\n\twhile cycle:\n\t\tbackness_index = front_to_back.index(Phone(vowel).backness)\n\t\tif backness_index == 0:\n\t\t\t#cannot pull vowel further\n\t\t\treturn(vowel)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tto_distance = front_to_back[backness_index-scale]\n\t\t\texcept:\n\t\t\t\treturn(vowel)\n\t\t\tnew_vowel = change_phone(vowel,'backness',to_distance)\n\t\t\tif new_vowel == None:\n\t\t\t\tscale = scale+1\n\t\t\telse:\n\t\t\t\tcycle = False\n\treturn(new_vowel)\n\n#===================================================================================#\n#===============================CONSONANT FUNCTIONS=================================#\n#===================================================================================#\n\ndef devoice(consonant):\n\tnew_consonant = change_phone(consonant,'voicing','voiceless')\n\tif new_consonant != None:\n\t\treturn(new_consonant)\n\telse:\n\t\treturn(consonant)\n\ndef voice(consonant):\n\tnew_consonant = change_phone(consonant,'voicing','voiced')\n\tif new_consonant != None:\n\t\treturn(new_consonant)\n\telse:\n\t\treturn(consonant)\n\ndef deaffricate(consonant):\n\tif Phone(consonant).manner == 'affricate':\n\t\tnew_consonant = change_phone(consonant,'manner','fricative')\n\t\tif new_consonant != None:\n\t\t\treturn(new_consonant)\n\t\telse:\n\t\t\treturn(consonant)\n\treturn(consonant)\n\n#===================================================================================#\n#===================================PHONE LISTS=====================================#\n#===================================================================================#\n\nvowels = list(ipa.loc[ipa.iloc[:]['type']=='vowel'][:].index)\nconsonants = list(ipa.loc[ipa.iloc[:]['type']=='consonant'][:].index)\ndiacritics = list(ipa.loc[ipa.iloc[:]['type']=='diacritic'][:].index)\nstresses = list(ipa.loc[ipa.iloc[:]['type']=='stress'][:].index)\nsuprasegmentals = list(ipa.loc[ipa.iloc[:]['type']=='suprasegmental'][:].index)\n\nvoiced = list(ipa.loc[ipa.iloc[:]['voicing']=='voiced'][:].index)\nvoiceless = list(ipa.loc[ipa.iloc[:]['voicing']=='voiceless'][:].index)\n\ncoronals = list(ipa.loc[ipa.iloc[:]['place']=='coronal'][:].index)\ndorsals = list(ipa.loc[ipa.iloc[:]['place']=='dorsal'][:].index)\nlabials = list(ipa.loc[ipa.iloc[:]['place']=='labial'][:].index)\nlaryengeals = list(ipa.loc[ipa.iloc[:]['place']=='laryengeal'][:].index)\n\nlabiodentals = list(ipa.loc[ipa.iloc[:]['subplace']=='labio-dental'][:].index)\npharyngeals = list(ipa.loc[ipa.iloc[:]['subplace']=='pharyngeal'][:].index)\nglottals = list(ipa.loc[ipa.iloc[:]['subplace']=='glottal'][:].index)\nalveolars = list(ipa.loc[ipa.iloc[:]['subplace']=='alveolar'][:].index)\nretroflexes = list(ipa.loc[ipa.iloc[:]['subplace']=='retroflex'][:].index)\ndentals = list(ipa.loc[ipa.iloc[:]['subplace']=='dental'][:].index)\npalatoalveolars = list(ipa.loc[ipa.iloc[:]['subplace']=='palato-alveolar'][:].index)\nuvulars = list(ipa.loc[ipa.iloc[:]['subplace']=='uvular'][:].index)\nvelars = list(ipa.loc[ipa.iloc[:]['subplace']=='velar'][:].index)\nbilabials = list(ipa.loc[ipa.iloc[:]['subplace']=='bilabial'][:].index)\n\nobstruents = list(ipa.loc[ipa.iloc[:]['manner']=='obstruent'][:].index)\nsonorants = list(ipa.loc[ipa.iloc[:]['manner']=='sonorant'][:].index)\n\naffricates = list(ipa.loc[ipa.iloc[:]['submanner']=='affricate'][:].index)\nflaps = list(ipa.loc[ipa.iloc[:]['submanner']=='flap'][:].index)\ntrills = list(ipa.loc[ipa.iloc[:]['submanner']=='trill'][:].index)\nnasals = list(ipa.loc[ipa.iloc[:]['submanner']=='nasal'][:].index)\nfricatives = list(ipa.loc[ipa.iloc[:]['submanner']=='fricative'][:].index)\nplosives = list(ipa.loc[ipa.iloc[:]['submanner']=='plosive'][:].index)\napproximants = list(ipa.loc[ipa.iloc[:]['submanner']=='approximant'][:].index)\n\nlaterals = list(ipa.loc[ipa.iloc[:]['laterality']=='lateral'][:].index)\nliquids = list(ipa.loc[ipa.iloc[:]['liquidity']=='liquid'][:].index)\nstridents = list(ipa.loc[ipa.iloc[:]['stridency']=='strident'][:].index)\nsibilants = list(ipa.loc[ipa.iloc[:]['sibilance']=='sibilant'][:].index)\n\nunrounded_vowels = list(ipa.loc[ipa.iloc[:]['roundedness']=='unrounded'][:].index)\nrounded_vowels = list(ipa.loc[ipa.iloc[:]['roundedness']=='rounded'][:].index)\n\nclose_vowels = list(ipa.loc[ipa.iloc[:]['height']=='close'][:].index)\nclose_mid_vowels = list(ipa.loc[ipa.iloc[:]['height']=='close-mid'][:].index)\nmid_vowels = list(ipa.loc[ipa.iloc[:]['height']=='mid'][:].index)\nnear_close_vowels = list(ipa.loc[ipa.iloc[:]['height']=='near-close'][:].index)\nnear_open_vowels = list(ipa.loc[ipa.iloc[:]['height']=='near-open'][:].index)\nopen_vowels = list(ipa.loc[ipa.iloc[:]['height']=='open'][:].index)\nopen_mid_vowels = list(ipa.loc[ipa.iloc[:]['height']=='open-mid'][:].index)\n\nback = list(ipa.loc[ipa.iloc[:]['backness']=='back'][:].index)\ncentral = list(ipa.loc[ipa.iloc[:]['backness']=='central'][:].index)\nfront = list(ipa.loc[ipa.iloc[:]['backness']=='front'][:].index)\n\n\n\n","sub_path":"scripts/sounds.py","file_name":"sounds.py","file_ext":"py","file_size_in_byte":9866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
See the GNU General Public License for\n# more details at http://www.gnu.org/copyleft/gpl.html\n#\n# Brief\n# Solves LeetCode Problem 160: Intersection of Two Linked Lists\n\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n if not headA or not headB:\n return None\n pa, pb, lena, lenb = headA, headB, 0, 0\n while pa:\n lena, pa = lena + 1, pa.next\n while pb:\n lenb, pb = lenb + 1, pb.next\n if lena > lenb:\n plong, pshort = headA, headB\n else:\n plong, pshort = headB, headA\n i = abs(lena - lenb)\n while i > 0:\n plong = plong.next\n i -= 1\n while plong and pshort:\n if plong == pshort:\n return plong\n else:\n plong, pshort = plong.next, pshort.next\n return None\n","sub_path":"Problem160/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"580209077","text":"import utilities as utils\nimport numpy as np\nimport random\nimport os\n\n\"\"\" Reference from https://gist.github.com/bwhite/3726239 for ndcg_at_k and dcg_at_k\nCode written by Sanjana Agarwal (sa14593@indiana.edu)\n\"\"\"\n\nndcgSum = 0.0\nlistLength = 0\n\n\ndef printRanking(RankDict, i):\n for key, value in RankDict.items():\n ndcgDict = dict()\n ndcgList = dict()\n r = []\n list = RankDict.get(key)\n if i == 1:\n print('Recommendations of neighborhood ' + str(RankDict.get(key)) + ' for user with ID ' + str(key))\n elif i == 2:\n print('Recommendations of categories' + str(RankDict.get(key)) + ' for user with ID ' + str(key))\n random.shuffle(list)\n for item in list:\n r.append(item[1])\n\n for neighborhood in list:\n ndcg = ndcg_at_k(r, len(list), method=0)\n ndcgDict[neighborhood[0]] = ndcg\n ndcgList[key] = ndcg\n # print(\"Outside list is\", ndcgList)\n if i == 1:\n printToFile('NeighborhoodNDCG.txt', ndcgList, list)\n elif i == 2:\n printToFile('CategoryNDCG.txt', ndcgList, list)\n print(\"NDCG Mean is\", ndcgSum / listLength)\n\n\ndef printToFile(filename, ndcgList, list):\n global ndcgSum, listLength\n # print(\"Length \", len(ndcgList))\n # print(\"NDCG SCORES FOR USERS NEIGHBORHOODS\")\n file = open(filename, 'a+')\n # with open('neighborhoodNDCG.txt') as file:\n for key, value in ndcgList.items():\n file.write(str(key) + \"\\t\" + str(list) + \"\\t\" + str(value))\n file.write(os.linesep)\n ndcgSum += value\n listLength += 1\n # print(\"Sum is\", ndcgSum)\n\n\ndef dcg_at_k(r, k, method=0):\n r = np.asfarray(r)[:k]\n if r.size:\n if method == 0:\n return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))\n elif method == 1:\n return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n else:\n raise ValueError('method must be 0 or 1.')\n return 0.\n\n\ndef ndcg_at_k(r, k, method=0):\n dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)\n if not dcg_max:\n return 0.\n return dcg_at_k(r, k, method) / dcg_max\n\n\nif __name__ == '__main__':\n neighborhoodRanking = utils.getModelFile('userNeighborhood')\n printRanking(neighborhoodRanking, 1)\n categoryRanking = utils.getModelFile('userCategories')\n printRanking(categoryRanking, 2)\n","sub_path":"search-project-master/Evaluation/ndcg.py","file_name":"ndcg.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"204037371","text":"\"\"\"\r\nName: Adventure 
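As a quick check of the NDCG variant in ndcg.py above (method=0: the first result is undiscounted, later ones are log2-discounted), here is a self-contained sketch with made-up relevance grades; it re-states the two functions rather than importing them, so the numbers are illustrative only.

import numpy as np

def dcg_at_k(r, k):
    # method=0 from ndcg.py: r[0] plus the sum of r[1:] divided by log2 of the rank
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
    return 0.

def ndcg_at_k(r, k):
    dcg_max = dcg_at_k(sorted(r, reverse=True), k)
    return dcg_at_k(r, k) / dcg_max if dcg_max else 0.

relevances = [3, 2, 3, 0, 1]  # hypothetical graded relevance of five ranked items
print(round(ndcg_at_k(relevances, 5), 4))              # 0.9435: good but not ideal ordering
print(ndcg_at_k(sorted(relevances, reverse=True), 5))  # 1.0: the ideal ordering, by construction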
Game.py\r\nPurpose: Adventure Game structure in preparation for graphics\r\n using Processing\r\n\r\nAuthor: Lin.I\r\n\r\nCreated: 09/01/2019\r\n\"\"\"\r\nimport time\r\n\r\n\r\n# Create variable for list of rooms\r\nroom_list = []\r\n\r\n# Create new rooms and append to list of rooms\r\nroom = [\"Entrance lobby\", 7, None, 1, 8]\r\nroom_list.append(room)\r\nroom = [\"Check-in Room for the Lab\", 0, None, None, 2]\r\nroom_list.append(room)\r\nroom = [\"Cleaning room\", None, 1, None, 3]\r\nroom_list.append(room)\r\nroom = [\"Laboratory\", 4, 2, None, None]\r\nroom_list.append(room)\r\nroom = [\"Laboratory Storage\", None, None, 3, None]\r\nroom_list.append(room)\r\nroom = [\"Storage/Maintenance Room\", None, 6, None, None]\r\nroom_list.append(room)\r\nroom = [\"Boiler Room\", None, 7, None, 5]\r\nroom_list.append(room)\r\nroom = [\"Front Desk\", None, None, 0, 6]\r\nroom_list.append(room)\r\nroom = [\"Elevator (1st floor)\", None, 0, None, None]\r\nroom_list.append(room)\r\nroom = [\"Elevator (2nd floor)\", None, 10, None, None]\r\nroom_list.append(room)\r\nroom = [\"Hallway\", 11, None, 15, 9]\r\nroom_list.append(room)\r\nroom = [\"Washroom\", None, None, 10, 12]\r\nroom_list.append(room)\r\nroom = [\"Break Room\", None, 11, 13, None]\r\nroom_list.append(room)\r\nroom = [\"Printing Room\", 12, None, 14, None]\r\nroom_list.append(room)\r\nroom = [\"Offices\", 13, 15, None, None]\r\nroom_list.append(room)\r\nroom = [\"Locker Room\", 10, None, None, 14]\r\nroom_list.append(room)\r\n\r\n# Variable for room player is in\r\ncurrent_room = 0\r\n\r\n# Create a variable for a list of items in the game\r\nitem_list = []\r\n\r\n# Create items and append to list of items\r\nitem = [0, None]\r\nitem_list.append(item)\r\nitem = [1, None]\r\nitem_list.append(item)\r\nitem = [2, \"Hazmat Suits\"]\r\nitem_list.append(item)\r\nitem = [3, None]\r\nitem_list.append(item)\r\nitem = [4, \"Needles\"]\r\nitem_list.append(item)\r\nitem = [5, \"Cleaning Products\"]\r\nitem_list.append(item)\r\nitem = [6, None]\r\nitem_list.append(item)\r\nitem = [7, \"Plants\"]\r\nitem_list.append(item)\r\nitem = [8, None]\r\nitem_list.append(item)\r\nitem = [9, None]\r\nitem_list.append(item)\r\nitem = [10, \"Water\"]\r\nitem_list.append(item)\r\nitem = [11, \"Soap\"]\r\nitem_list.append(item)\r\nitem = [12, \"Food\"]\r\nitem_list.append(item)\r\nitem = [13, \"Toner\"]\r\nitem_list.append(item)\r\nitem = [14, \"Money\"]\r\nitem_list.append(item)\r\nitem = [15, \"Credentials\"]\r\nitem_list.append(item)\r\n\r\n# Create a variable for a list of items player has\r\nuser_items = []\r\n\r\n# Create a variable for a list of people in each room\r\npeople_list = []\r\n\r\n# Create people and append to list of people\r\npeople = [0, 2]\r\npeople_list.append(people)\r\npeople = [1, 1]\r\npeople_list.append(people)\r\npeople = [2, 0]\r\npeople_list.append(people)\r\npeople = [3, 0]\r\npeople_list.append(people)\r\npeople = [4, 0]\r\npeople_list.append(people)\r\npeople = [5, 0]\r\npeople_list.append(people)\r\npeople = [6, 0]\r\npeople_list.append(people)\r\npeople = [7, 2]\r\npeople_list.append(people)\r\npeople = [8, 0]\r\npeople_list.append(people)\r\npeople = [9, 0]\r\npeople_list.append(people)\r\npeople = [10, 0]\r\npeople_list.append(people)\r\npeople = [11, 0]\r\npeople_list.append(people)\r\npeople = [12, 3]\r\npeople_list.append(people)\r\npeople = [13, 0]\r\npeople_list.append(people)\r\npeople = [14, 3]\r\npeople_list.append(people)\r\npeople = [15, 0]\r\npeople_list.append(people)\r\n\r\n# Game ends if done is True\r\ndone = 
False\r\n\r\n# Search item variables\r\nfound_all_items = False\r\nitem_count = 0\r\n\r\n# Synthesizer variables\r\nyour_name = cure_name = None\r\n\r\n# Curing people variables\r\ncure_successful = False\r\neveryone_cured = False\r\ncure_count = 0\r\n\r\n# Create a table of valid commands\r\nvalid_commands = (\"\"\"\\n{0:<15}{1}{2}\r\n{3}\r\n{4:<15}{1}{5}\r\n{6:<15}{1}{7}\r\n{8:<15}{1}{9}\r\n{10:<15}{1}{11}\r\n{12:<15}{1}{13}\r\n{14:<15}{1}{15}\r\n{16:<15}{1}{17}\r\n{18:<15}{1}{19}\r\n{20:<15}{1}{21}\r\n{22:<15}{1}{23}\r\n\"\"\".format(\"Commands\", \"|\", \"Description\",\r\n \"-\"*50,\r\n \"North (or n)\", \"Player goes North\",\r\n \"East (or e)\", \"Player goes East\",\r\n \"South (or s)\", \"Player goes South\",\r\n \"West (or w)\", \"Player goes West\",\r\n \"Search (or f)\", \"Player searches room for items and people\",\r\n \"Up (or u)\", \"Go up in the elevator\",\r\n \"Down (or d)\", \"Go down in the elevator\",\r\n \"Cure (or c)\", \"Cure people when cure is made\",\r\n \"Help (or h)\", \"Display valid commands\",\r\n \"Quit (or q)\", \"Quit Game\"))\r\n\r\n\r\ndef yes_or_no(prompt: str) -> bool:\r\n \"\"\"Keep asking the user for a valid answer until they provide one\r\n\r\n :param prompt: yes or no question asked to the user\r\n :return: True or False, or prompt for user to enter valid answer\r\n \"\"\"\r\n valid_answer = False\r\n while not valid_answer:\r\n answer = input(prompt + \" (y/n) \").lower()\r\n if answer == \"y\":\r\n return True\r\n elif answer == \"n\":\r\n return False\r\n else:\r\n print(\"Please enter \\\"y\\\" or \\\"n\\\"\")\r\n\r\n\r\ndef step_successful():\r\n \"\"\"If step is successfully completed advance user to next step\r\n\r\n :return: None\r\n \"\"\"\r\n global step\r\n input(\"Step \" + str(step) + \" Successful\\nNext -->\")\r\n timer(start_time)\r\n\r\n\r\ndef step_unsuccessful(message: str) -> None:\r\n \"\"\"Restart synthesis if user answers questions incorrectly\r\n\r\n :param message: Advice or comment on the incorrect answer given\r\n :return: None\r\n \"\"\"\r\n global step, restart_machine\r\n step = 0\r\n restart_machine = True\r\n input(\"ERROR\\nSynthesis Failed\\n\" +\r\n \"You will need to restart synthesis\\n\" +\r\n message + \"\\nRestart -->\")\r\n timer(start_time)\r\n\r\n\r\ndef timer(time_at_start):\r\n \"\"\"Check and display time left\r\n\r\n :param time_at_start: the time in seconds at the start of the game\r\n :return: None\r\n \"\"\"\r\n global done\r\n time_remaining = 600 - (time.time() - time_at_start)\r\n print()\r\n if time_remaining <= 0:\r\n print(\"You took to long to save everyone\\n\" +\r\n \"You Lose\")\r\n done = True\r\n else:\r\n print(int(time_remaining // 60), \"minute(s),\",\r\n int(time_remaining % 60), \"second(s) remain\")\r\n\r\n\r\n# Welcome user\r\nprint(\"***** Welcome to Virus Treatment *****\")\r\n\r\n# Backstory of game\r\nprint(\"\\nYou arrive at work like how you always have everyday.\")\r\ninput(\"Continue -->\")\r\n\r\nprint(\"\\nHowever, today is different...\")\r\ninput(\"Continue -->\")\r\n\r\nprint(\"\\nAt work you find out that a virus has broken out within the\",\r\n \"building and it has been quarantined by the CDC\",\r\n \"in order to prevent further outbreak.\")\r\ninput(\"Continue -->\")\r\n\r\nprint(\"\\nYou think all hope is lost until you remember you are a scientist.\" +\r\n \"\\nOnly you can synthesize a cure for the virus\")\r\ninput(\"Continue -->\")\r\n\r\nprint(\"\\nAfter minutes of hard work you determine the components\",\r\n \"needed for the cure.\\nHowever you realize 
you only have 10 minutes\",\r\n \"before the virus kills you and everyone in the building.\")\r\ninput(\"Continue -->\")\r\n\r\nprint(\"\\nEverything you need is in this building...\")\r\ninput(\"Start -->\")\r\n\r\n# Find the time the game starts for the player\r\nstart_time = time.time()\r\n\r\n# Display table of valid commands\r\nprint(valid_commands)\r\n\r\n# Game continues prompting until game over\r\nwhile not done:\r\n # Display the room player is in\r\n print(\"\\nYou are in the\", room_list[current_room][0])\r\n\r\n # When the cure is made, display the number of uncured people in the room\r\n if cure_successful and not everyone_cured:\r\n print(people_list[current_room][1],\r\n \"person/people in the\", room_list[current_room][0],\r\n \"need(s) to be cured\")\r\n\r\n # Get command from user\r\n command = input(\"What do you want to do? \").lower()\r\n\r\n # Travelling north\r\n if command == \"north\" or command == \"n\":\r\n next_room = room_list[current_room][1]\r\n # Check if player can travel to next room\r\n if next_room is None:\r\n print(\"You can't go that way\")\r\n else:\r\n current_room = next_room\r\n\r\n # Travelling east\r\n elif command == \"east\" or command == \"e\":\r\n next_room = room_list[current_room][2]\r\n # Check if player can travel to next room\r\n if next_room is None:\r\n print(\"You can't go that way\")\r\n else:\r\n current_room = next_room\r\n\r\n # Travelling south\r\n elif command == \"south\" or command == \"s\":\r\n next_room = room_list[current_room][3]\r\n # Check if player can travel to next room\r\n if next_room is None:\r\n print(\"You can't go that way\")\r\n else:\r\n current_room = next_room\r\n\r\n # Travelling west\r\n elif command == \"west\" or command == \"w\":\r\n next_room = room_list[current_room][4]\r\n # Check if player can travel to next room\r\n if next_room is None:\r\n print(\"You can't go that way\")\r\n # Check if player has credentials to travel to cleaning room\r\n elif next_room == 2 and item_list[15] not in user_items:\r\n print(\"You need credentials to go into the cleaning room\")\r\n # Check if player has hazmat suit to travel to laboratory\r\n elif next_room == 3 and item_list[2] not in user_items:\r\n print(\"You died from an exposure of a cocktail of viruses,\",\r\n \"you needed a hazmat suit. 
You'll get'em next time.\")\r\n done = True\r\n else:\r\n current_room = next_room\r\n\r\n # Going up in elevator\r\n elif command == \"up\" or command == \"u\":\r\n # Check if player is in elevator to go up\r\n if room_list[current_room][0] == room_list[8][0]:\r\n current_room = 9\r\n else:\r\n print(\"You can't go up from here\")\r\n\r\n # Going down in elevator\r\n elif command == \"down\" or command == \"d\":\r\n # Check if player is in elevator to go down\r\n if room_list[current_room][0] == room_list[9][0]:\r\n current_room = 8\r\n else:\r\n print(\"You can't go down from here\")\r\n\r\n # Search room for item\r\n elif ((not found_all_items or not cure_successful) and\r\n (command == \"search\" or command == \"f\")):\r\n # Check if user already has item from room\r\n if (item_list[current_room][1] is not None and\r\n item_list[current_room] not in user_items):\r\n # Check if player has money for water\r\n if current_room == 10 and item_list[14] not in user_items:\r\n print(\"You need money for this item\")\r\n # If user finds new item, and item to inventory\r\n else:\r\n user_items.append(item_list[current_room])\r\n item_count += 1\r\n print(\"You found\", item_list[current_room][1])\r\n print(10-item_count, \"item(s) left to find\")\r\n # If item is already in inventory show no items in room\r\n else:\r\n print(\"No items in this room\")\r\n print(10-item_count, \"item(s) left to find\")\r\n\r\n print(people_list[current_room][1], \"person/people in this room\")\r\n\r\n # Tell user to go to lab if all items have been collected\r\n if item_count == 10:\r\n found_all_items = True\r\n print(\"You have everything you need to synthesize the cure.\\n\" +\r\n \"Now head back to the lab to synthesize it\")\r\n\r\n # Display all valid commands\r\n elif command == \"help\" or command == \"h\":\r\n print(valid_commands)\r\n\r\n # Leave the game\r\n elif command == \"quit\" or command == \"q\":\r\n print(\"You left the game\")\r\n break\r\n\r\n # Synthesizing the cure\r\n step = 1\r\n leave_synth_mach = False\r\n if (found_all_items and current_room == 3 and not cure_successful and\r\n input(\"\\nYou are in the \" + room_list[current_room][0] + \"\\n\" +\r\n \"Welcome to Synthesizing Machine\\n\" +\r\n \"Enter \\\"b\\\" to begin synthesizing cure \")) == \"b\":\r\n print()\r\n\r\n # Cycle through steps until cure is complete or if step is inccorect\r\n while not leave_synth_mach:\r\n restart_machine = False\r\n print(\"Step\", str(step))\r\n\r\n if step == 1:\r\n if yes_or_no(\"Do you wish to leave the synthesis machine?\"):\r\n break\r\n your_name = input(\"Enter your name: \")\r\n print(\"Hello, \" + your_name + \".\")\r\n step_successful()\r\n\r\n elif step == 2:\r\n serum = input(\"Hello, \" + your_name + \".\\n\" +\r\n \"What do you want to make?\\n\" +\r\n \"[1] Virus, [2] Cure, [3] Vaccine\\n\" +\r\n \"Enter your choice: \")\r\n if serum == \"1\":\r\n step_unsuccessful(\"You seriously want to make a virus?!\" +\r\n \"\\nThis one is about to kill you\")\r\n elif serum == \"2\":\r\n print(\"That's a smart choice\")\r\n step_successful()\r\n elif serum == \"3\":\r\n step_unsuccessful(\"A bit late to make a vaccine\" +\r\n \"for a virus that is currently\" +\r\n \"killing you at the moment\")\r\n else:\r\n step_unsuccessful(\"No valid answer given\")\r\n\r\n elif step == 3:\r\n if input(\"How many cures do you need to make?\\n\" +\r\n \"(Hint: It's the number of people in the \" +\r\n \"building including you)\\n-->\") == \"12\":\r\n step_successful()\r\n else:\r\n step_unsuccessful(\"Go 
back and recount the number \" +\r\n \"of people in the building\")\r\n\r\n elif step == 4:\r\n if input(\"What is x in this equation? (x is positive)\\n7x^2 = 28 --> \") == \"2\":\r\n step_successful()\r\n else:\r\n step_unsuccessful(\"Think back to algebra\")\r\n\r\n elif step == 5:\r\n print(\"Balance the chemical equation below:\\n\" +\r\n \"_H2 + _O2 -> _H2O)\")\r\n H2 = input(\"How many H2 molecules? \")\r\n O2 = input(\"How many O2 molecules? \")\r\n H2O = input(\"How many H2O molecules? \")\r\n if H2 == \"2\" and O2 == \"1\" and H2O == \"2\":\r\n step_successful()\r\n else:\r\n step_unsuccessful(\"Both sides must have the \" +\r\n \"same number of atoms\")\r\n\r\n elif step == 6:\r\n print(\"Balance the chemical equation below:\\n\" +\r\n \"_CH4 + _O2 -> _CO2 + _H2O)\")\r\n CH4 = input(\"How many CH4 molecules? \")\r\n O2 = input(\"How many O2 molecules? \")\r\n CO2 = input(\"How many CO2 molecules? \")\r\n H2O = input(\"How many H2O molecules? \")\r\n if CH4 == \"1\" and O2 == \"2\" and CO2 == \"1\" and H2O == \"2\":\r\n step_successful()\r\n else:\r\n step_unsuccessful(\"Both sides must have the \" +\r\n \"same number of atoms\")\r\n\r\n elif step == 7:\r\n print(\"Based on what you have found, what should the \" +\r\n \"resultant or resultants of the following \" +\r\n \"word equation be?\")\r\n if input(\"Plants + Water -> \").lower() == \"food\":\r\n step_successful()\r\n else:\r\n step_unsuccessful(\"Check what you collected\")\r\n\r\n elif step == 8:\r\n if yes_or_no(\"Cure is ready\\nInject cure into needles?\"):\r\n input(\"Needles Filled\\nNext -->\")\r\n step_successful()\r\n else:\r\n step_unsuccessful(\"Cure has gone bad\")\r\n\r\n elif step == 9:\r\n print(\"Cure Synthesized\\nThis is a new cure\")\r\n cure_name = input(\"What would you like to name this cure? 
\")\r\n print(\"Cure has now been named \" + cure_name)\r\n step_successful()\r\n\r\n elif step == 10:\r\n print(cure_name, \"is ready now\\nYou have cured yourself with\",\r\n cure_name + \"\\n11 people left to cure with\", cure_name)\r\n cure_successful = True\r\n leave_synth_mach = True\r\n\r\n print()\r\n step += 1\r\n\r\n # Curing people\r\n if (cure_successful and not everyone_cured and\r\n (command == \"cure\" or command == \"c\")):\r\n # Use cure if there is anyone in the room to cure\r\n if people_list[current_room][1] != 0:\r\n people_list[current_room][1] -= 1\r\n cure_count += 1\r\n print(people_list[current_room][1],\r\n \"person/people left to cure in the\",\r\n room_list[current_room][0])\r\n print(11-cure_count, \"person/people in total left to cure with\",\r\n cure_name)\r\n # Game ends when everyone is cured\r\n if cure_count == 11:\r\n everyone_cured = True\r\n\r\n # Check if cure has been made before curing\r\n if not cure_successful and (command == \"cure\" or command == \"c\"):\r\n print(\"You need to have made the cure to cure people\")\r\n\r\n # Check and display time left\r\n timer(start_time)\r\n\r\n # Win game message\r\n if everyone_cured:\r\n done = True\r\n print(\"\\nEveryone has been cured\\n\" +\r\n \"Congratulations!\\nYou saved everyone\")\r\n\r\n print()\r\n","sub_path":"Adventure Game.py","file_name":"Adventure Game.py","file_ext":"py","file_size_in_byte":17519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"587031748","text":"from django.urls import path, include\nfrom employee.views import *\nfrom rest_framework import routers\n\nrouter = routers.DefaultRouter()\nrouter.register('employee', EmployeeViewSet)\nemployee_list_view = EmployeeViewSet.as_view(\n {\n \"get\": \"list\"\n }\n)\n\nurlpatterns = [\n path('register/', RegisterAPI.as_view(), name='register'),\n path('login/', LoginView.as_view(), name='login'),\n path('logout/', LogoutView.as_view(), name='logout'),\n path('upload/', UploadView.as_view(), name='upload'),\n path('', include(router.urls)),\n\n]\n","sub_path":"EmployeeApp/employee/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"451624677","text":"# coding=utf-8\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport math\nimport sys\nfrom PIL import Image\nimport cv2\nimport random\nimport to_tfrecord as TFRecord\n\ntfrecord_file = TFRecord.tfrecord_file\n_NUM_SHARDS = TFRecord._NUM_SHARDS\nHEIGHT = 1024\nWIDTH = 2048\nCROP_HEIGHT = 680\nCROP_WIDTH = 680\n\n\ndef flip_randomly_left_right_image_with_annotation(image_0_tensor, image_tensor, annotation_tensor):\n # Reference https://github.com/warmspringwinds/tf-image-segmentation/blob/master/tf_image_segmentation/utils/augmentation.py\n # Random variable: two possible outcomes (0 or 1)\n # with a 1 in 2 chance\n random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])\n\n randomly_flipped_img_0 = tf.cond(pred=tf.equal(random_var, 0),\n fn1=lambda: tf.image.flip_left_right(image_0_tensor),\n fn2=lambda: image_0_tensor)\n\n randomly_flipped_img = tf.cond(pred=tf.equal(random_var, 0),\n fn1=lambda: tf.image.flip_left_right(image_tensor),\n fn2=lambda: image_tensor)\n\n randomly_flipped_annotation = tf.cond(pred=tf.equal(random_var, 0),\n fn1=lambda: tf.image.flip_left_right(tf.expand_dims(annotation_tensor, -1)),\n 
fn2=lambda: tf.expand_dims(annotation_tensor, -1))\n randomly_flipped_annotation = tf.squeeze(randomly_flipped_annotation, -1)\n return randomly_flipped_img_0, randomly_flipped_img, randomly_flipped_annotation\n\ndef random_resize(batch_image_0, batch_image, batch_anno, mmin=0.5, mmax=2):\n rand_var = tf.random_uniform(shape=[],\n minval=mmin,\n maxval=mmax)\n scaled_shape = [tf.cast(tf.round(rand_var * HEIGHT), tf.int32), tf.cast(tf.round(rand_var * WIDTH), tf.int32)]\n\n batch_image_0 = tf.image.resize_bilinear(batch_image_0, scaled_shape)\n batch_image_0 = tf.cast(batch_image_0, tf.uint8)\n batch_image = tf.image.resize_bilinear(batch_image, scaled_shape)\n\n batch_anno = tf.expand_dims(batch_anno, -1)\n batch_anno = tf.image.resize_nearest_neighbor(batch_anno, scaled_shape)\n batch_anno = tf.squeeze(batch_anno, -1)\n\n return batch_image_0, batch_image, batch_anno\n\ndef random_crop(batch_image_0, batch_image, batch_anno):\n\n '''\n seed = random.randint(0, 1e10)\n input_shape = batch_image.get_shape().as_list()\n batch_image_0 = tf.random_crop(batch_image_0, [input_shape[0], CROP_HEIGHT, CROP_WIDTH, 3], seed=seed)\n batch_image = tf.random_crop(batch_image, [input_shape[0], CROP_HEIGHT, CROP_WIDTH, 3], seed=seed)\n batch_anno = tf.random_crop(batch_anno, [input_shape[0], CROP_HEIGHT, CROP_WIDTH], seed=seed)\n return batch_image_0, batch_image, batch_anno\n '''\n max_h = HEIGHT - CROP_HEIGHT\n max_w = WIDTH - CROP_WIDTH\n x_st = np.random.randint(low=0, high=max_h)\n y_st = np.random.randint(low=0, high=max_w)\n\n input_shape = batch_image.get_shape().as_list()\n batch_image_0 = tf.slice(batch_image_0, [0, x_st, y_st, 0], [input_shape[0], CROP_HEIGHT, CROP_WIDTH, 3])\n batch_image = tf.slice(batch_image, [0, x_st, y_st, 0], [input_shape[0], CROP_HEIGHT, CROP_WIDTH, 3])\n batch_anno = tf.slice(batch_anno, [0, x_st, y_st], [input_shape[0], CROP_HEIGHT, CROP_WIDTH])\n\n return batch_image_0, batch_image, batch_anno\n\ndef augmentation_standardization(image_0, image, anno, type):\n\n image = tf.cast(image, tf.float32)\n\n #if type == 'train' or type == 'val':\n\n #image_0, image, anno = flip_randomly_left_right_image_with_annotation(image_0, image, anno)\n #image = tf.image.random_brightness(image, max_delta=10)\n\n image = tf.image.per_image_standardization(image)\n #image /= 255\n #image -= 0.5\n image_0 = tf.reshape(image_0, [HEIGHT, WIDTH, 3])\n image = tf.reshape(image, [HEIGHT, WIDTH, 3])\n anno = tf.reshape(anno, [HEIGHT, WIDTH])\n\n return image_0, image, anno\n\ndef augmentation_scale(image_0, image, anno, mmin, mmax, type):\n\n #if type == 'train' or type == 'val':\n # image_0, image, anno = random_resize(image_0, image, anno, mmin, mmax)\n\n ''' for resize to small size\n scaled_shape = [tf.cast(tf.round(0.25 * HEIGHT), tf.int32), tf.cast(tf.round(0.25 * WIDTH), tf.int32)]\n image_0 = tf.image.resize_bilinear(image_0, scaled_shape)\n image = tf.image.resize_bilinear(image, scaled_shape)\n anno = tf.expand_dims(anno, -1)\n anno = tf.image.resize_nearest_neighbor(anno, scaled_shape)\n anno = tf.squeeze(anno, axis=-1)\n '''\n\n image_0, image, anno = random_crop(image_0, image, anno)\n\n return image_0, image, anno\n\ndef read_and_decode(filelist):\n filename_queue = tf.train.string_input_producer(filelist)\n reader = tf.TFRecordReader()\n _, serialized_exampe = reader.read(filename_queue)\n\n features = tf.parse_single_example(serialized_exampe,\n features={\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/anno': tf.FixedLenFeature([], tf.string),\n 'image/filename': 
tf.FixedLenFeature([], tf.string),\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n })\n\n image = tf.decode_raw(features['image/encoded'], tf.uint8)\n anno = tf.decode_raw(features['image/anno'], tf.uint8)\n filename = features['image/filename']\n height = tf.cast(features['image/height'], tf.int32)\n width = tf.cast(features['image/width'], tf.int32)\n\n image = tf.reshape(image, [HEIGHT, WIDTH, 3])\n anno = tf.reshape(anno, [HEIGHT, WIDTH])\n\n return image, anno, filename\n\ndef read_batch(batch_size, type='train'):\n filelist_train = [ os.path.join(tfrecord_file, 'image_%s_%05d-of-%05d.tfrecord' % ('train', shard_id, _NUM_SHARDS - 1)) for\n shard_id in range(_NUM_SHARDS)]\n filelist_val = [os.path.join(tfrecord_file, 'image_%s_%05d-of-%05d.tfrecord' % ('val', shard_id, _NUM_SHARDS - 1))\n for shard_id in range(_NUM_SHARDS)]\n filelist_test = [os.path.join(tfrecord_file, 'image_%s_%05d-of-%05d.tfrecord' % ('test', shard_id, _NUM_SHARDS - 1))\n for shard_id in range(_NUM_SHARDS)]\n\n filelist = []\n if type == 'train':\n filelist = filelist + filelist_train\n elif type == 'val':\n filelist = filelist + filelist_val\n elif type == 'test':\n filelist = filelist + filelist_test\n elif type == 'trainval':\n filelist = filelist + filelist_train + filelist_val\n else:\n raise Exception('data set name not exits')\n\n print(filelist)\n image, anno, filename = read_and_decode(filelist)\n image_0 = image\n ## data augmentation and standardation\n\n image_0, image, anno = augmentation_standardization(image_0, image, anno, type)\n\n image_0_batch, image_batch, anno_batch, filename = tf.train.shuffle_batch([image_0, image, anno, filename], batch_size=batch_size,\n capacity=128, min_after_dequeue=64, num_threads=4)\n\n # print(image_batch, anno_batch)\n image_0_batch, image_batch, anno_batch = augmentation_scale(image_0_batch, image_batch, anno_batch, mmin=0.5, mmax=2.0, type=type)\n return image_0_batch, image_batch, anno_batch, filename\n\nif __name__ == '__main__':\n BATCH_SIZE = 4\n image_0, image_batch, anno_batch, filename = read_batch(BATCH_SIZE, type='val')\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n b_image_0, b_image, b_anno, b_filename = sess.run([image_0, image_batch, anno_batch, filename])\n '''\n print(b_image_0.shape)\n print(b_image.shape)\n print(b_anno.shape)\n print(b_filename)\n\n print(b_image_0)\n print(b_image)\n print(b_anno)\n print(np.unique(b_anno))\n '''\n print(np.unique(b_anno))\n for i in range(BATCH_SIZE):\n cv2.imwrite('test/%d_img.png'%i, b_image_0[i])\n cv2.imwrite('test/%d_img_2.png' % i, 255 * (0.5 + b_image[i]))\n cv2.imwrite('test/%d_anno.png' % i, 10*b_anno[i])\n\n coord.request_stop()\n\n coord.join(threads)","sub_path":"train_on_cityscape/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":8664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62850883","text":"def check_vowel(c):\n c = c.lower()\n vowels = 'aoeiu'\n if c in vowels:\n return True\n else:\n return False\n\n# def disemvowel(string):\n# i = 0\n# while i < len(string):\n# if check_vowel(string[i]):\n# string = string[:i] + string[i+1:]\n# else:\n# i += 1\n# return string\n\n\ndef disemvowel(string):\n vowels = 'aoeiuAOEIU'\n for c in vowels:\n string = string.replace(c, '')\n return string\n\n\nprint(disemvowel('This website is 
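The random_crop in input_data.py above draws one offset and slices image and annotation with it, which is what keeps pixels and labels aligned; a minimal NumPy sketch of that pairing (toy shapes, independent of the TF1 queue pipeline and its HEIGHT/WIDTH constants):

import numpy as np

def paired_random_crop(image, anno, crop_h, crop_w, rng=np.random):
    # one shared offset so the crop windows of image and mask coincide
    h, w = anno.shape
    x = rng.randint(0, h - crop_h + 1)
    y = rng.randint(0, w - crop_w + 1)
    return image[x:x + crop_h, y:y + crop_w, :], anno[x:x + crop_h, y:y + crop_w]

img = np.zeros((1024, 2048, 3), dtype=np.uint8)
mask = np.zeros((1024, 2048), dtype=np.uint8)
crop_img, crop_mask = paired_random_crop(img, mask, 680, 680)
assert crop_img.shape[:2] == crop_mask.shape == (680, 680)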
for losers LOL!'))\n","sub_path":"loops/031120 Strings/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"264419729","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2007 - 2009 Corvus Latinoamerica, C.A. (http://corvus.com.ve) All Rights Reserved\n#\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsability of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# garantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\n\nimport time\nimport locale\nimport wizard\nimport netsvc\nimport pooler\nimport tools\nfrom osv.orm import browse_record\nimport base64\nimport os.path\n\n#Wizard form - Input data------------------------------------------------------------------------------------------------\ndata_form = '''\n<?xml version=\"1.0\"?>\n<form string=\"Archivo\">\n\t<field name=\"fdata\"/>\n\t<field name=\"fname\"/>\n</form>\n'''\n\ndata_fields = {\n\t'fdata': {'string':'Archivo', 'type':'binary', 'filename':'fname','required':True },\n\t'fname': {'string':'Descripcion', 'type':'char', 'size':90 },\n}\n\n\n#Wizard form - Output data-------------------------------------------------------------------------------------------------\n_result_form = '''\n<?xml version=\"1.0\"?>
\" + t + \"\"\n tweet_text.replace(t, new_text)\n return tweet_text\n","sub_path":"tweets/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"473756322","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import tree\nimport timeit\nfrom sklearn import cross_validation\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.optimizers import SGD\nfrom sklearn import svm\nfrom sklearn import neighbors\nfrom keras.utils.np_utils import to_categorical\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nseed = 7\nnp.random.seed(seed)\nlearning = np.genfromtxt('DataSet_1/winequality-red.csv', delimiter=';')\nlearning_in = learning[1:-1,:-1]\nlearning_out = learning[1:-1, -1]\nlearning_in_norm = stats.zscore(learning_in, axis=1, ddof=1)\n\nX_train, X_test_split, Y_train, Y_test_split = cross_validation.train_test_split(learning_in, learning_out, test_size=.4, random_state=0)\nX_CV, X_test, Y_CV, Y_test = cross_validation.train_test_split(X_test_split, Y_test_split, test_size=.5, random_state=0)\n\n\"\"\"\nOptimal Hyperparameters for each learning algorithm\n\"\"\"\n#Decision Tree\nDT_instances = 638\nDT_depth = 8\n\n#Boosted Decision Tree\nBDT_n_est = 60\nBDT_lr = 0.3\nBDT_depth = 8\n\n#Neural Network\nANN_lr = .85\nANN_m = .85\nANN_epochs = 200\n\n#SVM\nSVM_kernel = 'rbf'\nSVM_gamma = .1\n\n#KNN\nKNN_weights = 'distance'\nKNN_k = 49\n\n\"\"\"\nTesting the basic Decision Tree\n\"\"\"\nX_discard, X_train_step, Y_discard, Y_train_step = cross_validation.train_test_split(X_train, Y_train, test_size=DT_instances/X_train.shape[0], random_state=0)\n\nclf_tree = tree.DecisionTreeClassifier(max_depth=6)\nclf_tree = clf_tree.fit(X_train_step, Y_train_step)\nDT_train_error = 1-clf_tree.score(X_train, Y_train)\nDT_cv_error =1-clf_tree.score(X_CV, Y_CV)\nDT_test_error =1-clf_tree.score(X_test, Y_test)\n\nprint(\"The Basic Decision tree had a Training Error of: {0}\".format(DT_train_error))\nprint(\"The Basic Decision tree had a Cross-Validation Error of: {0}\".format(DT_cv_error))\nprint(\"The Basic Decision tree had a Testing Error of: {0}\".format(DT_test_error))\nprint('\\n')\n\"\"\"\nTesting the boosted Decision Tree,\na maximum depth of 2 nodes, a learning rate of .1, and using 16 estimators\n\"\"\"\nclf_tree = AdaBoostClassifier(DecisionTreeClassifier(max_depth=BDT_depth),\n algorithm=\"SAMME\",\n n_estimators=BDT_n_est, learning_rate=BDT_lr)\n# clf_tree = GradientBoostingClassifier(n_estimators=BDT_n_est, learning_rate=BDT_lr, max_depth=BDT_depth, random_state=0)\nclf_tree = clf_tree.fit(X_train_step, Y_train_step)\nBDT_test_error = 1-clf_tree.score(X_test, Y_test)\nBDT_train_error = 1-clf_tree.score(X_train, Y_train)\nBDT_cv_error = 1-clf_tree.score(X_CV, Y_CV)\n\nprint(\"The Boosted Decision tree had a Training Error of: {0}\".format(BDT_train_error))\nprint(\"The Boosted Decision tree had a Cross-Validation Error of: {0}\".format(BDT_cv_error))\nprint(\"The Boosted Decision tree had a Testing Error of: {0}\".format(BDT_test_error))\nprint('\\n')\n\n\nY_train1 = to_categorical(Y_train)\nY_test1 = to_categorical(Y_test)\nY_CV1 = to_categorical(Y_CV)\nmodel = Sequential()\nmodel.add(Dense(input_dim=learning_in.shape[1], output_dim=learning_in.shape[1]/2, 
init='uniform'))\nmodel.add(Activation('relu'))\nmodel.add(Dense(9, input_dim=learning_in.shape[1]/2, init='uniform'))\nmodel.add(Activation('sigmoid'))\nprint('compiling')\nmodel.compile(loss='categorical_crossentropy', optimizer='RMSprop',metrics=['accuracy'])\nmodel.fit(X_train, Y_train1, nb_epoch=ANN_epochs, validation_data=(X_CV, Y_CV1), batch_size=10, verbose=0)\nANN_train_error =1- model.evaluate(X_train, Y_train1, verbose=0)[1]\nANN_cv_error = 1-model.evaluate(X_CV, Y_CV1, verbose=0)[1]\nANN_test_error = 1-model.evaluate(X_test, Y_test1, verbose=0)[1]\n\nprint(\"The ANN had a Training Error of: {0}\".format(ANN_train_error))\nprint(\"The ANN had a Cross-Validation Error of: {0}\".format(ANN_cv_error))\nprint(\"The ANN had a Testing Error of: {0}\".format(ANN_test_error))\nprint('\\n')\n\n\"\"\"\nTesting the Support Vector Machine with a radial kernel\n\"\"\"\n\nclf_rad = svm.SVC(kernel=SVM_kernel, gamma=SVM_gamma)\nclf_rad.fit(X_train, Y_train)\nSVM_train_error = 1-clf_rad.score(X_train, Y_train)\nSVM_cv_error = 1-clf_rad.score(X_CV, Y_CV)\nSVM_test_error = 1-clf_rad.score(X_test, Y_test)\n\nprint(\"The SVM had a Training Error of: {0}\".format(SVM_train_error))\nprint(\"The SVM had a Cross-Validation Error of: {0}\".format(SVM_cv_error))\nprint(\"The SVM had a Testing Error of: {0}\".format(SVM_test_error))\nprint('\\n')\n\n\"\"\"\nTesting K-Nearest Neighbors with distance based weighing and 49 neighbors being considered\n\"\"\"\nknn_dist = neighbors.KNeighborsClassifier(n_neighbors=KNN_k, weights=KNN_weights)\nknn_dist.fit(X_train, Y_train)\nKNN_train_error = 1-knn_dist.score(X_train, Y_train)\nKNN_cv_error = 1-knn_dist.score(X_CV, Y_CV)\nKNN_test_error = 1-knn_dist.score(X_test, Y_test)\n\nprint(\"The KNN had a Training Error of: {0}\".format(KNN_train_error))\nprint(\"The KNN had a Cross-Validation Error of: {0}\".format(KNN_cv_error))\nprint(\"The KNN had a Testing Error of: {0}\".format(KNN_test_error))\nprint('\\n')\n\nfig = plt.figure()\nN = 5\nwidth = .35\nspace = .35\n\ntrain_errors = [DT_train_error, BDT_train_error, ANN_train_error, SVM_train_error, KNN_train_error]\ncv_errors = [DT_cv_error, BDT_cv_error, ANN_cv_error, SVM_cv_error, KNN_cv_error]\ntest_errors = [DT_test_error, BDT_test_error, ANN_test_error, SVM_test_error, KNN_test_error]\n\nind = np.arange(N)*2\nlabels = ['DT', 'BDT', 'ANN','SVM','KNN']\nposition = [1,3,5,7,9]\nplt.bar(ind, train_errors, width, color='r', label='Training Error')\nplt.bar(ind+width, cv_errors, width, color='g', label='Cross Validation Error')\nplt.bar(ind+2*width, test_errors, width, color='b', label='Test Error')\nplt.xticks(position,labels)\n#plt.bar(ind+3*width, 5*[0], width, color='b')\nplt.title('Errors for different models')\nplt.ylabel(\"Error\")\nplt.ylim(0,.8)\nplt.legend(loc=\"best\")\nfig.savefig('Plots/Errors for different models.png')\nplt.close()","sub_path":"Supervised learning/Supervised learning/data1/compare_all.py","file_name":"compare_all.py","file_ext":"py","file_size_in_byte":5961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166811215","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport json\n\nimport tornado.web\n\nfrom .. 
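One caveat on compare_all.py above: sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed in 0.20, so on a current install the same 60/20/20 train/CV/test split comes from sklearn.model_selection instead — a sketch with placeholder arrays, not the wine data:

import numpy as np
from sklearn.model_selection import train_test_split

X = np.random.rand(100, 11)            # stand-in features shaped like the wine inputs
y = np.random.randint(3, 9, size=100)  # stand-in quality labels

# 60/20/20 split, mirroring the two-stage split used in compare_all.py
X_train, X_rest, y_train, y_rest = train_test_split(X, y, test_size=0.4, random_state=0)
X_cv, X_test, y_cv, y_test = train_test_split(X_rest, y_rest, test_size=0.5, random_state=0)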
import config\nfrom ..component import tradeapi, tradeapi_credit, helper\n\n\ndef base_filter(func):\n def wrapper(self, *args, **kwargs):\n screen_size = helper.get_screen_size()\n if screen_size != config.screen_size:\n self.write({'ret_code': -1, 'err_msg': 'screen size({}) not adapted'.format(screen_size)})\n return\n\n now = datetime.datetime.now()\n w_day = now.isoweekday()\n hour = now.hour\n minute = now.minute\n if w_day <= 5 and (hour < 15 and (hour > 9 or (hour == 9 and minute >= 15))):\n pass\n # self.write({'ret_code': -1, 'err_msg': ''})\n # return\n\n return func(self, *args, **kwargs)\n\n return wrapper\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n def set_default_headers(self):\n # The value of the 'Access-Control-Allow-Origin' header in the response must not be the wildcard '*' when the request's credentials mode is 'include'. Origin 'http://localhost:4200' is therefore not allowed access. The credentials mode of requests initiated by the XMLHttpRequest is controlled by the withCredentials attribute.\n # self.set_header('Access-Control-Allow-Origin', 'http://kylin-ux.com:4200')\n #self.set_header('Access-Control-Allow-Origin', 'http://localhost:4200')\n self.set_header('Access-Control-Allow-Origin', '*')\n # self.set_header(\"Access-Control-Allow-Headers\", \"Access-Control-Allow-Headers, Origin,Accept, X-Requested-With, Content-Type, Access-Control-Request-Method, Access-Control-Request-Headers\")\n self.set_header(\"Access-Control-Allow-Headers\", '*')\n self.set_header('Access-Control-Allow-Methods', 'POST, PUT, GET, OPTIONS')\n # set-cookie in response not set for Angular2 post request\n # because CORS\n # return this.http.post('http://localhost:8888/api/query_mshopbook_init_data', JSON.stringify({}), { withCredentials: true })\n # The value of the 'Access-Control-Allow-Credentials' header in the response is 'True' which must be 'true' when the request's credentials mode is 'include'. Origin 'http://localhost:4200' is therefore not allowed access. 
The credentials mode of requests initiated by the XMLHttpRequest is controlled by the withCredentials attribute.\n # self.set_header('Access-Control-Allow-Credentials', 'true')\n\n def options(self):\n # no body\n self.set_status(204)\n self.finish()\n\n \nclass MainHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n def post(self):\n self.write({})\n\n\nclass OrderHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n \n def post(self):\n param = self.request.body.decode('utf-8')\n param = json.loads(param)\n code = param['code']\n direct = param['direct']\n price = param['price']\n count = param['count']\n auto = param['auto']\n\n account_id = param['account_id']\n if account_id == config.ACCOUNT_ID_PT:\n tradeapi.order(direct, code, count, price, auto=auto)\n else:\n op_type = param['op_type']\n tradeapi_credit.order(op_type, direct, code, count, price, auto=auto)\n self.write({})\n\n\nclass WithdrawHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n def post(self):\n param = self.request.body.decode('utf-8')\n param = json.loads(param)\n direct = param['direct']\n\n tradeapi.withdraw(direct)\n\n self.write({})\n\n\nclass QueryWithdrawOrderHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n def post(self):\n param = self.request.body.decode('utf-8')\n param = json.loads(param)\n account_id = param['account_id']\n if account_id == config.ACCOUNT_ID_PT:\n order_list = tradeapi.get_order()\n else:\n order_list = tradeapi_credit.get_order()\n self.write(json.dumps(order_list))\n\n\nclass QueryMoneyHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n @base_filter\n def post(self):\n param = self.request.body.decode('utf-8')\n param = json.loads(param)\n account_id = param['account_id']\n if account_id == config.ACCOUNT_ID_PT:\n r = tradeapi.get_asset()\n else:\n r = tradeapi_credit.get_asset()\n self.write(r)\n\n\nclass QueryPositionHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n @base_filter\n def post(self):\n param = self.request.body.decode('utf-8')\n print(param)\n param = json.loads(param)\n if 'code' in param and param['code']:\n code = param['code']\n else:\n code = None\n\n account_id = param['account_id']\n if account_id == config.ACCOUNT_ID_PT:\n position = tradeapi.query_position(code)\n else:\n position = tradeapi_credit.query_position(code)\n\n self.write(json.dumps(position))\n\n\nclass QueryOperationDetailHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n @base_filter\n def post(self):\n param = self.request.body.decode('utf-8')\n print(param)\n param = json.loads(param)\n if 'code' in param and param['code']:\n code = param['code']\n else:\n code = None\n\n account_id = param['account_id']\n if account_id == config.ACCOUNT_ID_PT:\n detail_list = tradeapi.get_operation_detail(code)\n else:\n detail_list = tradeapi_credit.get_operation_detail(code)\n self.write(json.dumps(detail_list))\n\n\nclass QueryTodayOrderHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n @base_filter\n def post(self):\n param = self.request.body.decode('utf-8')\n print(param)\n param = json.loads(param)\n if 'code' in param and param['code']:\n code = param['code']\n else:\n code = None\n\n account_id = param['account_id']\n if account_id == config.ACCOUNT_ID_PT:\n detail_list = tradeapi.get_operation_detail(code)\n else:\n detail_list = tradeapi_credit.get_today_order(code)\n self.write(json.dumps(detail_list))\n\n\nclass 
QueryTradeSignalHandler(BaseHandler):\n def get(self):\n self.write(\"Hello, world\")\n\n def post(self):\n param = self.request.body.decode('utf-8')\n print(param)\n param = json.loads(param)\n\n if 'date' in param and param['date']:\n date = datetime.datetime.strptime(param['date'], '%Y-%m-%d %H:%M:%S')\n else:\n date = None\n\n from pointor import signal\n from config import config\n supplemental_signal_path = config.supplemental_signal_path\n period = 'day'\n supplemental_signal = signal.get_supplemental_signal(supplemental_signal_path, period)\n\n signal_list = []\n from trade_manager import trade_manager\n position_list = trade_manager.query_current_position()\n position_list = [position.code for position in position_list]\n now = datetime.datetime.now() - datetime.timedelta(minutes=5)\n for signal_dict in supplemental_signal:\n if signal_dict['code'] not in position_list:\n continue\n if not date:\n date = now\n\n if signal_dict['date'] <= date:\n continue\n\n if not signal_dict['price']:\n signal_dict['price'] = 0\n signal_list.append(signal_dict)\n\n from util.util import DateEncoder\n res = json.dumps(signal_list, indent=2, cls=DateEncoder)\n\n self.write(res)\n","sub_path":"server/business/main_handler.py","file_name":"main_handler.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"29135312","text":"import os\nimport logging\nfrom queue import Queue\nfrom builtins import str\nfrom future import standard_library\n\nfrom pymesos import MesosSchedulerDriver, Scheduler\nfrom addict import Dict\n\nfrom airflow import configuration\nfrom airflow.models import DagPickle\nfrom airflow.executors.base_executor import BaseExecutor\nfrom airflow.settings import Session\nfrom airflow.utils.state import State\nfrom airflow.exceptions import AirflowException\nfrom airflow.utils.operator_resources import (\n ScalarResource, TextResource\n)\n\nstandard_library.install_aliases()\n\nDEFAULT_FRAMEWORK_NAME = 'Airflow'\nFRAMEWORK_CONNID_PREFIX = 'mesos_framework_'\n\n\ndef get_framework_name():\n if not configuration.get('mesos', 'FRAMEWORK_NAME'):\n return DEFAULT_FRAMEWORK_NAME\n return configuration.get('mesos', 'FRAMEWORK_NAME')\n\n\ndef copy_env_var(command, env_var_name):\n if not isinstance(command.environment.variables, list):\n command.environment.variables = []\n command.environment.variables.append(\n dict(name=env_var_name, value=os.getenv(env_var_name, ''))\n )\n\n\ndef offer_suitable(task_instance, cpus=0, mem=0, offerOrgIds=[]):\n is_suitable = True\n if task_instance.resources.cpu.value > cpus:\n logging.debug(\"offer doesn't have enough cpu\")\n is_suitable = False\n if task_instance.resources.ram.value > mem:\n logging.debug(\"offer doesn't have enough mem\")\n is_suitable = False\n if not hasattr(task_instance.resources, 'organizationId'):\n logging.info(\"task_instance doesn't have organizationId\")\n return False\n if task_instance.resources.organizationId.value not in offerOrgIds:\n logging.debug(\"offer doesn't have organizationId\")\n is_suitable = False\n return is_suitable\n\n# AirflowMesosScheduler, implements Mesos Scheduler interface\n# To schedule airflow jobs on mesos\n\n\nclass AirflowMesosScheduler(Scheduler):\n \"\"\"\n Airflow Mesos scheduler implements mesos scheduler interface\n to schedule airflow tasks on mesos.\n Basically, it schedules a command like\n 'airflow run --local -p='\n to run on a mesos slave.\n \"\"\"\n\n def __init__(self,\n task_queue,\n 
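The part of BaseHandler above that usually trips people up is the CORS preflight: permissive Access-Control-Allow-* headers plus an empty 204 reply to OPTIONS. A minimal standalone Tornado app exercising just that pattern — handler name, route, and port are illustrative, not from the project:

import tornado.ioloop
import tornado.web

class PingHandler(tornado.web.RequestHandler):
    def set_default_headers(self):
        # mirror the permissive headers used by BaseHandler
        self.set_header('Access-Control-Allow-Origin', '*')
        self.set_header('Access-Control-Allow-Headers', '*')
        self.set_header('Access-Control-Allow-Methods', 'POST, PUT, GET, OPTIONS')

    def options(self):
        # preflight: no body, just the headers above
        self.set_status(204)
        self.finish()

    def post(self):
        self.write({'ret_code': 0})

if __name__ == '__main__':
    app = tornado.web.Application([(r'/ping', PingHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()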
result_queue):\n self.task_queue = task_queue\n self.result_queue = result_queue\n self.task_counter = 0\n self.task_key_map = {}\n\n def registered(self, driver, frameworkId, masterInfo):\n logging.info(\"AirflowScheduler registered to mesos with framework ID %s\", frameworkId.value)\n\n if configuration.getboolean('mesos', 'CHECKPOINT') and configuration.get('mesos', 'FAILOVER_TIMEOUT'):\n # Import here to work around a circular import error\n from airflow.models import Connection\n\n # Update the Framework ID in the database.\n session = Session()\n conn_id = FRAMEWORK_CONNID_PREFIX + get_framework_name()\n connection = Session.query(Connection).filter_by(conn_id=conn_id).first()\n if connection is None:\n connection = Connection(conn_id=conn_id, conn_type='mesos_framework-id',\n extra=frameworkId.value)\n else:\n connection.extra = frameworkId.value\n\n session.add(connection)\n session.commit()\n Session.remove()\n\n def reregistered(self, driver, masterInfo):\n logging.info(\"AirflowScheduler re-registered to mesos\")\n\n def disconnected(self, driver):\n # TODO: exponential back off(?)\n # TODO: use many scheduler connections as a connection pool\n self.start()\n logging.info(\"AirflowScheduler disconnected from mesos\")\n\n def offerRescinded(self, driver, offerId):\n logging.info(\"AirflowScheduler offer %s rescinded\", str(offerId))\n\n def frameworkMessage(self, driver, executorId, slaveId, message):\n logging.info(\"AirflowScheduler received framework message %s\", message)\n\n def executorLost(self, driver, executorId, slaveId, status):\n logging.warning(\"AirflowScheduler executor %s lost\", str(executorId))\n\n def slaveLost(self, driver, slaveId):\n logging.warning(\"AirflowScheduler slave %s lost\", str(slaveId))\n\n def error(self, driver, message):\n logging.error(\"AirflowScheduler driver aborted %s\", message)\n raise AirflowException(\"AirflowScheduler driver aborted %s\" % message)\n\n def resourceOffers(self, driver, offers):\n logging.debug(\"got offers: {}\".format(offers))\n logging.debug(\"task_counter: {}\".format(self.task_counter))\n logging.debug(\"task_key_map: {}\".format(self.task_key_map))\n logging.debug(\"task_queue.qsize: {}\".format(self.task_queue.qsize()))\n\n for offer in offers:\n tasks = []\n offerCpus = 0\n offerMem = 0\n offerOrgIds = []\n # TODO: check disk\n for resource in offer.resources:\n if resource.name == \"cpus\":\n offerCpus += resource.scalar.value\n elif resource.name == \"mem\":\n offerMem += resource.scalar.value\n offerOrgIds = [attr.text.value for attr in offer.attributes if attr.name == 'organizationId']\n\n logging.info(\n \"Received offer {} with cpus: {} and mem: {} and organizationIds: {}\".format(\n offer.id.value, offerCpus, offerMem, offerOrgIds))\n\n remainingCpus = offerCpus\n remainingMem = offerMem\n\n if self.task_queue.empty():\n logging.debug(\"task_queue is empty\")\n\n while (not self.task_queue.empty()):\n key, cmd, task_instance = self.task_queue.get()\n # validate resource offers\n if not offer_suitable(task_instance, remainingCpus, remainingMem, offerOrgIds):\n # if not suitable, put task back on the queue\n logging.info(\"offer not suitable for {}\".format(key))\n logging.info(\"task_instance.resources={}\".format(task_instance.resources))\n logging.info(\"putting {} back into task_queue\".format(key))\n self.task_queue.put((key, cmd, task_instance))\n break\n tid = self.task_counter\n self.task_counter += 1\n self.task_key_map[str(tid)] = (key, cmd, task_instance)\n\n logging.info(\"Launching task %d 
using offer %s\", tid, offer.id.value)\n\n task = Dict()\n task.task_id.value = str(tid)\n task.agent_id.value = offer.agent_id.value\n task.name = \"AirflowTask %d\" % tid\n task.resources = [\n dict(name=\"cpus\", type=\"SCALAR\", scalar={\"value\": task_instance.resources.cpu.value}),\n dict(name=\"mem\", type=\"SCALAR\", scalar={\"value\": task_instance.resources.ram.value}),\n ]\n\n container = Dict()\n container.type = \"DOCKER\"\n container.volumes = [\n dict(host_path=\"/airflow_home/logs\", container_path=\"/airflow_home/logs\", mode=\"RW\"),\n dict(host_path=\"/var/run/docker.sock\", container_path=\"/var/run/docker.sock\", mode=\"RW\"),\n ]\n\n docker = Dict()\n docker.image = os.getenv(\"DOCKER_AIRFLOW_IMAGE_TAG\", \"astronomerio/airflow\")\n docker.force_pull_image = True\n\n container.docker = docker\n task.container = container\n\n command = Dict()\n command.value = cmd\n\n # Copy some environment vars from scheduler to execution docker container\n copy_env_var(command, \"AIRFLOW__CORE__SQL_ALCHEMY_CONN\")\n copy_env_var(command, \"AIRFLOW__SMTP__SMTP_HOST\")\n copy_env_var(command, \"AIRFLOW__SMTP__SMTP_STARTTLS\")\n copy_env_var(command, \"AIRFLOW__SMTP__SMTP_SSL\")\n copy_env_var(command, \"AIRFLOW__SMTP__SMTP_USER\")\n copy_env_var(command, \"AIRFLOW__SMTP__SMTP_PORT\")\n copy_env_var(command, \"AIRFLOW__SMTP__SMTP_PASSWORD\")\n copy_env_var(command, \"AIRFLOW__SMTP__SMTP_MAIL_FROM\")\n copy_env_var(command, \"AWS_ACCESS_KEY_ID\")\n copy_env_var(command, \"AWS_SECRET_ACCESS_KEY\")\n\n task.command = command\n tasks.append(task)\n remainingCpus -= task_instance.resources.cpu.value\n remainingMem -= task_instance.resources.ram.value\n\n logging.info(\"Offer {} is launching tasks: {}\".format(offer.id.value, tasks))\n driver.launchTasks(offer.id, tasks)\n\n def statusUpdate(self, driver, update):\n logging.info(\"Task %s is in state %s, data %s\",\n update.task_id.value, update.state, update)\n\n try:\n key, cmd, task_instance = self.task_key_map[update.task_id.value]\n except KeyError:\n # The map may not contain an item if the framework re-registered after a failover.\n # Discard these tasks.\n logging.warn(\"Unrecognised task key %s\" % update.task_id.value)\n return\n\n # XXX: Sometimes we get into a situation where task_queue.task_done()\n # throws errors. Could be due to some unhandled event we should be taking\n # care of somewhere else. Less likely, could be due to an issue where Queue.put isn't\n # properly locking. Either way, just ignore for now.\n try:\n if update.state == \"TASK_FINISHED\":\n self.result_queue.put((key, State.SUCCESS))\n self.task_queue.task_done()\n del self.task_key_map[update.task_id.value]\n return\n\n if update.state == \"TASK_LOST\" or \\\n update.state == \"TASK_KILLED\" or \\\n update.state == \"TASK_FAILED\":\n self.result_queue.put((key, State.FAILED))\n self.task_queue.task_done()\n return\n if update.state == \"TASK_ERROR\":\n # catch potential race condition between airflow and the rest of the\n # frameworks in mesos\n # potentially not needed depending on how the offers actually work\n # i.e. 
if offers are sent out to all frameworks at once or one by one\n                if 'more than available' in update.message:\n                    self.task_queue.task_done()\n                    del self.task_key_map[update.task_id.value]\n                    self.task_queue.put((key, cmd, task_instance))\n                else:\n                    logging.info('unhandled TASK_ERROR state: {}'.format(update.message))\n                    self.result_queue.put((key, State.FAILED))\n                    self.task_queue.task_done()\n                    del self.task_key_map[update.task_id.value]\n        except ValueError:\n            logging.warning(\"Error marking task_done\")\n\n\nclass AstronomerMesosExecutor(BaseExecutor):\n    \"\"\"\n    MesosExecutor allows distributing the execution of task\n    instances to multiple mesos workers.\n\n    Apache Mesos is a distributed systems kernel which abstracts\n    CPU, memory, storage, and other compute resources away from\n    machines (physical or virtual), enabling fault-tolerant and\n    elastic distributed systems to easily be built and run effectively.\n    See http://mesos.apache.org/\n    \"\"\"\n\n    def __init__(self, mesos_driver=None):\n        super().__init__()\n        self.task_queue = Queue()\n        self.result_queue = Queue()\n        self._mesos_driver = mesos_driver\n\n    @property\n    def mesos_driver(self):\n        \"\"\"\n        Lazily instantiates the Mesos scheduler driver if one was not injected in\n        via the constructor\n        \"\"\"\n        if self._mesos_driver is None:\n            framework = Dict()\n            framework.user = 'core'\n\n            if not configuration.get('mesos', 'MASTER'):\n                logging.error(\"Expecting mesos master URL for mesos executor\")\n                raise AirflowException(\"mesos.master not provided for mesos executor\")\n\n            master = configuration.get('mesos', 'MASTER')\n\n            framework.name = get_framework_name()\n\n            if configuration.getboolean('mesos', 'CHECKPOINT'):\n                framework.checkpoint = True\n\n                if configuration.get('mesos', 'FAILOVER_TIMEOUT'):\n                    # Import here to work around a circular import error\n                    from airflow.models import Connection\n\n                    # Query the database to get the ID of the Mesos Framework, if available.\n                    conn_id = FRAMEWORK_CONNID_PREFIX + framework.name\n                    session = Session()\n                    connection = session.query(Connection).filter_by(conn_id=conn_id).first()\n                    if connection is not None:\n                        # Set the Framework ID to let the scheduler reconnect with running tasks.\n                        framework.id.value = connection.extra\n\n                    framework.failover_timeout = configuration.getint('mesos', 'FAILOVER_TIMEOUT')\n            else:\n                framework.checkpoint = False\n\n            logging.info('MesosFramework master : %s, name : %s, checkpoint : %s',\n                         master, framework.name, str(framework.checkpoint))\n\n            if configuration.getboolean('mesos', 'AUTHENTICATE'):\n                if not configuration.get('mesos', 'DEFAULT_PRINCIPAL'):\n                    logging.error(\"Expecting authentication principal in the environment\")\n                    raise AirflowException(\"mesos.default_principal not provided in authenticated mode\")\n                if not configuration.get('mesos', 'DEFAULT_SECRET'):\n                    logging.error(\"Expecting authentication secret in the environment\")\n                    raise AirflowException(\"mesos.default_secret not provided in authenticated mode\")\n\n                principal = configuration.get('mesos', 'DEFAULT_PRINCIPAL')\n                secret = configuration.get('mesos', 'DEFAULT_SECRET')\n\n                framework.principal = principal\n\n                self._mesos_driver = MesosSchedulerDriver(\n                    AirflowMesosScheduler(self.task_queue, self.result_queue),\n                    framework,\n                    master,\n                    use_addict=True,\n                    principal=principal,\n                    secret=secret)\n            else:\n                framework.principal = 'Airflow'\n                self._mesos_driver = MesosSchedulerDriver(\n                    AirflowMesosScheduler(self.task_queue, self.result_queue),\n                    framework,\n                    master,\n                    use_addict=True)\n        return self._mesos_driver\n\n    def start(self):\n        self.mesos_driver.start()\n\n    def execute_async(self, key, command, queue=None):\n        logging.info('placing task on queue: %s %s', key, command)\n        pickle_id = self.pickle_from_command(command)\n        if pickle_id is not None:\n            dag_pickle = self.find_pickle(pickle_id)\n            if dag_pickle is not None:\n                dag = dag_pickle.pickle\n                dag_id, task_id, execution_date = key\n                task_instance = dag.get_task(task_id=task_id)\n                logging.debug('Have matching task %s', task_instance)\n                logging.debug('requires resources: %s', task_instance.resources)\n                # query dag_pickle table with pickle from command\n                # get the task matching key off dag.tasks\n                self.task_queue.put((key, command, task_instance))\n\n    def sync(self):\n        while not self.result_queue.empty():\n            results = self.result_queue.get()\n            self.change_state(*results)\n\n    def end(self):\n        self.task_queue.join()\n        self.mesos_driver.stop()\n\n    def pickle_from_command(self, command):\n\n        from airflow.bin.cli import get_parser\n        parser = get_parser()\n        strip_airflow = command[len('airflow '):]\n        args = parser.parse_args(strip_airflow.split())\n        if hasattr(args, 'pickle'):\n            return args.pickle\n\n    def find_pickle(self, pickle_id):\n        session = Session()\n        logging.debug(f'Loading pickle id {pickle_id}')\n        dag_pickle = session.query(\n            DagPickle).filter(DagPickle.id == pickle_id).first()\n        return dag_pickle\n","sub_path":"airflow_home/plugins/executors/astronomer_mesos_executor.py","file_name":"astronomer_mesos_executor.py","file_ext":"py","file_size_in_byte":16525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} {"seq_id":"438884414","text":"import data_utils\nfrom cnn import Net\nimport torch\nimport torchvision\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# Function that displays an image\ndef imshow(img):\n    img = img / 2 + 0.5     # unnormalize\n    npimg = img.numpy()\n    plt.imshow(np.transpose(npimg, (1, 2, 0)))\n    plt.show()\n\n\n# get datasets\nPATH = './weights/cnn_weights.pth'\ntest_loader = data_utils.get_test_loader()\nnet = Net()\nnet.load_state_dict(torch.load(PATH))\nclasses = ('plane', 'car', 'bird', 'cat',\n           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))
for i in range(10))\nwith torch.no_grad():\n for data in test_loader:\n images, labels = data\n outputs = net(images)\n _, predicted = torch.max(outputs, 1)\n c = (predicted == labels).squeeze()\n for i in range(4):\n label = labels[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\nfor i in range(10):\n print('Accuracy of %5s : %2d %%' % (\n classes[i], 100 * class_correct[i] / class_total[i]))\n\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n\n# 输出图片\nimshow(torchvision.utils.make_grid(images))\nprint('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))\n\noutputs = net(images)\n_, predicted = torch.max(outputs, 1)\n\nprint('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"210601269","text":"import numpy as np \nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint \n\nclass maladie :\n def __init__(self,R,pi,mu) :\n self.R = R\n self.beta = R*mu\n self.pi = pi \n self.mu = mu \n#definition des fonctions usuelles inutiles\n def get_beta(self) :\n return self.beta\n\n def get_pi(self) :\n return self.pi\n\n def get_mu(self) :\n return self.mu\n\n def get_R(self) :\n return self.R\n \nclass env_total: \n def __init__(self,NN1,NN2,NN3,NN4,virus,matrice_influence,S01,U01,P01,R0_U1,R0_P1,S02,U02,P02,R0_U2,R0_P2,S03,U03,P03,R0_U3,R0_P3,S04,U04,P04,R0_U4,R0_P4) :\n self.population = NN1+NN2+NN3+NN4\n self.pi = virus.pi \n self.mu = virus.mu \n self.virus = virus \n self.beta1 = virus.beta/NN1\n self.beta2 = virus.beta/NN2\n self.beta3 = virus.beta/NN3\n self.beta4 = virus.beta/NN4\n self.influence_1_2 = matrice_influence[0][1]\n self.influence_1_3 = matrice_influence[0][2]\n self.influence_1_4 = matrice_influence[0][3]\n self.influence_2_1 = matrice_influence[1][0]\n self.influence_2_3 = matrice_influence[1][2]\n self.influence_2_4 = matrice_influence[1][3]\n self.influence_3_1 = matrice_influence[2][0]\n self.influence_3_2 = matrice_influence[2][1]\n self.influence_3_4 = matrice_influence[2][3]\n self.influence_4_1 = matrice_influence[3][0]\n self.influence_4_2 = matrice_influence[3][1]\n self.influence_4_3 = matrice_influence[3][2]\n self.S1 = S01\n self.S2 = S02\n self.S3 = S03\n self.S4 = S04\n self.U1 = U01\n self.U2 = U02\n self.U3 = U03\n self.U4 = U04\n self.P1 = P01\n self.P2 = P02\n self.P3 = P03\n self.P4 = P04\n self.R_U1 = R0_U1\n self.R_U2 = R0_U2\n self.R_U3 = R0_U3\n self.R_U4 = R0_U4\n self.R_P1 = R0_P1\n self.R_P2 = R0_P2\n self.R_P3 = R0_P3\n self.R_P4 = R0_P4\n self.NN1= NN1 \n self.NN2=NN2\n self.NN3=NN3\n self.NN4=NN4\n self.history = [[S01,U01,P01,R0_U1,R0_P1],[S02,U02,P02,R0_U2,R0_P2],[S03,U03,P03,R0_U3,R0_P3],[S04,U04,R0_U4,R0_P4,P04]]\n\n def je_veux_juste_le_lendemain (self,prop_test1,prop_test2,prop_test3,prop_test4) :\n test1 = prop_test1 * self.NN1\n test2 = prop_test2 * self.NN2\n test3 = prop_test3 * self.NN3\n test4 = prop_test4 * self.NN4\n def systeme_diff (vecteur_condition_initiale,t) :\n S01,U01,P01,R0_U1,R0_P1,S02,U02,P02,R0_U2,R0_P2,S03,U03,P03,R0_U3,R0_P3,S04,U04,P04,R0_U4,R0_P4 = vecteur_condition_initiale\n dS1 = - self.beta1 * S01 * (U01 + (1-self.virus.pi)*P01)\n dS2 = - self.beta2 * S02 * (U02 + (1-self.virus.pi)*P02)\n dS3 = - self.beta3 * S03 * (U03 + (1-self.virus.pi)*P03)\n dS4 = - self.beta4 * S04 * (U04 + (1-self.virus.pi)*P04) \n dR_U1 = self.virus.mu * U01 \n 
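# The four-region compartment system above repeats the same structure per
# region. A minimal single-region version of it (S susceptible, U undetected
# infected, P detected positive, R_U/R_P recovered), integrated with scipy's
# odeint exactly as the class does; parameter and initial values here are
# illustrative assumptions, not taken from the document:
import numpy as np
from scipy.integrate import odeint

def sur_system(y, t, beta, mu, pi, tests):
    S, U, P, R_U, R_P = y
    dS = -beta * S * (U + (1 - pi) * P)      # new infections; positives isolate with efficacy pi
    detected = tests * U / (U + R_U + S)     # positives found among the tested pool
    dU = -dS - mu * U - detected
    dP = detected - mu * P
    dR_U = mu * U
    dR_P = mu * P
    return [dS, dU, dP, dR_U, dR_P]

T = np.arange(0, 100, 1)
sol = odeint(sur_system, [990.0, 10.0, 0.0, 0.0, 0.0], T,
             args=(3e-4, 0.1, 0.9, 50.0))   # beta, mu, pi, tests/day (dummy values)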
dR_U2 = self.virus.mu * U02 \n dR_U3 = self.virus.mu * U03 \n dR_U4 = self.virus.mu * U04 \n dR_P1 = self.virus.mu * P01\n dR_P2 = self.virus.mu * P02\n dR_P3 = self.virus.mu * P03\n dR_P4 = self.virus.mu * P04\n dP1 = (test1)*((U01/(U01+R0_U1+S01))) - self.virus.mu * P01\n dP2 = (test2)*((U02/(U02+R0_U2+S02))) - self.virus.mu * P02\n dP3 = (test3)*((U03/(U03+R0_U3+S03))) - self.virus.mu * P03\n dP4 = (test4)*((U04/(U04+R0_U4+S04))) - self.virus.mu * P04\n dU1 = -dS1 - self.virus.mu*U01 - (test1)*(U01/(U01+R0_U1+S01)) + U02 * self.influence_1_2* self.beta2 + U03 * self.influence_1_3 * self.beta3 + U04 * self.influence_1_4 *self.beta4\n dU2 = -dS2 - self.virus.mu*U02 - (test2)*(U02/(U02+R0_U2+S02)) + U01 * self.influence_2_1 * self.beta1 + U03 * self.influence_2_3 *self.beta3+ U04 * self.influence_2_4*self.beta4 \n dU3 = -dS3 - self.virus.mu*U03 - (test3)*(U03/(U03+R0_U3+S03)) + U01 * self.influence_3_1 * self.beta1+ U02 * self.influence_3_2 * self.beta2 + U04 * self.influence_3_4*self.beta4\n dU4 = -dS4 - self.virus.mu*U04 - (test4)*(U04/(U04+R0_U4+S04)) + U01 * self.influence_4_1 * self.beta1+ U02 * self.influence_4_2 * self.beta2 + U03 * self.influence_4_3*self.beta3 \n return [dS1,dU1,dP1,dR_U1,dR_P1,dS2,dU2,dP2,dR_U2,dR_P2,dS3,dU3,dP3,dR_U3,dR_P3,dS4,dU4,dP4,dR_U4,dR_P4]\n T = np.arange(0,100,1)\n data = odeint (systeme_diff,[self.S1,self.U1,self.P1,self.R_U1,self.R_P1,self.S2,self.U2,self.P2,self.R_U2,self.R_P2,self.S3,self.U3,self.P3,self.R_U3,self.R_P3,self.S4,self.U4,self.P4,self.R_U4,self.R_P4],T)\n # Actualisation du milion et demi de valeur !!\n self.S1 = data[1,0] \n self.U1 = data[1,1]\n self.P1 = data[1,2]\n self.R1_U = data[1,3]\n self.R1_P = data[1,4]\n self.history[0][0].append(self.S1)\n self.history[0][1].append(self.U1)\n self.history[0][2].append(self.P1)\n self.history[0][3].append(self.R1_U)\n self.history[0][4].append(self.R1_P)\n\n self.S2 = data[1,5] \n self.U2 = data[1,6]\n self.P2 = data[1,7]\n self.R2_U = data[1,8]\n self.R2_P = data[1,9]\n self.history[1][0].append(self.S2)\n self.history[1][1].append(self.U2)\n self.history[1][2].append(self.P2)\n self.history[1][3].append(self.R2_U)\n self.history[1][4].append(self.R2_P)\n\n self.S3 = data[1,10] \n self.U3 = data[1,11]\n self.P3 = data[1,12]\n self.R3_U = data[1,13]\n self.R3_P = data[1,14]\n self.history[2][0].append(self.S3)\n self.history[2][1].append(self.U3)\n self.history[2][2].append(self.P3)\n self.history[2][3].append(self.R3_U)\n self.history[2][4].append(self.R3_P)\n\n self.S4 = data[1,15] \n self.U4 = data[1,16]\n self.P4 = data[1,17]\n self.R4_U = data[1,18]\n self.R4_P = data[1,19]\n self.history[3][0].append(self.S4)\n self.history[3][1].append(self.U4)\n self.history[3][2].append(self.P4)\n self.history[3][3].append(self.R4_U)\n self.history[3][4].append(self.R4_P)\n\n def graphe_sur_charge(self,prop_test1,prop_test2,prop_test3,prop_test4) :\n test1 = prop_test1 * self.NN1\n test2 = prop_test2 * self.NN2\n test3 = prop_test3 * self.NN3\n test4 = prop_test4 * self.NN4\n def systeme_diff (vecteur_condition_initiale,t) :\n S01,U01,P01,R0_U1,R0_P1,S02,U02,P02,R0_U2,R0_P2,S03,U03,P03,R0_U3,R0_P3,S04,U04,P04,R0_U4,R0_P4 = vecteur_condition_initiale\n dS1 = - self.beta1 * S01 * (U01 + (1-self.virus.pi)*P01)\n dS2 = - self.beta2 * S02 * (U02 + (1-self.virus.pi)*P02)\n dS3 = - self.beta3 * S03 * (U03 + (1-self.virus.pi)*P03)\n dS4 = - self.beta4 * S04 * (U04 + (1-self.virus.pi)*P04) \n dR_U1 = self.virus.mu * U01 \n dR_U2 = self.virus.mu * U02 \n dR_U3 = self.virus.mu * U03 \n dR_U4 = self.virus.mu * 
U04 \n dR_P1 = self.virus.mu * P01\n dR_P2 = self.virus.mu * P02\n dR_P3 = self.virus.mu * P03\n dR_P4 = self.virus.mu * P04\n dP1 = (test1)*((U01/(U01+R0_U1+S01))) - self.virus.mu * P01\n dP2 = (test2)*((U02/(U02+R0_U2+S02))) - self.virus.mu * P02\n dP3 = (test3)*((U03/(U03+R0_U3+S03))) - self.virus.mu * P03\n dP4 = (test4)*((U04/(U04+R0_U4+S04))) - self.virus.mu * P04\n dU1 = -dS1 - self.virus.mu*U01 - (test1)*(U01/(U01+R0_U1+S01)) + U02 * self.influence_1_2* self.beta2 + U03 * self.influence_1_3 * self.beta3 + U04 * self.influence_1_4 *self.beta4\n dU2 = -dS2 - self.virus.mu*U02 - (test2)*(U02/(U02+R0_U2+S02)) + U01 * self.influence_2_1 * self.beta1 + U03 * self.influence_2_3 *self.beta3+ U04 * self.influence_2_4*self.beta4 \n dU3 = -dS3 - self.virus.mu*U03 - (test3)*(U03/(U03+R0_U3+S03)) + U01 * self.influence_3_1 * self.beta1+ U02 * self.influence_3_2 * self.beta2 + U04 * self.influence_3_4*self.beta4\n dU4 = -dS4 - self.virus.mu*U04 - (test4)*(U04/(U04+R0_U4+S04)) + U01 * self.influence_4_1 * self.beta1+ U02 * self.influence_4_2 * self.beta2 + U03 * self.influence_4_3*self.beta3 \n return [dS1,dU1,dP1,dR_U1,dR_P1,dS2,dU2,dP2,dR_U2,dR_P2,dS3,dU3,dP3,dR_U3,dR_P3,dS4,dU4,dP4,dR_U4,dR_P4]\n T = np.arange(0,100,1)\n data = odeint (systeme_diff,[self.S1,self.U1,self.P1,self.R_U1,self.R_P1,self.S2,self.U2,self.P2,self.R_U2,self.R_P2,self.S3,self.U3,self.P3,self.R_U3,self.R_P3,self.S4,self.U4,self.P4,self.R_U4,self.R_P4],T)\n # On extrait les données pour préparer la représentation graphique \n s1 = data[:,0] \n u1 = data[:,1]\n p1 = data[:,2]\n r_u1 = data[:,3] \n r_p1 = data[:,4] \n\n s2 = data[:,5] \n u2 = data[:,6]\n p2 = data[:,7]\n r_u2 = data[:,8] \n r_p2 = data[:,9] \n\n s3 = data[:,10] \n u3 = data[:,11]\n p3 = data[:,12]\n r_u3 = data[:,13] \n r_p3 = data[:,14] \n\n s4 = data[:,15] \n u4 = data[:,16]\n p4 = data[:,17]\n r_u4 = data[:,18] \n r_p4 = data[:,19] \n \n # Création des graphiques\n plt.figure(1)\n plt.suptitle(\"Simulation d'une épidémie dans un environnement de 4 régions connectée\")\n\n plt.subplot(2,2,1)\n plt.plot(T,s1, color = 'blue')\n plt.plot(T,u1, color = 'red')\n plt.plot(T,p1, color = 'yellow')\n plt.plot(T,r_u1, color = 'green')\n plt.plot(T,r_p1, color = 'violet')\n plt.title(\"Région 1 Jeune : beta = \"+ str(self.virus.beta)+\"/ pi = \"+ str(self.virus.pi) + \"/ mu = \"+ str(self.virus.mu))\n \n plt.subplot(2,2,2)\n plt.plot(T,s2, color = 'blue')\n plt.plot(T,u2, color = 'red')\n plt.plot(T,p2, color = 'yellow')\n plt.plot(T,r_u2, color = 'green')\n plt.plot(T,r_p2, color = 'violet')\n plt.title(\"Région 1 Vieux : beta = \"+ str(self.virus.beta)+\"/ pi = \"+ str(self.virus.pi) + \"/ mu = \"+ str(self.virus.mu))\n\n plt.subplot(2,2,3)\n plt.plot(T,s3, color = 'blue')\n plt.plot(T,u3, color = 'red')\n plt.plot(T,p3, color = 'yellow')\n plt.plot(T,r_u3, color = 'green')\n plt.plot(T,r_p3, color = 'violet')\n plt.title(\"Région 2 Jeune : beta = \"+ str(self.virus.beta)+\"/ pi = \"+ str(self.virus.pi) + \"/ mu = \"+ str(self.virus.mu))\n plt.subplot(2,2,4)\n plt.plot(T,s4, color = 'blue')\n plt.plot(T,u4, color = 'red')\n plt.plot(T,p4, color = 'yellow')\n plt.plot(T,r_u4, color = 'green')\n plt.plot(T,r_p4, color = 'violet')\n plt.title(\"Région 2 Vieux : beta = \"+ str(self.virus.beta)+\"/ pi = \"+ str(self.virus.pi) + \"/ mu = \"+ str(self.virus.mu))\n\n plt.show()\n\nclass env_minimal :\n def __init__(self,name,NN,virus,influence_inter_regionale,S0,U0,P0,R0_U,R0_P) :\n self.history = [[S0],[U0],[P0],[R0_U],[R0_P]]\n self.name = name \n self.population = NN\n 
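# The 2x2 comparison figure above repeats the same five plt.plot calls four
# times. The same layout can be produced with one loop over plt.subplots axes;
# a sketch over dummy data, not the author's code:
import numpy as np
import matplotlib.pyplot as plt

T = np.arange(100)
series = {f"Region {k}": np.random.rand(100, 5) for k in range(1, 5)}  # dummy data
colors = ['blue', 'red', 'yellow', 'green', 'violet']
fig, axes = plt.subplots(2, 2, figsize=(10, 8))
for ax, (title, data) in zip(axes.flat, series.items()):
    for col, color in enumerate(colors):
        ax.plot(T, data[:, col], color=color)
    ax.set_title(title)
plt.show()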
self.virus = virus\n virus.beta = virus.beta / self.population\n self.influence_inter_regionale = influence_inter_regionale\n self.S0 = S0\n self.P0 =P0\n self.U0 = U0\n self.R0_P = R0_P\n self.R0_U = R0_U \n\n# Les fonctions un peu chiantes des get qui sont utiles mais c'est surtout pour m'entrainer \n def get_population(self) :\n return self.population\n def get_susceptible(self) :\n return self.S0\n def get_name(self) :\n return self.name\n def get_positif(self) :\n return self.P0\n def get_recovered_positif(self) :\n return self.RP_0\n def get_recovered_undetected(self) :\n return self.RU_0\n def get_undetected(self) :\n return self.U0\n def get_history(self) :\n return self.history\n\n#Les fonctions de modifications des valeurs de la classe ,je vous jure après les fonctions sont mieux ! \n def evol_local_seule (self,proportion_de_la_pop_testee):\n # Condition pour pas faire des calculs inutiles ou incohérent\n if (self.U0 < (self.population)*(10**-10)) or(self.U0 > self.population):\n\n self.history[0].append(self.S0)\n self.history[1].append(self.U0)\n self.history[2].append(self.P0)\n self.history[3].append(self.R0_U)\n self.history[4].append(self.R0_P)\n else : \n # Calcul des variations \n nmbr_test = proportion_de_la_pop_testee * self.population\n dS = - self.virus.beta* self.S0 * (self.U0 + (1-self.virus.pi)*self.P0) # *(self.S0/self.population) \n dU = -dS - self.virus.mu*self.U0 - (nmbr_test)*(self.U0/(self.U0+self.R0_U+self.S0)) # (epsilon * NN )*(U0/(U0+R0+S0) le nombr de test * la proportion d'infectée dans la population qu'il reste à tester donc c'est le nombre de personne détectées positives à la fin des test\n dP = (nmbr_test )*(self.U0/(self.U0+self.R0_U+self.S0)) - self.virus.mu * self.P0\n dR_U = self.virus.mu * self.U0 # on ajoute les personnes guéries et qui vont se refaire testées\n dR_P = self.virus.mu * self.P0 # on ajoute les personnes guéries sans le savoir\n dS = (dP+dU+dR_U+dR_P) * (-1)\n\n #Actualistaion des facteurs \n self.S0 = self.S0 + dS \n self.U0 = self.U0 + dU \n self.P0 = self.P0 + dP \n self.R0_P = self.R0_P + dR_P\n self.R0_U = self.R0_U + dR_U\n self.history[0].append(self.S0)\n self.history[1].append(self.U0)\n self.history[2].append(self.P0)\n self.history[3].append(self.R0_U)\n self.history[4].append(self.R0_P)\n\n def crache_un_graphe (self,proportion_de_la_pop_testee,duree_de_experience) :\n # duree doit être un nombre de jour entier \n TEMPS = np.arange(0,duree_de_experience,1)\n for i in range (duree_de_experience-1) :\n self.evol_local_seule(proportion_de_la_pop_testee) \n data = np.array(self.get_history())\n data = data / self.population\n fig, ax = plt.subplots(nrows=1, ncols=1)\n S, = ax.plot(TEMPS, data[0], marker='+', color='blue', label='S0')\n U ,= ax.plot(TEMPS, data[1], marker='+', color='red', label='U0')\n P ,= ax.plot(TEMPS, data[2], marker='+', color='yellow', label='P0')\n RU, = ax.plot(TEMPS, data[3], marker='+', color='green', label='RO_U')\n RP ,= ax.plot(TEMPS, data[4], marker='+', color='violet', label='R0_P')\n R, = ax.plot(TEMPS, data[4] + data [3], marker='+', color='orange', label='RP+RU')\n ax.set(xlabel='Temps (en jours)', ylabel='Proportion de la population', title=\"Evolution discrète pour beta = \"+ str(self.virus.beta)+ \" / pi = \" + str(self.virus.pi) + \" / mu = \" + str(self.virus.mu))\n plt.legend([S,U,P,RU,RP,R], ['S', 'U', 'P','R_U','R_P','R total'], loc='best')\n plt.show()\n \n def crache_un_graphe_continu(self,proportion_test,time_limit) : \n # définition du système d'équation diff \n def 
evolution_continue(vecteur_condition_initiale,t) :# avec t = 1 dans notre cas\n population = self.population\n nmbr_test = proportion_test * population\n S,U,P,R_U,R_P = vecteur_condition_initiale\n dS = - self.virus.beta* S * (U + (1-self.virus.pi)*P) \n dU = -dS - self.virus.mu*U - (nmbr_test)*(U/(U+R_U+S)) \n dP = (nmbr_test )*((U/(U+R_U+S))) - self.virus.mu * P\n dR_U = self.virus.mu * U # on ajoute les personnes guéries et qui vont se refaire testées\n dR_P = self.virus.mu * P # on ajoute les personnes guéries sans le savoir\n dS = (dP+dU+dR_U+dR_P) * (-1)\n return [dS,dU,dP,dR_U,dR_P]\n \n T = np.arange(0,time_limit,1) # liste de 0 à 99 \n # On extrait les données de la résolution \n data = odeint (evolution_continue,[self.S0,self.U0,self.P0,self.R0_U,self.R0_P],T)\n data = data /self.population\n Susceptible = data[:,0] \n Undetected = data[:,1]\n Positive = data[:,2]\n R_Undectected = data[:,3]\n R_Positive = data[:,4]\n # Création du Graphique \n fig, ax = plt.subplots(nrows=1, ncols=1)\n S, = ax.plot(T, Susceptible, marker='+', color='blue', label='S0')\n U ,= ax.plot(T, Undetected , marker='+', color='red', label='U0')\n P ,= ax.plot(T, Positive, marker='+', color='yellow', label='P0')\n RU, = ax.plot(T, R_Undectected, marker='+', color='green', label='RO_U')\n RP ,= ax.plot(T, R_Positive, marker='+', color='violet', label='R0_P')\n R, = ax.plot(T, R_Undectected + R_Positive, marker='+', color='orange', label='RP+RU')\n ax.set(xlabel='Temps (en jours)', ylabel='Proportion de la population', title=\"Evolution continu pour beta = \"+ str(self.virus.beta)+ \" / pi = \" + str(self.virus.pi) + \" / mu = \" + str(self.virus.mu))\n plt.legend([S,U,P,RU,RP,R], ['S', 'U', 'P','R_U','R_P','R total'], loc='best')\n plt.show()\n\n def evol_local_seule_continu (self,proportion_test) :\n # définition du système d'équation diff \n def evolution_continue(vecteur_condition_initiale,t) :\n population = self.population\n nmbr_test = proportion_test * population\n S,U,P,R_U,R_P = vecteur_condition_initiale\n dS = - self.virus.beta* S * (U + (1-self.virus.pi)*P) \n dU = -dS - self.virus.mu*U - (nmbr_test)*(U/(U+R_U+S)) \n dP = (nmbr_test )*((U/(U+R_U+S))) - self.virus.mu * P\n dR_U = self.virus.mu * U # on ajoute les personnes guéries et qui vont se refaire testées\n dR_P = self.virus.mu * P # on ajoute les personnes guéries sans le savoir\n dS = (dP+dU+dR_U+dR_P) * (-1)\n return [dS,dU,dP,dR_U,dR_P]\n #Extraction et actualisation des données \n T = np.arange(0,100,1)\n data = odeint (evolution_continue,[self.S0,self.U0,self.P0,self.R0_U,self.R0_P],T)\n self.S0 = data[1,0] \n self.U0 = data[1,1]\n self.P0 = data[1,2]\n self.R0_U = data[1,3]\n self.R0_P = data[1,4]\n self.history[0].append(self.S0)\n self.history[1].append(self.U0)\n self.history[2].append(self.P0)\n self.history[3].append(self.R0_U)\n self.history[4].append(self.R0_P)\n\n \n def evol_local_seule_continu_controle (self,proportion_test) :\n # définition du système d'équation diff \n def evolution_continue(vecteur_condition_initiale,t) :\n population = self.population\n nmbr_test = proportion_test * population\n S,U,P,R_U,R_P = vecteur_condition_initiale\n dS = - self.virus.beta* S * (U + (1-self.virus.pi)*P) \n dU = -dS - self.virus.mu*U - (nmbr_test)*(U/(U+R_U+S)) \n dP = (nmbr_test )*((U/(U+R_U+S))) - self.virus.mu * P\n dR_U = self.virus.mu * U # on ajoute les personnes guéries et qui vont se refaire testées\n dR_P = self.virus.mu * P # on ajoute les personnes guéries sans le savoir\n dS = (dP+dU+dR_U+dR_P) * (-1)\n return 
[dS,dU,dP,dR_U,dR_P]\n #Extraction et actualisation des données \n T = np.arange(0,100,1)\n data = odeint (evolution_continue,[self.S0,self.U0,self.P0,self.R0_U,self.R0_P],T)\n self.S0 = data[1,0] \n self.U0 = data[1,1]\n self.P0 = data[1,2]\n self.R0_U = data[1,3]\n self.R0_P = data[1,4]\n self.history[0].append(self.S0)\n self.history[1].append(self.U0)\n self.history[2].append(self.P0)\n self.history[3].append(self.R0_U)\n self.history[4].append(self.R0_P)\n return []\n\n def determiner_controllabilite (self,proportion_de_la_pop_testee,time_limit) :\n # renvoie un vecteur des info cool et les parmètres \n #Ici on imule l'épidémie sur une durée donnée \n for i in range (time_limit) :\n self.evol_local_seule_continu(proportion_de_la_pop_testee) \n # Extraction des données de pic \n data = self.history\n data_U = np.array(self.history[1])\n data_P = np.array(self.history [2])\n data_I = data_P + data_U\n data_I = list(data_I)\n pic_value = max(data_I)\n pic_time = data_I.index(pic_value)\n I_last_time = data[1][-1] + data [2][-1] \n I_all_time = data[1][-1] + data [2][-1] + data [3][-1] + data [4][-1 ] # je somme au dernier temps les recovered les infecte à ce temps pour avoir le noimbre d'infécté jusque là par l'épidémie \n return [self.virus.R,self.virus.mu,self.virus.pi,proportion_de_la_pop_testee,pic_value,pic_time,I_all_time,I_last_time]\n\n","sub_path":"environnement_exp.py","file_name":"environnement_exp.py","file_ext":"py","file_size_in_byte":20903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"595446149","text":"#!/usr/bin/env python\n# -*- coding: ISO-8859-1 -*-\n\n##################################\n# @program synda\n# @description climate models data transfer program\n# @copyright Copyright “(c)2009 Centre National de la Recherche Scientifique CNRS. 
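# determiner_controllabilite above locates the epidemic peak by converting the
# summed U+P history to a list and scanning for max/index; with numpy the peak
# value and day can be read directly (a sketch over a dummy history, not the
# author's data):
import numpy as np

infected = np.array([1, 4, 9, 15, 12, 7, 3])   # U + P per day (invented values)
pic_value = infected.max()
pic_time = int(infected.argmax())              # first day the peak is reached
assert (pic_value, pic_time) == (15, 3)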
\n# All Rights Reserved”\n# @license CeCILL (https://raw.githubusercontent.com/Prodiguer/synda/master/sdt/doc/LICENSE)\n##################################\n\n\"\"\"This module add data_node to dataset_id in case dataset_id is set without data_node in selection file.\n\nNotes\n - This module make possible to search files using reduced dataset_id (aka\n instance_id aka dataset_functional_id).\n - If 'data_node' is set in selection file use it, else use choose data_node\n depending on 'replica' flag (if set to false, use master data_node, if set\n to true use a random replica data_node).\n - This filter maybe be deprecated because of this:\n - it IS possible to use type=File and dataset_id WITHOUT DATA_NODE\n - example\n - http://esgf-index1.ceda.ac.uk/esg-search/search?query=cmip5.output1.MIROC.MIROC4h.rcp45.6hr.atmos.6hrLev.r1i1p1.v20110926\n - gives two datasets\n - http://esgf-index1.ceda.ac.uk/esg-search/search?type=File&query=cmip5.output1.MIROC.MIROC4h.rcp45.6hr.atmos.6hrLev.r1i1p1.v20110926\n - gives all files of the two datasets\n - the only thing to be careful of is replica\n (in the example above, both found datasets are the same, one is the master, one is a replica)\n\"\"\"\n\nimport sys\nimport argparse\nimport json\nimport random\nimport sdapp\nimport sdprint\nimport sdquicksearch\n\ndef run(facets_groups):\n for facets_group in facets_groups:\n if 'dataset_id' in facets_group:\n dataset_id_list=facets_group['dataset_id'] # dataset_id is a list of values\n\n new_values=[]\n\n for dataset_id in dataset_id_list: \n if is_data_node_present_in_dataset_id(dataset_id):\n new_values.append(dataset_id)\n else:\n instance_id=dataset_id # meaningfull as instance_id is dataset_id without data_node\n new_values.append(instance_id_to_dataset_id(instance_id,facets_group))\n\n facets_group['dataset_id']=new_values\n\n return facets_groups\n\ndef instance_id_to_dataset_id(instance_id,facets_group):\n if \"data_node\" in facets_group:\n return \"%s|%s\"%(instance_id,facets_group['data_node'])\n else:\n replica=facets_group['replica'] if \"replica\" in facets_group else None\n return retrieve_full_dataset_id(instance_id,replica)\n\ndef retrieve_full_dataset_id(instance_id,replica):\n \"\"\"Retrieve missing data_node from search-API.\"\"\"\n\n if replica is None:\n replica=['false'] # if replica flag not present, let's choose the master (arbitrary choice)\n\n # list to scalar\n replica=replica[0]\n\n # search-API call\n datanodes=get_data_nodes(instance_id,replica)\n\n if replica=='true':\n # retrieve a random replica data_node\n\n if len(datanodes)>0:\n return \"%s|%s\"%(instance_id, random.choice(datanodes))\n else:\n return instance_id # datanode not found, don't change anything\n\n elif replica=='false':\n # retrieve master data_node\n\n if len(datanodes)==1:\n return \"%s|%s\"%(instance_id,datanodes[0])\n else:\n return instance_id # datanode not found, don't change anything\n\ndef get_data_nodes(instance_id,replica_scalar):\n \"\"\"Return one or more data_nodes depending on the 'replica' flag.\"\"\"\n\n parameter=['limit=50','type=Dataset','instance_id=%s'%instance_id,'replica=%s'%replica_scalar]\n\n # debug\n #print parameter\n\n result=sdquicksearch.run(parameter=parameter,post_pipeline_mode=None,dry_run=False)\n if result.num_result>0:\n\n datanodes=[]\n for d in result.files:\n datanodes.append(d['data_node'])\n\n return datanodes\n else: \n return []\n\ndef is_data_node_present_in_dataset_id(dataset_id):\n \"\"\"Check whether data_node is present in the dataset_id.\"\"\"\n\n if '|' in 
dataset_id:\n return True\n\n \"\"\"\n if '%7C' in dataset_id:\n return True\n \"\"\"\n\n return False\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-1','--print_only_one_item',action='store_true')\n parser.add_argument('-f','--format',choices=['raw','line','indent'],default='raw')\n args = parser.parse_args()\n\n facets_groups=json.load(sys.stdin)\n facets_groups=run(facets_groups)\n sdprint.print_format(facets_groups,args.format,args.print_only_one_item)\n","sub_path":"sdt/bin/sdcompletedatasetid.py","file_name":"sdcompletedatasetid.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"629031589","text":"import random\noptions = ['rock', 'paper', 'scissors']\nwinlose = {'scissors':'rock', 'rock':'paper', 'paper':'scissors'}\n\nwin = 0\nlose = 0\n\nwhile True:\n\tx = raw_input('Choose:\\n1. Rock\\n2. Paper\\n3. Scissors\\n')\n\n\ttry:\n\t\tx = int(x)\n\texcept:\n\t\tpass\n\t\n\tif x==1: x='rock'\n\telif x==2: x='paper'\n\telse: x='scissors'\n\n\tprint('You chose ' + x)\n\n\toptions.append(winlose[x])\n\n\ty = random.choice(options)\n\n\tprint('Opponent chose ' + y)\n\n\tif x == y:\n\t\tprint('Tie')\n\telif winlose[x] == y:\n\t\tprint('YOU LOSE')\n\t\tlose += 1\n\telse:\n\t\tprint('YOU WIN')\n\t\twin += 1\n\n\tprint('--------------\\n'+str(options)+'\\n----------------')\n\n\tprint(win, lose)","sub_path":"Python/rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"14724570","text":"import json\nimport logging\nimport tempfile\nimport time\n\nimport boto3\nimport pandas as pd\nimport pytest # type: ignore\n\nimport awswrangler as wr\n\nfrom ._utils import extract_cloudformation_outputs\n\nlogging.getLogger(\"awswrangler\").setLevel(logging.DEBUG)\n\n\ninspections_documents = [\n {\n \"business_address\": \"315 California St\",\n \"business_city\": \"San Francisco\",\n \"business_id\": \"24936\",\n \"business_latitude\": \"37.793199\",\n \"business_location\": {\"lon\": -122.400152, \"lat\": 37.793199},\n \"business_longitude\": \"-122.400152\",\n \"business_name\": \"San Francisco Soup Company\",\n \"business_postal_code\": \"94104\",\n \"business_state\": \"CA\",\n \"inspection_date\": \"2016-06-09T00:00:00.000\",\n \"inspection_id\": \"24936_20160609\",\n \"inspection_score\": 77,\n \"inspection_type\": \"Routine - Unscheduled\",\n \"risk_category\": \"Low Risk\",\n \"violation_description\": \"Improper food labeling or menu misrepresentation\",\n \"violation_id\": \"24936_20160609_103141\",\n },\n {\n \"business_address\": \"10 Mason St\",\n \"business_city\": \"San Francisco\",\n \"business_id\": \"60354\",\n \"business_latitude\": \"37.783527\",\n \"business_location\": {\"lon\": -122.409061, \"lat\": 37.783527},\n \"business_longitude\": \"-122.409061\",\n \"business_name\": \"Soup Unlimited\",\n \"business_postal_code\": \"94102\",\n \"business_state\": \"CA\",\n \"inspection_date\": \"2016-11-23T00:00:00.000\",\n \"inspection_id\": \"60354_20161123\",\n \"inspection_type\": \"Routine\",\n \"inspection_score\": 95,\n },\n {\n \"business_address\": \"2872 24th St\",\n \"business_city\": \"San Francisco\",\n \"business_id\": \"1797\",\n \"business_latitude\": \"37.752807\",\n \"business_location\": {\"lon\": -122.409752, \"lat\": 37.752807},\n \"business_longitude\": \"-122.409752\",\n \"business_name\": \"TIO CHILOS GRILL\",\n 
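# The synda module above treats a full dataset_id as "<instance_id>|<data_node>"
# and tests for the pipe to tell the two forms apart. Splitting on the pipe
# recovers both parts, and a missing data_node yields an empty string; the
# instance_id below is the one from the module docstring, the host is invented:
dataset_id = "cmip5.output1.MIROC.MIROC4h.rcp45.6hr.atmos.6hrLev.r1i1p1.v20110926|esgf-data1.example.org"
instance_id, _, data_node = dataset_id.partition('|')
assert instance_id.startswith('cmip5.') and data_node != ''
assert "no.data.node.here".partition('|')[2] == ''   # reduced form has no data_node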
\"business_postal_code\": \"94110\",\n \"business_state\": \"CA\",\n \"inspection_date\": \"2016-07-05T00:00:00.000\",\n \"inspection_id\": \"1797_20160705\",\n \"inspection_score\": 90,\n \"inspection_type\": \"Routine - Unscheduled\",\n \"risk_category\": \"Low Risk\",\n \"violation_description\": \"Unclean nonfood contact surfaces\",\n \"violation_id\": \"1797_20160705_103142\",\n },\n {\n \"business_address\": \"1661 Tennessee St Suite 3B\",\n \"business_city\": \"San Francisco Whard Restaurant\",\n \"business_id\": \"66198\",\n \"business_latitude\": \"37.75072\",\n \"business_location\": {\"lon\": -122.388478, \"lat\": 37.75072},\n \"business_longitude\": \"-122.388478\",\n \"business_name\": \"San Francisco Restaurant\",\n \"business_postal_code\": \"94107\",\n \"business_state\": \"CA\",\n \"inspection_date\": \"2016-05-27T00:00:00.000\",\n \"inspection_id\": \"66198_20160527\",\n \"inspection_type\": \"Routine\",\n \"inspection_score\": 56,\n },\n {\n \"business_address\": \"2162 24th Ave\",\n \"business_city\": \"San Francisco\",\n \"business_id\": \"5794\",\n \"business_latitude\": \"37.747228\",\n \"business_location\": {\"lon\": -122.481299, \"lat\": 37.747228},\n \"business_longitude\": \"-122.481299\",\n \"business_name\": \"Soup House\",\n \"business_phone_number\": \"+14155752700\",\n \"business_postal_code\": \"94116\",\n \"business_state\": \"CA\",\n \"inspection_date\": \"2016-09-07T00:00:00.000\",\n \"inspection_id\": \"5794_20160907\",\n \"inspection_score\": 96,\n \"inspection_type\": \"Routine - Unscheduled\",\n \"risk_category\": \"Low Risk\",\n \"violation_description\": \"Unapproved or unmaintained equipment or utensils\",\n \"violation_id\": \"5794_20160907_103144\",\n },\n {\n \"business_address\": \"2162 24th Ave\",\n \"business_city\": \"San Francisco\",\n \"business_id\": \"5794\",\n \"business_latitude\": \"37.747228\",\n \"business_location\": {\"lon\": -122.481299, \"lat\": 37.747228},\n \"business_longitude\": \"-122.481299\",\n \"business_name\": \"Soup-or-Salad\",\n \"business_phone_number\": \"+14155752700\",\n \"business_postal_code\": \"94116\",\n \"business_state\": \"CA\",\n \"inspection_date\": \"2016-09-07T00:00:00.000\",\n \"inspection_id\": \"5794_20160907\",\n \"inspection_score\": 96,\n \"inspection_type\": \"Routine - Unscheduled\",\n \"risk_category\": \"Low Risk\",\n \"violation_description\": \"Unapproved or unmaintained equipment or utensils\",\n \"violation_id\": \"5794_20160907_103144\",\n },\n]\n\n\n@pytest.fixture(scope=\"session\")\ndef cloudformation_outputs():\n return extract_cloudformation_outputs()\n\n\n@pytest.fixture(scope=\"session\")\ndef opensearch_password():\n return boto3.client(\"secretsmanager\").get_secret_value(SecretId=\"aws-data-wrangler/opensearch_password\")[\n \"SecretString\"\n ]\n\n\n@pytest.fixture(scope=\"session\")\ndef domain_endpoint_opensearch_1_0(cloudformation_outputs):\n return cloudformation_outputs[\"DomainEndpointwrangleros10\"]\n\n\n@pytest.fixture(scope=\"session\")\ndef domain_endpoint_elasticsearch_7_10_fgac(cloudformation_outputs):\n return cloudformation_outputs[\"DomainEndpointwrangleres710fgac\"]\n\n\ndef test_connection_opensearch_1_0(domain_endpoint_opensearch_1_0):\n client = wr.opensearch.connect(host=domain_endpoint_opensearch_1_0)\n print(client.info())\n assert len(client.info()) > 0\n\n\ndef test_connection_opensearch_1_0_https(domain_endpoint_opensearch_1_0):\n client = wr.opensearch.connect(host=f\"https://{domain_endpoint_opensearch_1_0}\")\n print(client.info())\n assert 
len(client.info()) > 0\n\n\ndef test_connection_elasticsearch_7_10_fgac(domain_endpoint_elasticsearch_7_10_fgac, opensearch_password):\n client = wr.opensearch.connect(\n host=domain_endpoint_elasticsearch_7_10_fgac, username=\"test\", password=opensearch_password\n )\n print(client.info())\n assert len(client.info()) > 0\n\n\n@pytest.fixture(scope=\"session\")\ndef opensearch_1_0_client(domain_endpoint_opensearch_1_0):\n client = wr.opensearch.connect(host=domain_endpoint_opensearch_1_0)\n return client\n\n\n@pytest.fixture(scope=\"session\")\ndef elasticsearch_7_10_fgac_client(domain_endpoint_elasticsearch_7_10_fgac, opensearch_password):\n client = wr.opensearch.connect(\n host=domain_endpoint_elasticsearch_7_10_fgac, username=\"test\", password=opensearch_password\n )\n return client\n\n\n# testing multiple versions\n@pytest.fixture(params=[\"opensearch_1_0_client\", \"elasticsearch_7_10_fgac_client\"])\ndef client(request):\n return request.getfixturevalue(request.param)\n\n\ndef test_create_index(client):\n index = \"test_create_index\"\n wr.opensearch.delete_index(client, index)\n time.sleep(0.5) # let the cluster clean up\n response = wr.opensearch.create_index(\n client=client,\n index=index,\n mappings={\"properties\": {\"name\": {\"type\": \"text\"}, \"age\": {\"type\": \"integer\"}}},\n settings={\"index\": {\"number_of_shards\": 1, \"number_of_replicas\": 1}},\n )\n assert response.get(\"acknowledged\", False) is True\n\n\ndef test_delete_index(client):\n index = \"test_delete_index\"\n wr.opensearch.create_index(client, index=index)\n response = wr.opensearch.delete_index(client, index=index)\n print(response)\n assert response.get(\"acknowledged\", False) is True\n\n\ndef test_index_df(client):\n response = wr.opensearch.index_df(\n client,\n df=pd.DataFrame([{\"_id\": \"1\", \"name\": \"John\"}, {\"_id\": \"2\", \"name\": \"George\"}, {\"_id\": \"3\", \"name\": \"Julia\"}]),\n index=\"test_index_df1\",\n )\n print(response)\n assert response.get(\"success\", 0) == 3\n\n\ndef test_index_documents(client):\n response = wr.opensearch.index_documents(\n client,\n documents=[{\"_id\": \"1\", \"name\": \"John\"}, {\"_id\": \"2\", \"name\": \"George\"}, {\"_id\": \"3\", \"name\": \"Julia\"}],\n index=\"test_index_documents1\",\n )\n print(response)\n assert response.get(\"success\", 0) == 3\n\n\ndef test_index_documents_id_keys(client):\n response = wr.opensearch.index_documents(\n client, documents=inspections_documents, index=\"test_index_documents_id_keys\", id_keys=[\"inspection_id\"]\n )\n print(response)\n\n\ndef test_index_documents_no_id_keys(client):\n response = wr.opensearch.index_documents(\n client, documents=inspections_documents, index=\"test_index_documents_no_id_keys\"\n )\n print(response)\n\n\ndef test_search(client):\n index = \"test_search\"\n wr.opensearch.index_documents(\n client, documents=inspections_documents, index=index, id_keys=[\"inspection_id\"], refresh=\"wait_for\"\n )\n df = wr.opensearch.search(\n client,\n index=index,\n search_body={\"query\": {\"match\": {\"business_name\": \"soup\"}}},\n _source=[\"inspection_id\", \"business_name\", \"business_location\"],\n )\n\n print(\"\")\n print(df.to_string())\n assert df.shape[0] == 3\n\n\ndef test_search_filter_path(client):\n index = \"test_search\"\n wr.opensearch.index_documents(\n client, documents=inspections_documents, index=index, id_keys=[\"inspection_id\"], refresh=\"wait_for\"\n )\n df = wr.opensearch.search(\n client,\n index=index,\n search_body={\"query\": {\"match\": 
{\"business_name\": \"soup\"}}},\n _source=[\"inspection_id\", \"business_name\", \"business_location\"],\n filter_path=[\"hits.hits._source\"],\n )\n\n print(\"\")\n print(df.to_string())\n assert df.shape[0] == 3\n\n\ndef test_search_scroll(client):\n index = \"test_search_scroll\"\n wr.opensearch.index_documents(\n client, documents=inspections_documents, index=index, id_keys=[\"inspection_id\"], refresh=\"wait_for\"\n )\n df = wr.opensearch.search(\n client, index=index, is_scroll=True, _source=[\"inspection_id\", \"business_name\", \"business_location\"]\n )\n\n print(\"\")\n print(df.to_string())\n assert df.shape[0] == 5\n\n\ndef test_search_sql(client):\n index = \"test_search_sql\"\n wr.opensearch.index_documents(\n client, documents=inspections_documents, index=index, id_keys=[\"inspection_id\"], refresh=\"wait_for\"\n )\n df = wr.opensearch.search_by_sql(client, sql_query=f\"select * from {index}\")\n\n print(\"\")\n print(df.to_string())\n assert df.shape[0] == 5\n\n\ndef test_index_json_local(client):\n file_path = f\"{tempfile.gettempdir()}/inspections.json\"\n with open(file_path, \"w\") as filehandle:\n for doc in inspections_documents:\n filehandle.write(\"%s\\n\" % json.dumps(doc))\n response = wr.opensearch.index_json(client, index=\"test_index_json_local\", path=file_path)\n print(response)\n assert response.get(\"success\", 0) == 6\n\n\ndef test_index_json_s3(client, path):\n file_path = f\"{tempfile.gettempdir()}/inspections.json\"\n with open(file_path, \"w\") as filehandle:\n for doc in inspections_documents:\n filehandle.write(\"%s\\n\" % json.dumps(doc))\n s3 = boto3.client(\"s3\")\n path = f\"{path}opensearch/inspections.json\"\n bucket, key = wr._utils.parse_path(path)\n s3.upload_file(file_path, bucket, key)\n response = wr.opensearch.index_json(client, index=\"test_index_json_s3\", path=path)\n print(response)\n assert response.get(\"success\", 0) == 6\n\n\ndef test_index_csv_local(client):\n file_path = f\"{tempfile.gettempdir()}/inspections.csv\"\n index = \"test_index_csv_local\"\n df = pd.DataFrame(inspections_documents)\n df.to_csv(file_path, index=False)\n response = wr.opensearch.index_csv(client, path=file_path, index=index)\n print(response)\n assert response.get(\"success\", 0) == 6\n\n\ndef test_index_csv_s3(client, path):\n file_path = f\"{tempfile.gettempdir()}/inspections.csv\"\n index = \"test_index_csv_s3\"\n df = pd.DataFrame(inspections_documents)\n df.to_csv(file_path, index=False)\n s3 = boto3.client(\"s3\")\n path = f\"{path}opensearch/inspections.csv\"\n bucket, key = wr._utils.parse_path(path)\n s3.upload_file(file_path, bucket, key)\n response = wr.opensearch.index_csv(client, path=path, index=index)\n print(response)\n assert response.get(\"success\", 0) == 6\n\n\n@pytest.mark.skip(reason=\"takes a long time (~5 mins) since testing against small clusters\")\ndef test_index_json_s3_large_file(client):\n path = \"s3://irs-form-990/index_2011.json\"\n response = wr.opensearch.index_json(\n client, index=\"test_index_json_s3_large_file\", path=path, json_path=\"Filings2011\", id_keys=[\"EIN\"], bulk_size=20\n )\n print(response)\n assert response.get(\"success\", 0) > 0\n","sub_path":"tests/test_opensearch.py","file_name":"test_opensearch.py","file_ext":"py","file_size_in_byte":12447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"242869205","text":"import tkinter as tk\n\n# ----------- CONSTANTS ------------\n\nBG_COL = \"#EBF2F5\"\nFONT = \"Arial\"\n\nFG_LBL_COL = 
\"#0D1F2D\"\nFG_BTN_COL = \"#0D1F2D\"\nBG_BTN_COL = \"#9EA3B0\"\nHL_COL = \"#9EA3B0\"\n\nFT_LBL_TITLE = (FONT, 24, \"bold\")\nFT_LBL_DESCR = (FONT, 16, \"normal\")\nFT_LBL_NORM = (FONT, 11, \"normal\")\n\nFT_BTN_NORM = (FONT, 10, \"normal\")\n\nWD_TEXTBOXES = 35\n\n# ------------ METHODS -------------\n\n\ndef testing_list():\n for values in range(20):\n lbx_results.insert(tk.END, values)\n\n# --------------- UI ---------------\n\n\nwindow = tk.Tk()\nwindow.title(\"Der Stick\")\nwindow.config(bg=\"#EBF2F5\")\nwindow.iconbitmap(\"images/search-16.ico\")\n\nwindow.columnconfigure([0, 1, 2], minsize=10, weight=1)\nwindow.rowconfigure([0, 1, 2], minsize=10, weight=1)\n\nfr_main = tk.Frame(window, bg=BG_COL, highlightbackground=HL_COL, highlightthickness=1)\n\nfr_main.grid(column=1, row=1, sticky=\"news\")\n\nfr_main.columnconfigure([0, 1, 2, 3, 4, 5], minsize=10)\nfr_main.rowconfigure([0, 1, 2, 3, 4, 5, 6, 7], minsize=10)\n\n# ------------ Widgets ------------\n\nlbl_title = tk.Label(fr_main, text=\"Der Stick\", font=FT_LBL_TITLE, fg=FG_LBL_COL, bg=BG_COL)\nlbl_descr = tk.Label(fr_main, text=\"Unterrichtsvorbereitung\", font=FT_LBL_DESCR, fg=FG_LBL_COL, bg=BG_COL)\nlbl_search = tk.Label(fr_main, text=\"Suche\", font=FT_LBL_NORM, fg=FG_LBL_COL, bg=BG_COL)\nlbl_results = tk.Label(fr_main, text=\"Ergebnisse\", font=FT_LBL_NORM, fg=FG_LBL_COL, bg=BG_COL)\n\nbtn_tags = tk.Button(fr_main, text=\"Stichworte\\nbearbeiten\", width=15, font=FT_BTN_NORM, fg=FG_BTN_COL, bg=BG_BTN_COL)\nbtn_search = tk.Button(fr_main, text=\"Suche\", width=10, font=FT_BTN_NORM, fg=FG_BTN_COL, bg=BG_BTN_COL, command=testing_list)\nbtn_save = tk.Button(fr_main, text=\"Unterrichtseinheit\\nspeichern\", width=15, font=FT_BTN_NORM, fg=FG_BTN_COL, bg=BG_BTN_COL)\n\nent_search = tk.Entry(fr_main, width=WD_TEXTBOXES, highlightthickness=1, highlightbackground=HL_COL, relief=\"flat\")\nlbx_results = tk.Listbox(fr_main, width=WD_TEXTBOXES, height=8, highlightthickness=1, highlightbackground=HL_COL, relief=\"flat\")\nlbx_choice = tk.Listbox(fr_main, width=WD_TEXTBOXES, height=8, highlightthickness=1, highlightbackground=HL_COL, relief=\"flat\")\n\nscroll_results = tk.Scrollbar(fr_main)\nlbx_results.config(yscrollcommand=scroll_results.set)\nscroll_results.config(command=lbx_results.yview)\n\nscroll_choice = tk.Scrollbar(fr_main, highlightthickness=1, highlightbackground=HL_COL)\nlbx_choice.config(yscrollcommand=scroll_choice.set)\nscroll_choice.config(command=lbx_choice.yview)\n\nfr_choice = tk.Frame(fr_main, bg=BG_COL)\n\nbtn_add_files = tk.Button(fr_choice, text=\"Hinzufügen ->\", width=10, font=FT_BTN_NORM, fg=FG_BTN_COL, bg=BG_BTN_COL)\nbtn_remove_files = tk.Button(fr_choice, text=\"<- Entfernen\", width=10, font=FT_BTN_NORM, fg=FG_BTN_COL, bg=BG_BTN_COL)\n\nfr_do_not_empty = tk.Frame(fr_main, bg=BG_COL)\n\nlbl_do_not_empty = tk.Label(fr_do_not_empty, text=\"Ordner vorher nicht leeren\", font=FT_LBL_NORM, fg=FG_LBL_COL, bg=BG_COL)\nchk_do_not_empty = tk.Checkbutton(fr_do_not_empty, bg= BG_COL, highlightthickness=1, highlightbackground=HL_COL, relief=\"flat\")\n\n\n# ------------- Layout --------------\n\nlbl_title.grid(row=1, column=1, columnspan=2, sticky=\"w\")\nlbl_descr.grid(row=2, column=1, columnspan=3, sticky=\"w\")\nlbl_search.grid(row=3, column=1, sticky=\"nw\")\nlbl_results.grid(row=4, column=1, sticky=\"nw\")\n\nent_search.grid(row=3, column=2)\nlbx_results.grid(row=4, column=2, sticky=\"n\")\nlbx_choice.grid(row=4, column=4, sticky=\"n\")\n\nscroll_results.grid(row=4, column=2, 
sticky=\"nes\")\nscroll_choice.grid(row=4, column=4, sticky=\"nes\")\n\nbtn_tags.grid(row=1, column=4, sticky=\"e\")\nbtn_search.grid(row=3, column=3, pady=5)\nbtn_save.grid(row=6, column=4, sticky=\"e\")\n\nfr_choice.grid(row=4, column=3, sticky=\"n\", padx=5, pady=5)\nbtn_add_files.grid(row=0, column=0)\nbtn_remove_files.grid(row=1, column=0)\n\nfr_do_not_empty.grid(row=5, column=3, columnspan=2, sticky=\"e\")\nlbl_do_not_empty.grid(row=0, column=1, sticky=\"w\")\nchk_do_not_empty.grid(row=0, column=0, sticky=\"e\")\n\nwindow.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"177456903","text":"import math\n\ndef combination_sum(nums, target):\n sols = [0] * target\n sols[0] = 1\n for i in range(1,len(sols)):\n sols[i] = sols[i-1] + math.pow(2,i)\n\n return int(sols[target-2])\n\nprint(combination_sum([1,2,3,4], 5))\n\ndef min_path_sum(grid):\n\n for row in range(len(grid)-1, -1, -1):\n for col in range(len(row)-1, -1, -1):\n\n cost += min(grid[row-1][col], grid[row][col-1])\n\n\ndef min_cost_stairs(steps):\n\n cost = 0\n for i in range(2,len(steps)):\n cost[i] += min(steps[i-1], steps[i-2])\n\n return min(cost[len(steps)-1], cost[len(steps)-2])\n\n\ndef climb_stairs(n):\n\n if n == 1:\n return 1\n\n steps = [0] * (n+1)\n steps[0] = 1\n steps[1] = 1\n\n for k in range(2,len(steps)):\n steps[k] = steps[k-1] + steps[k-2]\n\n return steps[n]\n\ndef coins(n, denoms):\n built_up = [2147483647] * (n+1)\n built_up[0] = 0\n for denom in denoms:\n for j in range(len(built_up)):\n if denom <= j:\n diff = j - denom\n built_up[j] = min(1 + built_up[diff], built_up[j])\n\n return built_up[n]\n\n\ndef max_subarray(nums):\n \n cur_max = nums[0] \n global_max = nums[0]\n # at each index compute max \n # [3,5,-9,1] = 3, 5 or 8, -9 or -1, 1 or 0\n for k in range(1,len(nums)):\n if cur_max > global_max:\n global_max = cur_max\n cur_max = max(nums[k],cur_max+nums[k])\n\n return global_max \n\ndef max_profit(prices):\n\n min_price = float(\"inf\") \n max_price = 0\n index = 0\n for i in range(len(prices)):\n if prices[i] < min_price:\n min_price = prices[i]\n\n else:\n max_price = max(max_price, prices[i] - min_price)\n\n return max_price\n\ndef house_robber(nums):\n # keep track of cur_max and prev_max \n # loop over array at each i check if i + prev_max > cur_max. 
if so then update cur_max and prev_max\n\n cur_max = max(nums[0], nums[1])\n prev_max = min(nums[0], nums[1])\n total = 0\n\n for i in range(2, len(nums)):\n\n total = max(cur_max, prev_max + nums[i])\n prev_max = cur_max\n cur_max = total\n\n return cur_max\n\n \n","sub_path":"dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"647709765","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\n\nimport util\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self):\n \"\"\"\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state):\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state):\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. For any other maze, the\n sequence of moves will be incorrect, so only use this for tinyMaze.\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\ndef depthFirstSearch(problem):\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. 
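# The graph-search implementations below all thread a copy of the action list
# through their frontier alongside each state. A compact generic skeleton of
# that (state, actions) bookkeeping, with a toy successor function invented for
# the sketch (the real ones return (successor, action, stepCost) triples):
def generic_dfs(start, is_goal, successors):
    stack = [(start, [])]
    seen = {start}
    while stack:
        state, actions = stack.pop()
        if is_goal(state):
            return actions
        for nxt, action, _cost in successors(state):
            if nxt not in seen:
                seen.add(nxt)
                stack.append((nxt, actions + [action]))
    return None

# toy 1-D world: walk from 0 to 3 by +1 steps
assert generic_dfs(0, lambda s: s == 3,
                   lambda s: [(s + 1, 'East', 1)]) == ['East', 'East', 'East']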
Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print \"Start:\", problem.getStartState()\n print \"Is the start a goal?\", problem.isGoalState(problem.getStartState())\n print \"Start's successors:\", problem.getSuccessors(problem.getStartState()) \n \"\"\"\n from util import Stack\n\n # On initialization creates stack for the open list, \n # a set for seen states \n # and list which will contain the actions corresponding to the state it belongs to.\n dfsStack = Stack() \n geheugen = set()\n actions = []\n\n\n\n # We add the startstate to the memory and push the neighbouring states,\n # as a (state, action) tuple on the dfsStack, ready to be processed.\n geheugen.add(problem.getStartState())\n for node in problem.getSuccessors(problem.getStartState()):\n dfsStack.push((node, actions[:])) \n\n # While the dfsStack is not empty:\n # - we will pop a state from the stack\n # - add it to the memory \n # - add the corresponding direction the the action list of the state being processed\n # - check if the current state is a goal state, if so return the action list\n # - add the succesor nodes to the dfsStack if we havent seen them yet\n # if the stack is empty and we havent found the goals state returns None\n while not dfsStack.isEmpty():\n current_Node = dfsStack.pop()\n geheugen.add(current_Node[0][0]) \n current_Node[1].append(current_Node[0][1]) \n \n if problem.isGoalState(current_Node[0][0]): \n return current_Node[1] \n\n for x in problem.getSuccessors(current_Node[0][0]):\n if x[0] not in geheugen:\n dfsStack.push((x, current_Node[1][:]))\n\n return None\n\ndef breadthFirstSearch(problem):\n \"\"\"\n Search the shallowest nodes in the search tree first.\n \"\"\"\n from util import Queue\n\n # On initialization creates Queue for the open list,\n # a set for seen states\n # and list which will contain the actions corresponding to the state it belongs to.\n bfsQueue = Queue()\n geheugen = set()\n actions = []\n\n # We add the startstate to the memory and push the neighbouring states\n # as a (state, action) tuple on the bfsQueue and adding them to the memory.\n geheugen.add(problem.getStartState())\n for node in problem.getSuccessors(problem.getStartState()):\n geheugen.add(node[0])\n bfsQueue.push((node, actions[:]))\n\n # While the bfsQueue is not empty:\n # - we will pop a state from the Queue\n # - add the corresponding direction the the action list of the state being processed\n # - check if the current state is a goal state, if so return the action list\n # - add the succesor nodes to the bfsQueue if we havent seen them yet\n # - and add it to the memory \n # if the queue is empty and we havent found the goals state returns None\n while not bfsQueue.isEmpty():\n current_Node = bfsQueue.pop()\n current_Node[1].append(current_Node[0][1])\n\n if problem.isGoalState(current_Node[0][0]):\n return current_Node[1]\n\n for x in problem.getSuccessors(current_Node[0][0]):\n if x[0] not in geheugen:\n geheugen.add(x[0])\n bfsQueue.push((x, current_Node[1][:]))\n return None\n\n\ndef uniformCostSearch(problem):\n \"\"\"Search the node of least total cost first.\n \"\"\"\n from util import PriorityQueue\n\n # On initialization creates PriorityQueue for the open list, \n # a dictionary where we keep track of the seen states and the cost for reaching the states\n # and list which will contain the actions corresponding to the state it belongs to.\n ucsQueue = 
PriorityQueue()\n geheugen = dict()\n actions = []\n\n # Set the cost of the startstate to 0\n # for each succesor state we:\n # - push the state in the uscQueue based on the cost to reach it\n # - add the node to the memory with the corresponding cost\n geheugen[problem.getStartState()] = 0\n for node in problem.getSuccessors(problem.getStartState()):\n ucsQueue.push((node, actions[:]), node[2])\n geheugen[node[0]] = node[2]\n \n # While the uscQueue is not empty:\n # - we will pop a state from the Queue\n # - add the corresponding direction the the action list of the state being processed\n # - check if the current state is a goal state, if so return the action list\n # - for the succesor states:\n # - compute the cost to reach the state\n # - if we havent seen the state yet add it to the memory\n # - else if the succesor we are looking at is cheaper to reach via this path then update the memory\n # if the queue is empty and we havent found the goals state returns None\n while not ucsQueue.isEmpty():\n current_Node = ucsQueue.pop()\n current_Node[1].append(current_Node[0][1])\n\n if problem.isGoalState(current_Node[0][0]):\n return current_Node[1]\n\n for node in problem.getSuccessors(current_Node[0][0]):\n costSoFar = geheugen[current_Node[0][0]] + node[2]\n if node[0] not in (geheugen):\n ucsQueue.push((node, current_Node[1][:]), costSoFar)\n geheugen[node[0]] = costSoFar\n elif geheugen[node[0]] > costSoFar:\n ucsQueue.push((node, current_Node[1][:]), costSoFar)\n geheugen[node[0]] = costSoFar\n return None\n\ndef nullHeuristic(state, problem=None):\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. This heuristic is trivial.\n \"\"\"\n return 0\n\ndef aStarSearch(problem, heuristic=nullHeuristic):\n \"\"\"\n Search the node that has the lowest combined cost and heuristic first.\n \"\"\"\n from util import PriorityQueue\n\n # On initialization creates PriorityQueue for the open list, \n # a dictionary where we keep track of the seen states and the cost for reaching the states\n # and list which will contain the actions corresponding to the state it belongs to.\n aStarQueue = PriorityQueue()\n geheugen = dict()\n actions = []\n\n # Set the cost of the startstate to 0\n # for each succesor state we:\n # - push the state in the aStarQueue based on the cost to reach it but this time with the added heuristic costs\n # - add the node to the memory with the corresponding cost\n geheugen[problem.getStartState()] = 0\n for node in problem.getSuccessors(problem.getStartState()):\n aStarQueue.push((node, actions[:]), node[2] + heuristic(node[0], problem) )\n geheugen[node[0]] = node[2]\n\n\n # While the aStarQueue is not empty:\n # - we will pop a state from the Queue\n # - add the corresponding direction the the action list of the state being processed\n # - check if the current state is a goal state, if so return the action list\n # - for the succesor states:\n # - compute the cost to reach the state\n # - if we havent seen the state yet add it to the memory and push it in the queue based on cost(=pathcosts + heuristic)\n # - else if the succesor we are looking at is cheaper to reach via this path then update the memory and push it on the queue\n # if the queue is empty and we havent found the goals state returns None\n while not aStarQueue.isEmpty():\n current_Node = aStarQueue.pop()\n current_Node[1].append(current_Node[0][1])\n\n if problem.isGoalState(current_Node[0][0]):\n return current_Node[1]\n\n for node in 
problem.getSuccessors(current_Node[0][0]):\n costSoFar = geheugen[current_Node[0][0]] + node[2]\n if node[0] not in (geheugen):\n aStarQueue.push((node, current_Node[1][:]), costSoFar + heuristic(node[0], problem))\n geheugen[node[0]] = costSoFar \n elif geheugen[node[0]] > costSoFar:\n aStarQueue.push((node, current_Node[1][:]), costSoFar + heuristic(node[0], problem))\n geheugen[node[0]] = costSoFar \n return None\n\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n","sub_path":"search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":10904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"558798918","text":"import re\nimport time\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((\"localhost\", 16834))\n\nlogpath = \"C:/Program Files (x86)/Steam/steamapps/common/Path of Exile/logs/Client.txt\"\nmaps = [ \"Courthouse\" ]\n\nf = open(logpath, encoding=\"utf8\")\n\nlocation = \"\"\nline = f.readline()\nwhile line:\n m = re.search(\"You have entered ([^.]*)\", line)\n if m:\n location = m.group(1)\n line = f.readline()\n\nwhile True:\n time.sleep(0.05)\n line = f.readline()\n while line:\n m = re.search(\"You have entered ([^.]*)\", line)\n if m:\n newlocation = m.group(1)\n if location in maps and re.search(\"Hideout\", newlocation):\n s.send(b'pause\\r\\n')\n if re.search(\"Hideout\", location) and newlocation in maps:\n# s.send(b'split\\r\\n')\n s.send(b'reset\\r\\n')\n s.send(b'starttimer\\r\\n')\n location = newlocation\n print(location)\n line = f.readline()\n\n","sub_path":"poesplit.py","file_name":"poesplit.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"184955554","text":"__author__ = 'lucile'\n# -*- coding: = utf-8 -*-\n\n#from app.Recepteur.nda import nda\nfrom app.Recepteur.nsa import nsa\nimport re\nfrom flask import Flask, jsonify\nfrom flask import request\nfrom flask.ext.log import Logging\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.security import Security, SQLAlchemyUserDatastore, \\\n UserMixin, RoleMixin, login_required , utils, roles_accepted,confirmable,registerable\nfrom flask_mail import Mail\nfrom flask.ext.security.utils import send_mail, md5, url_for_security, get_token_status,\\\n config_value\nfrom flask.ext.security.signals import user_confirmed, confirm_instructions_sent\nfrom werkzeug.datastructures import ImmutableMultiDict as IM\n\napp = Flask(__name__)\napp.config['FLASK_LOG_LEVEL'] = 'INFO'\nflask_log = Logging(app)\napp.logger.debug('Testing a debug message')\napp.config['DEBUG'] = True\napp.config['SECRET_KEY'] = 'zijdle,t7ie1'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///rsdb.db'\napp.config['SECURITY_URL_PREFIX'] = \"/bck\"\napp.config['MAIL_SERVER'] = 'smtp.obs-nancay.fr'\n#app.config['CONFIRMABLE'] = True\n#app.config['REGISTERABLE'] = True\napp.config['SECURITY_EMAIL_SENDER'] = 'admin@ambari-rsdb.obs-nancay.fr'\napp.config['SECURITY_RECOVERABLE'] = True\napp.config['SECURITY_REGISTERABLE'] = True\napp.config['SECURITY_CONFIRMABLE'] = True\napp.config['SECURITY_PASSWORD_HASH'] ='sha256_crypt'\napp.config['SECURITY_PASSWORD_SALT'] = 'encore'\napp.config['SECURITY_UNAUTHORIZED_VIEW'] = 'bck/login'\n#app.config['SECURITY_CONFIRM_URL' ] = '/../confirm'\n#app.config['SECURITY_LOGIN_URL'] = '/bck/login'\n#app.config['SECURITY_LOGOUT_URL'] 
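# poesplit.py above tails Client.txt by polling readline(), which returns an
# empty string at end of file. A compact variant of that loop as a generator;
# it starts at the current end of file (the original instead replays history
# first to seat its initial state), and path/pattern handling is simplified:
import re
import time

def follow(path, pattern, poll=0.05):
    with open(path, encoding='utf8') as f:
        f.seek(0, 2)                  # jump to end of file
        while True:
            line = f.readline()
            if not line:
                time.sleep(poll)      # nothing new yet; poll again
                continue
            m = re.search(pattern, line)
            if m:
                yield m.group(1)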
='/bck/logout'\n#app.config['SECURITY_REGISTER_URL'] ='/bck/register'\n#app.config['SECURITY_RESET_URL'] ='/bck/reset'\n#app.config['SECURITY_CHANGE_URL'] ='/bck/change'\n\n#app.config['SECURITY_POST_LOGIN_VIEW'] ='/bck/'\n#app.config['SECURITY_POST_LOGOUT_VIEW'] ='/bck/'\n#app.config['MAIL_PORT'] = 465\n#app.config['MAIL_USE_SSL'] = True\n#app.config['MAIL_USERNAME'] = 'username'\n#app.config['MAIL_PASSWORD'] = 'password'\nmail = Mail(app)\ndb = SQLAlchemy(app)\n\nrecepteurs = {\n    #'Nda':nda,\n    #'Nrh':nrh,\n    'Nsa':nsa\n}\n# Define models\nroles_users = db.Table('roles_users',\n        db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),\n        db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))\n\nclass Role(db.Model, RoleMixin):\n    id = db.Column(db.Integer(), primary_key=True)\n    name = db.Column(db.String(80), unique=True)\n    description = db.Column(db.String(255))\n\nclass User(db.Model, UserMixin):\n    id = db.Column(db.Integer, primary_key=True)\n    email = db.Column(db.String(255), unique=True)\n    password = db.Column(db.String(255))\n    active = db.Column(db.Boolean())\n    confirmed_at = db.Column(db.DateTime())\n    #token = db.Column(db.String(255))\n    roles = db.relationship('Role', secondary=roles_users,\n                            backref=db.backref('users', lazy='dynamic'))\n# Setup Flask-Security\nuser_datastore = SQLAlchemyUserDatastore(db, User, Role)\nsecurity = Security(app, user_datastore,send_confirmation_form=True)\n\n# Create a user to test with\n\"\"\"\n@app.before_first_request\ndef role():\n    user_datastore.create_role(name=\"view\", description=\"\")\n    user_datastore.create_role(name=\"admin\", description=\"\")\n    user_datastore.create_role(name=\"research\", description=\"\")\n    db.session.commit()\n\"\"\"\n\n@app.route('/confirm/<token>',methods=['POST'])\ndef confirm(token):\n    expired, invalid, user= utils.get_token_status(token, \"confirm\", max_age=\"CONFIRM_EMAIL\", return_data=False)\n    if expired or invalid:\n        response = {\n            \"status\": False,\n            \"message\": \"Not Confirmed\",\n            \"route\":\"register\"\n        }\n    else:\n        if user:\n\n            user_datastore.activate_user(user)\n            confirmable.confirm_user(user)\n            db.session.commit()\n            response = {\n                \"status\": True,\n                \"message\": \"Confirmed\",\n                \"route\":\"login\",\n                \"confirmed_at\":user.confirmed_at\n\n            }\n        else:\n            response = {\n                \"status\": False,\n                \"message\": \"Not Confirmed\",\n                \"route\":\"confirm\"\n\n            }\n\n    return jsonify(response )\n\n\n@app.route('/register',methods=['POST'])\ndef register():\n\n    json = request.get_json()\n    mail = json[\"mail\"]\n    password = json[\"password\"]\n    #mail = request.args.get(\"mail\")\n    #password = request.args.get(\"password\")\n\n    if user_datastore.get_user(mail):\n        response = {\n            \"status\": True,\n            \"message\": \"Already registered\",\n            \"route\":\"login\",\n            \"user\":mail\n        }\n    else:\n        #p = re.compile(ur'^((?=\\S*?[A-Z])(?=\\S*?[a-z])(?=\\S*?[0-9]).{6,})\\S$')\n\n\n        valid_p = re.search('^((?=\\S*?[A-Z])(?=\\S*?[a-z])(?=\\S*?[0-9]).{6,})\\S$', password)\n        if valid_p:\n            #p = re.compile(ur'^[\\w\\d](\\.?[\\w\\d_-])*@[\\w\\d]+\\.([\\w]{1,6}\\.)?[\\w]{2,6}$')\n\n\n            valid_m = re.findall('^[\\w\\d_-](\\.?[\\w\\d_-])*@[\\w\\d_-]+\\.([\\w]{1,6}\\.)?[\\w]{2,6}$', mail)\n            if valid_m:\n                p = utils.encrypt_password(password)\n                u = user_datastore.create_user(email=mail, password=p,roles=[\"view\"],confirmed_at=None)\n\n                db.session.commit()\n                token = confirmable.generate_confirmation_token(u)\n                confirmation_link = \"http://ambari-rsdb.obs-nancay.fr/#!/confirm/\" + token\n\n                send_mail(config_value('EMAIL_SUBJECT_CONFIRM'), u.email,\n                          'confirmation_instructions', user=u,\n                          confirmation_link=confirmation_link)\n\n                #confirm_instructions_sent.send(app._get_current_object(), user=user)\n\n                #token = confirmable.send_confirmation_instructions(u)\n                #utils.send_mail(\"test\", mail, 'send_confirmation')\n\n                response = {\n                    \"status\": True,\n                    \"message\": \"registered\",\n                    \"route\":\"confirm\",\n                    #\"mail\":mail,\n                    \"roles\":[\"view\"]\n                }\n            else:\n                response = {\n                    \"status\": False,\n                    \"message\": \"invalid mail\",\n                    \"route\":\"register\"\n                    #\"user\":mail\n                }\n        else:\n            response = {\n                \"status\": False,\n                \"message\": \"invalid password\",\n                \"route\":\"register\"\n                #\"user\":mail\n            }\n\n    return jsonify(response )\n\n@app.route('/login',methods=['GET'])\ndef login():\n\n    n = request.args.get(\"next\")\n    #mail = json[\"mail\"]\n    #json = request.get_json()\n    #n = json[\"next\"]\n    response = {\n        \"status\": False,\n        \"message\": \"Not authenticated\",\n        \"route\":\"login\",\n        \"next\":n\n    }\n    return jsonify(response )\n    #return render_template('index.html')\n@app.route('/logout',methods=['POST'])\ndef logout():\n\n    utils.logout_user()\n    response = {\n        \"status\": True,\n        \"message\": \"Not authenticated\",\n        \"route\":\"home\",\n        \"mail\":\"\",\n        \"roles\":[],\n        \"active\":False\n    }\n    #resp.set_cookie('username', expires=0)\n    #session.pop('username', None)\n    return jsonify(response )\n    #return render_template('index.html')\n@app.route('/log',methods=['POST'])\ndef log():\n    json = request.get_json()\n    app.logger.debug(json)\n    #mail = request.args.get(\"mail\")\n    mail = json[\"mail\"]\n    app.logger.debug(mail)\n    #password = request.args.get(\"password\")\n    password = json[\"password\"]\n    #url = json[\"url\"]\n\n    u = user_datastore.get_user(mail)\n    if u:\n\n        v = utils.verify_password(password, u.password)\n        if v:\n            if u.confirmed_at:\n                utils.login_user(u, remember=True)\n                i=[]\n                for v in u.roles:\n                    i.append(v.name)\n\n                response = {\n                    \"status\": True,\n                    \"message\": \"Authenticated\",\n                    #\"url\":url,\n                    \"route\":\"home\",\n                    \"mail\":mail,\n                    \"roles\":i,\n                    \"confirmed_at\":u.confirmed_at,\n                    \"active\":u.active\n                }\n            else:\n                response = {\n                    \"status\": False,\n                    \"message\": \"Not logged\",\n                    \"route\":\"confirm\",\n                    \"mail\":\"\",\n                    \"roles\":[],\n                    \"active\":False,\n                    #\"url\":url\n                }\n        else:\n            response = {\n                \"status\": False,\n                \"message\": \"Not logged\",\n                \"route\":\"login\",\n                \"mail\":\"\",\n                \"roles\":[],\n                \"active\":False,\n                #\"url\":url\n            }\n\n\n\n    else:\n        response = {\n            \"status\": False,\n            \"message\": \"Not a user\",\n            \"route\":\"register\",\n            \"mail\":\"\",\n            \"roles\":[],\n            \"active\":False,\n            #\"url\":url\n        }\n    return jsonify(response )\n\n\n\n\n\n\n@app.route('/instrument/getImage/<name>/<option>',methods=['POST'])\ndef getImage(name,option):\n    #if not current_user.is_authenticated:\n        #return current_app.login_manager.unauthorized()\n    app.logger.debug('Received')\n    r = recepteurs[name]()\n    try:\n        filtre = request.get_json()\n\n\n        app.logger.debug(filtre)\n    except:\n        app.logger.debug('prob filtre')\n        app.logger.debug(request.form)\n    #filtre = request.args.get(\"filtre\")\n\n    resultat = r.getRequest(filtre,\"getImage\",option)\n    #return filtre\n    return jsonify(resultat)\n\n@app.route('/instrument/getIntegre/<name>/<option>',methods=['POST'])\n#@login_required\n#@roles_accepted('view', 'admin')\ndef getIntegre(name,option):\n    #if not current_user.is_authenticated:\n        #return current_app.login_manager.unauthorized()\n\n    r = recepteurs[name]()\n    filtre = request.get_json()\n    #filtre = request.args.get(\"filtre\")\n    resultat = 
r.getRequest(filtre,\"getIntegre\",option)\n\n    return jsonify(resultat)\n\n\n\nif __name__ == \"__main__\":\n    app.debug = True\n    app.run()\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"652442166","text":"from django.http import Http404, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views import View\n\nfrom sso.wizard import IdentityWizard\n\n\nclass AccountCreateIdentityView(View):\n    def dispatch(self, request, provider, *args, **kwargs):  # pylint:disable=arguments-differ\n        if not IdentityWizard.manager.knows(provider):\n            raise Http404\n        wizard = IdentityWizard.get_for_request(request)\n\n        if wizard is None or not wizard.is_valid():\n            wizard = IdentityWizard(\n                provider_key=provider,\n                request=request,\n            )\n\n        if request.method != 'POST' and not wizard.is_valid():\n            return HttpResponseRedirect(reverse('users:login'))\n\n        wizard.initialize()\n\n        return wizard.current_step()\n","sub_path":"polyaxon/api/oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"158469002","text":"from googleapiclient.discovery import build\n\napi_key = \"API KEY\"\n\nyoutube = build('youtube', 'v3', developerKey=api_key)\n\nrequest = youtube.channels().list(part='statistics', forUsername='MrBeast6000') \n\nresponse = request.execute()\n\nprint(response)\n\n#So far only prints Mr. Beast's info...\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"237115075","text":"import datetime\n\nfrom projet_b11.import_databases.Pfam import Pfam\nfrom projet_b11.import_databases.ThreeDid import ThreeDid\nfrom projet_b11.import_databases.DomainInteractionUpdater import DomainInteractionUpdater\n\nlast_update_filename = 'last_update_date.txt'\n\n\ndef update_db():\n\n    inphinity = DomainInteractionUpdater()\n    updated = False\n    interaction_inserted = 0\n\n    # 3did\n    did = ThreeDid()\n    if did.has_new_version():\n        did.get_interactions()\n        interaction_inserted += inphinity.update_inphinity_database(did.domain_interactions, '3did')\n        updated = True\n\n    # Pfam\n    pfam = Pfam()\n    if pfam.has_new_version():\n        pfam.get_interactions()\n        interaction_inserted += inphinity.update_inphinity_database(pfam.domain_interactions, 'iPfam')\n        updated = True\n\n    if updated:\n        with open(last_update_filename, 'w') as f:\n            f.writelines(datetime.datetime.today().__str__())\n\n\ndef get_last_update_date():\n    try:\n        with open(last_update_filename, 'r') as f:\n            return f.readline()\n    except FileNotFoundError:\n        return ''\n","sub_path":"projet_b11/import_databases/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"580652848","text":"'''0208\nGiven an m x n matrix of non-negative integers representing the height of each unit cell on a continent. The \"Pacific\" touches the continent's left and top edges, while the \"Atlantic\" touches its right and bottom edges.\n\nWater can only flow up, down, left, or right, and only from a cell to a neighbour of equal or lower height.\n\nFind the coordinates of all land cells from which water can flow to both the \"Pacific\" and the \"Atlantic\".\n\nNote:\n\nThe order of the output coordinates does not matter.\nBoth m and n are less than 150.\n\nExample:\n\nGiven the following 5x5 matrix:\n\n Pacific ~   ~   ~   ~   ~ \n  ~  1   2   2   3  (5) *\n  ~  3   2   3  (4) (4) *\n  ~  2   4  (5)  3   1  *\n  ~ (6) (7)  1   4   5  *\n  ~ (5)  1   1   2   4  *\n  *   *   *   *   * Atlantic\n\nReturn:\n\n[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (the cells shown in parentheses above).\n\n'''\nclass Solution:\n    
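# Runs a DFS inward from each ocean's border cells and returns the cells reached by both searches.\n    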
def pacificAtlantic(self, matrix: List[List[int]]) -> List[List[int]]:\n        # Idea: start from the border cells; the key pieces are p_visited, a_visited and dfs\n        '''Use two 2D arrays to record, for every cell, whether it can reach the Pacific and whether it can reach the Atlantic. Then walk along the four borders and search with those border cells as starting points. Note: because we search from the border toward the middle, a new cell must be no lower than the current cell.\nWorst-case time complexity is O((M+N)*MN), space complexity is O(MN).'''\n        if not matrix : return []\n        m, n = len(matrix), len(matrix[0]) #row,col\n        p_visited = [[False] * n for _ in range(m)]# can reach the Pacific\n        a_visited = [[False] * n for _ in range(m)]# can reach the Atlantic\n        for i in range(m):\n            self.dfs(p_visited, matrix, m, n, i, 0)# the whole left edge reaches the Pacific\n            self.dfs(a_visited, matrix, m, n, i, n -1) # the whole right edge reaches the Atlantic\n        for j in range(n):\n            self.dfs(p_visited, matrix, m, n, 0, j)# top edge\n            self.dfs(a_visited, matrix, m, n, m - 1, j)\n        res = []\n        for i in range(m):\n            for j in range(n):\n                if p_visited[i][j] and a_visited[i][j]:# reachable from both oceans\n                    res.append([i, j])\n        return res\n    # key helper\n    def dfs(self, visited, matrix, m, n, i, j):# marks every cell from which water can flow to this ocean\n        visited[i][j] = True\n        directions = [(-1, 0), (1, 0), (0, 1), (0, -1)]\n        for dire in directions:\n            x, y = i + dire[0], j + dire[1]\n            if x < 0 or x >= m or y < 0 or y >= n or visited[x][y] or matrix[x][y] < matrix[i][j]:\n                continue # skip out-of-bounds, already-visited, or lower neighbours\n            self.dfs(visited, matrix, m, n, x, y)\n","sub_path":"leetcode_solution/leetcode类别/8BFS广度优先搜索/中等/.ipynb_checkpoints/417. 太平洋大西洋水流问题-checkpoint.py","file_name":"417. 太平洋大西洋水流问题-checkpoint.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"285820382","text":"#Name: Julia Yu\r\n#Date: October 25, 2019\r\n#Email: julia.yu83@myhunter.cuny.edu\r\n#Inspired by 2018 Nifty Program by Phil Ventura:\r\n#  This program uses his turtle setup but processing\r\n#  of data is with Pandas.\r\n\r\nimport turtle\r\nimport pandas as pd\r\n\r\ndef setup(windowTitle):\r\n    \"\"\"Creates the Turtle and the Screen with the map background\r\n    and coordinate system set to match latitude and longitude.\r\n    :return: a tuple containing the Turtle and the Screen\r\n    DO NOT CHANGE THE CODE IN THIS FUNCTION!\r\n    \"\"\"\r\n    screen = turtle.Screen()\r\n    screen.title(windowTitle)\r\n\r\n    # this assures that the size of the screen will match the map image:\r\n    screen.setup(800, 404)\r\n    #Set coordinates for latitude and longitude:\r\n    screen.setworldcoordinates(-180, -90, 180, 90)\r\n\r\n    # ... 
which is the same size as our image\r\n # now set the background to our space image\r\n screen.bgpic(\"mapNASA.gif\")\r\n\r\n t = turtle.Turtle()\r\n t.pensize(1)\r\n t.color('red')\r\n t.penup()\r\n\r\n return t, screen\r\n\r\ndef animate(t,lat,lon,wind):\r\n if wind > 157:\r\n t.goto(lon, lat)\r\n t.color(\"red\")\r\n t.pensize(5)\r\n # t.stamp()\r\n elif wind >= 130 and wind <= 156:\r\n t.goto(lon, lat)\r\n t.color(\"orange\")\r\n t.pensize(4)\r\n # t.stamp()\r\n elif wind >= 111 and wind <= 129:\r\n t.goto(lon, lat)\r\n t.color(\"yellow\")\r\n t.pensize(3)\r\n # t.stamp()\r\n elif wind >= 96 and wind <= 110:\r\n t.goto(lon, lat)\r\n t.color(\"green\")\r\n t.pensize(2)\r\n # t.stamp()\r\n elif wind >= 74 and wind <= 95:\r\n t.goto(lon, lat)\r\n t.color(\"blue\")\r\n t.pensize(1)\r\n # t.stamp()\r\n else:\r\n t.goto(lon, lat)\r\n t.color(\"white\")\r\n t.pensize(1)\r\n # t.stamp()\r\n\r\n return(t)\r\n\r\ndef main():\r\n \"\"\"Animates the path of hurricane from file:\r\n \"\"\"\r\n hFile = input('Enter file name: ')\r\n t, wn = setup(hFile)\r\n\r\n df = pd.read_csv(hFile)\r\n for index, row in df.iterrows():\r\n lat = int(row[\"Lat\"])\r\n lon = int(row[\"Lon\"])\r\n wind = row[\"Wind\"]\r\n print(lat,lon,wind)\r\n animate(t,lat,lon,wind)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Comp Sci 127 hw/hurricane_jy.py","file_name":"hurricane_jy.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"142788974","text":"# Copyright The IETF Trust 2022, All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = 'Richard Zilincik'\n__copyright__ = 'Copyright The IETF Trust 2022, All Rights Reserved'\n__license__ = 'Apache License, Version 2.0'\n__email__ = 'richard.zilincik@pantheon.tech'\n\nimport json\nimport os\nimport unittest\n\nfrom parseAndPopulate import integrity as itg\nfrom utility.create_config import create_config\nfrom utility.yangParser import parse\n\n\nclass TestIntegrityClass(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n config = create_config()\n cls.module_dir = os.path.join(os.environ['BACKEND'], 'tests/resources/integrity')\n cls.yang_models = config.get('Directory-Section', 'yang-models-dir')\n\n def module_path(self, name: str) -> str:\n return os.path.join(self.module_dir, f'{name}.yang')\n\n def test_check_revision(self):\n good = parse(self.module_path('good'))\n assert good\n self.assertTrue(itg.check_revision(good))\n\n missing_revision = parse(self.module_path('missing-revision'))\n assert missing_revision\n self.assertFalse(itg.check_revision(missing_revision))\n\n invalid_revision = parse(self.module_path('invalid-revision'))\n assert invalid_revision\n self.assertFalse(itg.check_revision(invalid_revision))\n\n def test_check_namespace(self):\n good = parse(self.module_path('good'))\n assert good\n self.assertTrue(itg.check_namespace(good))\n\n missing_namespace = parse(self.module_path('missing-namespace'))\n 
assert missing_namespace\n        self.assertFalse(itg.check_namespace(missing_namespace))\n\n        invalid_namespace = parse(self.module_path('invalid-namespace'))\n        assert invalid_namespace\n        self.assertFalse(itg.check_namespace(invalid_namespace))\n\n    def test_check_dependencies(self):\n        good = parse(self.module_path('good'))\n        assert good\n\n        all_imports, missing_imports = itg.check_dependencies('import', good, self.module_dir)\n        self.assertSetEqual(all_imports, {'invalid-revision'})\n        self.assertFalse(missing_imports)\n\n        all_includes, missing_includes = itg.check_dependencies('include', good, self.module_dir)\n        self.assertSetEqual(all_includes, {'invalid-namespace', 'l1-dependency'})\n        self.assertFalse(missing_includes)\n\n        missing_import = parse(self.module_path('missing-import'))\n        assert missing_import\n\n        all_imports, missing_imports = itg.check_dependencies('import', missing_import, self.module_dir)\n        self.assertSetEqual(all_imports, {'nonexistent'})\n        self.assertSetEqual(missing_imports, {'nonexistent'})\n\n        missing_include = parse(self.module_path('missing-include'))\n        assert missing_include\n\n        all_includes, missing_includes = itg.check_dependencies('include', missing_include, self.module_dir)\n        self.assertSetEqual(all_includes, {'nonexistent'})\n        self.assertSetEqual(missing_includes, {'nonexistent'})\n\n    def test_sdo(self):\n        script_conf = itg.DEFAULT_SCRIPT_CONFIG.copy()\n        setattr(script_conf.args, 'dir', self.module_dir)\n        setattr(script_conf.args, 'sdo', True)\n        itg.main(script_conf)\n\n        expected = {\n            'missing-revisions': [self.module_path('invalid-revision'), self.module_path('missing-revision')],\n            'missing-namespaces': [self.module_path('invalid-namespace'), self.module_path('missing-namespace')],\n            'missing-modules': {self.module_path('missing-import'): ['nonexistent']},\n            'missing-submodules': {self.module_path('missing-include'): ['nonexistent']},\n            'unused-modules': {},\n        }\n\n        with open('integrity.json') as f:\n            result = json.load(f)\n\n        self.assertDictEqual(result, expected)\n\n    def test_capabilities_to_modules(self):\n        result = set(itg.capabilities_to_modules(os.path.join(self.module_dir, 'capabilities.xml')))\n        expected = {\n            'good',\n            'deviation',\n            'missing-revision',\n            'invalid-revision',\n            'missing-namespace',\n            'invalid-namespace',\n            'missing-import',\n            'missing-include',\n            'nonexistent',\n        }\n        self.assertSetEqual(result, expected)\n\n    def test_vendor(self):\n        script_conf = itg.DEFAULT_SCRIPT_CONFIG.copy()\n        setattr(script_conf.args, 'dir', self.module_dir)\n        itg.main(script_conf)\n\n        expected = {\n            'missing-revisions': [self.module_path('invalid-revision'), self.module_path('missing-revision')],\n            'missing-namespaces': [self.module_path('invalid-namespace'), self.module_path('missing-namespace')],\n            'missing-modules': {\n                os.path.join(self.module_dir, 'capabilities.xml'): ['nonexistent'],\n                self.module_path('missing-import'): ['nonexistent'],\n            },\n            'missing-submodules': {self.module_path('missing-include'): ['nonexistent']},\n            'unused-modules': {self.module_dir: ['unused']},\n        }\n\n        with open('integrity.json') as f:\n            result = json.load(f)\n\n        self.assertDictEqual(result, expected)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_integrity.py","file_name":"test_integrity.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"360124788","text":"from PyQt4 import QtGui, QtSql\n\nclass MySQLSetup:\n\tdef __init__(self, db):\n\t\tself.db = db\n\n\tdef setup_currencies(self):\n\t\tif not self.db.open():\n\t\t\treturn False\n\n\t\tquery = QtSql.QSqlQuery()\n\n\t\tif not query.exec_('SHOW TABLES;'):\n\t\t\tQtGui.QMessageBox.critical(self, 'Database error', query.lastError().text())\n\t\t\treturn False\n\n\t\twhile query.next():\n\t\t\tif str(query.value(0).toString()) == 'currencies':\n\t\t\t\treturn True\n\n\t\tif not query.exec_('CREATE TABLE currencies (name CHAR(3), exchange_rate_to_EUR FLOAT);'):\n\t\t\tQtGui.QMessageBox.critical(self, 'Database error', query.lastError().text())\n\t\t\treturn False\n\n\t\tif not query.exec_(\"INSERT INTO currencies (name, exchange_rate_to_EUR) VALUES ('EUR', 1.0);\"):\n\t\t\tQtGui.QMessageBox.critical(self, 'Database error', query.lastError().text())\n\t\t\treturn False\n\n\t\treturn True","sub_path":"mysqlsetup.py","file_name":"mysqlsetup.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"218477295","text":"#!/usr/bin/env python\n\"\"\"\nScript that performs the first step of creating the desired chemical representation.\nThis was done in steps... this is primarily for my own records.\nJanuary 2014\nLiz Wylie\n\"\"\"\n\nfrom rdkit import Chem\nimport elasticsearch\nimport json\nfrom os import listdir\nfrom collections import defaultdict\n\n\ndef label(mol):\n    for atom in mol.GetAtoms():\n        hybridization = int(atom.GetHybridization())\n        atomic_num = atom.GetAtomicNum()\n        aromaticity = int(atom.GetIsAromatic())\n        hashed = int(str(hybridization) + str(atomic_num) + str(aromaticity))\n        atom.SetIsotope(hashed)\n\n\nif __name__ == '__main__':\n    es = elasticsearch.Elasticsearch()\n    types = set()\n    # must be directory containing .json files of list of chemical entries from database as\n    # dictionaries. 
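(e.g. a file might contain [{\"_id\": \"123\", \"smiles\": \"CCO\"}, ...]; these example values are purely illustrative.) 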
Each entry only needs the _id and SMILES from database.\n    files = listdir('tmp')\n    for i in files:\n        with open('tmp/'+i) as f:\n            data = json.load(f)\n        for chem in data:\n            smi = chem['smiles']\n            _id = chem['_id']\n            m = Chem.MolFromSmiles(smi.encode('ascii'))\n            if not m:\n                continue\n            DM = Chem.GetDistanceMatrix(m)\n            label(m)\n            i = {}\n            for a in m.GetAtoms():\n                i[a.GetIdx()] = a.GetIsotope()\n                types.add(a.GetIsotope())\n            dat = {\n                \"status\": \"undone\",\n                \"body\": defaultdict(list)\n            }\n            for j in i.keys():\n                t = defaultdict(list)\n                for l, k in enumerate(DM[j]):\n                    t[i[l]].append(k)\n                dat[\"body\"][i[j]].append(t)\n            b = json.dumps(dat)\n            es.index(index=\"dm_data\", doc_type=\"chemical\", id=int(_id), body=b)","sub_path":"chemical_representations/original_code/1_json_to_undone.py","file_name":"1_json_to_undone.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"253320799","text":"from tkinter import *\r\nimport threading\r\nimport time\r\ni=0\r\nj=0\r\nstop_threads=False\r\nmaster = Tk()\r\nmaster.geometry(\"200x100\")\r\nmaster.resizable(False, False)\r\n\r\ndef vypocet1(id,stop):\r\n    \r\n    while True:\r\n        global i\r\n        i+=1\r\n        time.sleep(0.1)\r\n        w1.config(text=i)\r\n        master.update_idletasks()\r\n        if stop():\r\n            print(\"Stopping {}\\n\".format(id))\r\n            break\r\n    print(\"{}.Stopped \\n\".format(id))\r\n    \r\n    \r\n    \r\ndef vypocet2(id,stop):\r\n    \r\n    while True:\r\n        global j\r\n        j+=1\r\n        time.sleep(0.1)\r\n        w2.config(text=j)\r\n        master.update_idletasks()\r\n        if stop():\r\n            print(\"Stopping {}\\n\".format(id))\r\n            break\r\n    print(\"{}.Stopped \\n\".format(id)) \r\n    \r\n    \r\ndef on_click_w1():\r\n    global i\r\n    i=0\r\n    \r\n    \r\ndef on_click_w2():\r\n    global j\r\n    j=0\r\n    \r\ndef on_click_w3():\r\n    global stop_threads\r\n    stop_threads=True\r\n    print(\"Destroying GUI\")\r\n    master.destroy()\r\n    print(\"Destroyed GUI\")\r\n    \r\n    \r\n    \r\nw1 = Button(master,command=on_click_w1)\r\nw1.pack()\r\nw2 = Button(master,command=on_click_w2)\r\nw2.pack()\r\n\r\nw4 = Button(master,text=\"Quit\",command=on_click_w3)\r\nw4.pack()\r\n\r\n\r\nt1=threading.Thread(target=vypocet1,args=(1,lambda: stop_threads))\r\nt1.start()\r\n\r\nt2=threading.Thread(target=vypocet2,args=(2,lambda: stop_threads))\r\nt2.start()\r\n\r\nmaster.mainloop()\r\n","sub_path":"aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"424016413","text":"import logging\nimport json\nimport gurobipy as gp\n\nfrom gurobipy import GRB, quicksum\nfrom flask import request\nfrom flask_restplus import Resource\nfrom iot_green_calculator.api.maintenance_serializer import maintenance_input_fields, maintenance, tot_results\nfrom iot_green_calculator.api.restplus import api\nfrom iot_green_calculator.maintenance import Maintenance, MaintenanceError\nfrom iot_green_calculator.green_proposal import greenComputation\nlog = logging.getLogger(__name__)\n\nns = api.namespace('maintenance', description='Maintenance operations')\n\n\ndef default_data():\n    return {'avg_distance': 3.505, 'avg_fuel_cons': 6.0, 'conv_factor': 8.9, 'n_devices': 10.0, 'lifetime': 30.0, 'e_intervention': 6.738012,\n            'battery': {'technology': 'Li-Ion', 'lifetime': 9.0, 'efficiency': 90.0, 'density': 140.516129, 'capacity': 6600.0, 'weight': 0.155, 'e_manufacturing': 25.544, 'disposal': 0.08556000000000001},\n            'solar_panel': {'technology': 'mono-Si', 'surface': 0.03744, 'irradiance': 
1.127419355, 's_hours': 9.0, 'lifetime': 20.0, 'efficiency': 17.0, 'kwp': 0, 'efficiency_w': 80.0, 'weight': 0.54, 'e_manufacturing': 205.02518400000002, 'disposal': 0.08650800000000002, 'e_produced': 0.025832875358534402},\n 'device': {'duty_cycle': 5, 'voltage': 3.3, 'output_regulator': 90, 'active_mode': 84.189030303, 'sleep_mode': 0.062,\n 'boards': [{'weight': 0.02, 'active_mode': 0.0, 'sleep_mode': 0.0, 'disposal': 0.0076},\n {'weight': 0.02, 'active_mode': 0.0, 'sleep_mode': 0.0, 'disposal': 0.0076}],\n 'sensors': [{'area': 13.8, 'lifetime': 1000000.0, 'active_mode': 11.0, 'sleep_mode': 0.0, 'e_manufacturing': 76.5072},\n {'area': 0.278, 'lifetime': 1000000.0, 'active_mode': 0.006, 'sleep_mode': 0.0, 'e_manufacturing': 1.541232},\n {'area': 0.283, 'lifetime': 1000000.0, 'active_mode': 0.5, 'sleep_mode': 0.0, 'e_manufacturing': 1.5689519999999997},\n {'area': 0.976, 'lifetime': 1000000.0, 'active_mode': 0.38, 'sleep_mode': 0.0, 'e_manufacturing': 5.410944},\n {'area': 0.665, 'lifetime': 10.0, 'active_mode': 3.0, 'sleep_mode': 0.0, 'e_manufacturing': 3.68676},\n {'area': 0.636, 'lifetime': 10.0, 'active_mode': 6.0, 'sleep_mode': 0.0, 'e_manufacturing': 3.525984},\n {'area': 0.636, 'lifetime': 10.0, 'active_mode': 34.0, 'sleep_mode': 0.0, 'e_manufacturing': 3.525984}],\n 'processor': {'area': 2.641, 'lifetime': 1000000.0, 'active_mode': 9.0, 'sleep_mode': 0.062, 'e_manufacturing': 14.641703999999999},\n 'radio': {'area': 6.731, 'lifetime': 1000000.0, 'active_mode': 0.303030303, 'sleep_mode': 0.0, 'e_manufacturing': 37.316663999999996},\n 'e_manufacturing': 144.14399999999998, 'disposal': 3.42, 'daily_e_required': 0.031477248}, \n 'sensors': {}, 'tot_e_intervention': 0, 'n_interventions': 0, 'tot_main_energy': 0, 'tot_main_disposal': 0} \n\n\n@ns.route('/')\nclass CategoryCollection(Resource):\n @api.marshal_with(maintenance)\n def get(self):\n return {}\n\n @api.expect(maintenance_input_fields)\n @api.response(200, 'maintenance analyzed', maintenance)\n def post(self):\n data = request.json\n try:\n maintenance = Maintenance(data) # inside the maintenance constructor there is an additional validator\n maintenance_aux = Maintenance(data)\n lifetime_units, e_manuf, disposal = prepare_maintenance(maintenance.__dict__) # default_data())\n results = maintenance_sched(lifetime_units, e_manuf, disposal, maintenance.__dict__)\n # results = maintenance_sched(lifetime_units, e_manuf, disposal, default_data())\n except MaintenanceError as e:\n print(e.message)\n return e.message, 400\n\n \n green_solar_panel, green_battery, green_maintenance = greenComputation(maintenance_aux) \n lifetime_units, e_manuf, disposal = prepare_maintenance(green_maintenance.__dict__)\n green_results = maintenance_sched(lifetime_units, e_manuf, disposal, green_maintenance.__dict__)\n r = prepare_result(maintenance)\n g = prepare_result(green_maintenance)\n for i, v in r.items():\n print('real ', i, '-', v, ' green ', g[i])\n return {'maintenance': maintenance.__dict__, 'real': prepare_result(maintenance), 'green': prepare_result(green_maintenance)}\n\ndef prepare_result(maintenance):\n solar_panel = {'energy': maintenance.solar_panel['e_manufacturing'], 'disposal': maintenance.solar_panel['disposal']}\n battery = {'energy': maintenance.battery['e_manufacturing'], 'disposal': maintenance.battery['disposal']}\n device = {'energy': maintenance.device['e_manufacturing'], 'disposal': maintenance.device['disposal']}\n maintenance = {'energy': maintenance.tot_main_energy, 'disposal': maintenance.tot_main_disposal}\n\n tot = 
{'tot_energy': (solar_panel['energy'] + battery['energy'] + device['energy'] + maintenance['energy']),\n           'tot_disposal': (solar_panel['disposal'] + battery['disposal'] + device['disposal'] + maintenance['disposal'])}\n    \n    result = {'solar_panel': solar_panel, \n              'battery': battery,\n              'device': device,\n              'maintenance':maintenance,\n              'tot': tot\n              }\n\n    return result\n\n''' Select the sensors whose lifetime is shorter than the application lifetime\n    (i.e. the ones that will need replacing during operation).\n    Returns data_for_maint()'''\n\ndef prepare_maintenance(dataset):\n    maintenance = dataset\n    sensors = maintenance['device']['sensors']\n    spec_sensors = {}\n    index = 0\n    for value in sensors:\n        if value['lifetime'] < maintenance['lifetime']:\n            spec_sensors[index] = value\n            index += 1\n    return data_for_maint(spec_sensors, dataset)\n\n\n''' Takes as input a dictionary of sensors and returns a triple with:\n    - a list of the components lifetime\n    - a list of the components energy_manufacturing\n    - a list of the components disposal\n    components are sensors, battery, solar_panel and device'''\n\n\ndef data_for_maint(spec_sensors, dataset):\n    disposal = {}\n    lifetime = {}\n    e_manuf = {}\n    sens_e_manuf = 0\n    for key, item in spec_sensors.items():\n        idx = len(lifetime)\n        lifetime[idx] = item['lifetime']\n        e_manuf[idx] = item['e_manufacturing']\n        sens_e_manuf += item['e_manufacturing']\n    for key, value in dataset.items():\n        if key == 'battery' or key == 'solar_panel':\n            lifetime[key] = value['lifetime']\n            e_manuf[key] = value['e_manufacturing']\n            disposal[key] = value['disposal']\n        if key == 'device':\n            e_manuf[key] = (value['e_manufacturing'] - sens_e_manuf)\n            disposal[key] = value['disposal']\n            lifetime_dev = 0\n            for code, item in dataset['device'].items():\n                try:\n                    lifetime_dev = min(lifetime_dev, item['lifetime'])\n                except (TypeError, KeyError):\n                    lifetime_dev = 10000\n            lifetime[key] = lifetime_dev\n    return (lifetime, e_manuf, disposal)\n\ndef update_maintenance(n_maintenance, e_manuf, disposal):\n    key_maintenance = {'n_maintenance': n_maintenance,\n                       'e_maintenance': n_maintenance * e_manuf,\n                       'd_maintenance': n_maintenance * disposal}\n    return key_maintenance\n\ndef maintenance_sched(life_units, e_manuf, disposal, maintenance_session):\n\n    life = int(maintenance_session['lifetime'])\n    n_devices = maintenance_session['n_devices']\n    e_int = maintenance_session['e_intervention']\n    print(\"lifeunits, \", life_units)\n    life_units = [int(item) for key, item in life_units.items()]  # list\n    up_disposal = {}\n    maintenance_session['sensors'] = {}\n    maintenance_results = {}\n    maintenance_results['sensors'] = {}\n    # The algorithm needs e_manuf and disposal to have equal length, so it pads disposal with as many 0s as the length difference\n    if len(disposal) < (len(e_manuf) - len(up_disposal)):\n        for key, value in e_manuf.items():\n            if key not in disposal:\n                up_disposal[key] = 0\n        for key in disposal:\n            up_disposal[key] = disposal[key]\n\n    print('life units: ', life_units)\n    print('e manu: ', e_manuf)\n    print('e_int: ', e_int)\n    print('n devices: ', n_devices)\n    print('life: ', life)\n    print('disposal: ', up_disposal)\n\n    try:\n        m = gp.Model()\n        m.ModelSense = GRB.MINIMIZE\n        n_unit = len(life_units)\n\n        # Add variables\n        x = [[m.addVar(vtype=GRB.BINARY, name=\"x[%s, %s]\" % (i, j))\n              for i in range(n_unit)] for j in range(life)]\n\n        e_manuf_t = [[elem for key, elem in e_manuf.items()]\n                     for i in range(life)]\n\n        disposal_t = [[elem for key, elem in up_disposal.items()]\n                      for i in range(life)]\n\n        w = [m.addVar(vtype=GRB.BINARY, name=\"w[%s]\" % i) for i in range(life)]\n\n        e_int_t = [e_int for i in range(life)]\n\n        # Objective function\n        energy_objective = quicksum(quicksum(n_devices * (e_manuf_t[i][j] *\n                                    x[i][j]) for j in range(n_unit)) +\n                                    (e_int_t[i] * w[i]) for i in range(life))\n        disposal_objective = quicksum(quicksum(n_devices * (disposal_t[i][j] *\n                                      x[i][j]) for j in range(n_unit))\n                                      for i in range(life))\n\n        m.setObjectiveN(energy_objective, 0)\n        m.setObjectiveN(disposal_objective, 1)\n\n        # Constraints\n        for i in range(n_unit):\n            for k in range(life - life_units[i]):\n                m.addConstr(quicksum(x[j][i] for j in range(k, k +\n                            life_units[i])) >= 1, 'c0')\n\n            for j in range(life):\n                m.addConstr(x[j][i] <= w[j], 'c1')\n\n        m.optimize()\n\n        for i in range(n_unit):\n            r_times = [j for j in range(life) if x[j][i].x >= 0.99]\n            key = list(e_manuf.keys())[i]\n            if key == 'battery' or key == 'solar_panel' or key == 'device':\n                maintenance_results[key] = update_maintenance(len(r_times),\n                                                              e_manuf[key],\n                                                              up_disposal[key])\n            else:\n                maintenance_results['sensors'][key] =\\\n                    update_maintenance(len(r_times), e_manuf[key],\n                                       up_disposal[key])\n\n        maintenance_list = [j + 1 for j in range(life) if w[j].x >= 0.99]\n        maintenance_session['n_interventions'] = len(maintenance_list)\n        maintenance_session['tot_e_intervention'] = len(maintenance_list) *\\\n            e_int\n        maintenance_results['n_interventions'] = len(maintenance_list)\n        maintenance_results['tot_e_intervention'] = len(maintenance_list) *\\\n            e_int\n\n        m.setParam(GRB.Param.ObjNumber, 0)\n        m.setParam(GRB.Param.SolutionNumber, 0)\n        maintenance_session['tot_main_energy'] = (m.ObjNVal / n_devices)\n        maintenance_results['tot_main_energy'] = (m.ObjNVal / n_devices)\n\n        m.setParam(GRB.Param.ObjNumber, 1)\n        m.setParam(GRB.Param.SolutionNumber, 0)\n        maintenance_session['tot_main_disposal'] = (m.ObjNVal / n_devices)\n        maintenance_results['tot_main_disposal'] = (m.ObjNVal / n_devices)\n\n        return maintenance_results\n    except gp.GurobiError as e:\n        print('Error code ' + str(e.errno) + ': ' + str(e))\n\n    except AttributeError:\n        print('Encountered an attribute error')\n\n\n","sub_path":"iot_green_calculator/api/endpoints/maintenance.py","file_name":"maintenance.py","file_ext":"py","file_size_in_byte":11482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"446477521","text":"import sys\nimport csv\n\n\ndef load(file_name):\n    tmp = []\n    with open(file_name, \"r\", encoding=\"utf-8-sig\") as f:\n        for row in csv.DictReader(f):\n            tmp.append(row)\n    return tmp\n\n\nif __name__ == \"__main__\":\n    A = load(sys.argv[1])\n    B = load(sys.argv[2])\n\n    i, d = 1, 0\n    for a, b in zip(A, B):\n        for key in a:\n            if key == \"sorting_result\" and a[key] != b[key]:\n                print(\n                    f\"diff in line: {i}, key: '{key}'\",\n                    a[key],\n                    b[key],\n                    b[\"id\"],\n                    b[\"citizen_id\"],\n                    b[\"round_id\"],\n                )\n                d += 1\n        i += 1\n\n    print(f\"total diff {d}\")\n    print(\"done\")\n","sub_path":"csv_compare.py","file_name":"csv_compare.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"555952354","text":"from . import metricsobj as mo\nfrom . import datecheck as dc\nfrom . import mapping as mp\nfrom . import colcomp as cc\nfrom . 
import troubleshooting as ts\nimport datetime as dtt\nimport time\nimport os\n\n\ndef run_main_program():\n print('\\n=== pyBev v0.4.1 ===\\n')\n mcap = None\n metrics = None\n report = None\n mcap, metrics, report = main_menu(mcap,metrics,report)\n \n return mcap, metrics, report\n\n\n#================================== Menus =====================================\n\n\ndef main_menu(mcap,metrics,report):\n main_menu_string = \"\"\"\n Main Menu\n 1. Data extraction\n 2. Set active date\n 3. Map ad count\n 4. Column comparison\n 5. Fix problems & report\n 6. Run full program\n 7. Options\nType 'q' to quit.\\n\\n\"\"\"\n while True:\n print(main_menu_string)\n \n user_input = input()\n \n try: user_input = int(user_input)\n except ValueError: user_input = user_input\n if type(user_input) is int:\n mcap,metrics,report = program_stages(user_input,mcap,metrics,report)\n else:\n if menu_commands(user_input) is 0:\n break\n else:\n continue\n \n return mcap,metrics,report\n \ndef options_menu():\n print(\"\"\"\\n Still under construction in this build!\\n\"\"\")\n \n print(\n\"\"\"\nActive panel:\nActive week:\nActive metrics file:\n\"\"\")\n return\n \ndef program_stages(stage,mcap,metrics,report):\n if stage == 1:\n mcap, metrics = stage_one()\n elif stage == 2:\n # try: \n stage_two(mcap, metrics)\n # except NameError: print('Please run stage 1 first.')\n elif stage == 3:\n # try:\n stage_three(mcap, metrics)\n # except NameError: print('Please run stage 2 first.')\n elif stage == 4:\n # try:\n stage_four(mcap, metrics)\n # except NameError: print('Please run stage 3 first.')\n elif stage == 5:\n stage_five(mcap,metrics)\n # try: report = stage_five(mcap,metrics)\n # except NameError: print('Please run stage 4 first.')\n elif stage == 6:\n # run_full_program()\n print('Still under construction.')\n elif stage == 7:\n options_menu()\n else:\n print('Unrecognized stage, please try again')\n \n return mcap,metrics,report\n \ndef menu_commands(user_input):\n if user_input == 'q':\n print('\\nTerminating program.')\n return 0\n \n # Easter Eggs\n elif user_input == 'friendly q':\n print('\\nGoodbye!')\n return 0\n elif user_input == 'dramatic q':\n print('\\nNow, these data will be lost... like tears in the rain...')\n return 0\n elif user_input == 'whiterabbit.obj':\n print('PERMISSION DENIED... ',end=''),\n time.sleep(1)\n print('and...\\n')\n time.sleep(1)\n for i in range(0,10):\n print(\n\"\"\" AH AH AH!\n YOU DIDN'T SAY THE MAGIC WORD!\\n\"\"\")\n time.sleep(.4)\n else:\n print('Unknown command, please try again.\\n')\n \n return\n \n \n#============================== Stage Subfunctions ============================\n\n\ndef panel_selection():\n while True:\n panel = input(\n\"\"\"Please select a panel:\n 1. BevAl\n 2. Non-Flash\n 3. 
WIP\n(Defaults to BevAl)\\n\"\"\") or '1'\n if (panel == '1' or panel=='BevAl'):\n panel = 'Beval'\n break\n elif (panel == '2' or panel=='Non-Flash'):\n panel = 'NonFlash'\n break\n else:\n print('Unrecognized panel, please choose another.\\n')\n continue\n mcap_path = [0,0]\n mcap_path[0] = 'Z:\\\\'+dtt.datetime.today().strftime('%m%d%Y_QC_Completed_')+panel+'.txt'\n mcap_path[1] = panel\n return mcap_path\n \ndef monday_archive(metrics):\n if dtt.datetime.today().weekday() == 0:\n print('Archiving monday data...\\n',end=''),\n filename = '\\\\' + dtt.datetime.today().strftime('%Y-%m-%d') + '.xlsx'\n while True:\n archive_path = (input(\n'''Monday archive folder location: \n(default: C:\\\\Users\\\\sroy\\\\Documents\\\\BevAl Metrics\\\\Archive)\\n''')\n or r'C:\\Users\\sroy\\Documents\\BevAl Metrics\\Archive') + filename\n if os.path.isfile(archive_path) == 0: \n metrics.book.save(archive_path)\n print('Done')\n else:\n print('Existing file found in archive, canceling operation.')\n return\n\n\n#============================== Program Stages ================================\n\n\ndef stage_one():\n \"\"\"Extracts data from the auto MCAP pull and the Metrics excel document,\n pivots where appropriate, and prepares it for further analysis.\"\"\"\n \n mcap_path = panel_selection()\n \n metrics_path = (input('Panel Metrics file location (press enter for default): ')\n or r'C:\\Users\\sroy\\Documents\\BevAl Metrics\\2017 BevAl Metrics.xlsx')\n # or r'C:\\Users\\sroy\\Documents\\BevAl Metrics\\test.xlsx')\n \n print('Location set to '+metrics_path,end='\\n')\n \n mcap = mo.MCAPData(mcap_path)\n \n metrics = mo.PanelMetrics(metrics_path)\n \n monday_archive(metrics)\n \n print('Extracting MCAP data... ',end=''),\n mcap.extract()\n print('Done')\n \n print('Extracting Metrics data... ',end=''),\n metrics.extract()\n print('Done')\n \n metrics.connect_book()\n \n mcap.fpivot()\n \n print('Data ready for processing.\\n')\n \n return mcap, metrics\n \ndef stage_two(mcap,metrics):\n \"\"\"User sets an active date for the program.\"\"\"\n \n metrics.week_date = dc.choose_week()\n \n return\n \ndef stage_three(mcap,metrics):\n \"\"\"Maps the VID count from the MCAP data to the metrics object and writes\n it to the excel document.\"\"\"\n \n print('Beginning MCAP-Metrics mapping...')\n \n if metrics.volatile_column not in metrics.df.columns:\n prompt = 'Name of the metrics column to write pivoted data to: '\n metrics.volatile_column = input(prompt)\n\n metrics.book.save()\n\n metrics.extract()\n \n mp.map_index(\n metrics.week_date,\n mcap.pivot_df,\n metrics.df,\n metrics.volatile_column,\n agg_func='len'\n )\n \n if (input('Filter-safe write y/n? ') or 0) == 'n': \n metrics.update_file_column(metrics.volatile_column,filter_safe=0)\n else:\n metrics.update_file_column(metrics.volatile_column,filter_safe=1)\n \n print('Mapping complete.')\n \n return\n \ndef stage_four(mcap,metrics):\n \"\"\"Compares the active week column to the volatile column, updating Pending,\n Non-Drop, and Naive items. When done comparing, it updates all appropriate\n end dates by one week (will make this step more sophisticated later). 
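(For example, an end date of 2017-06-05 would simply become 2017-06-12; these dates are purely illustrative.) 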
Finally,\n    the Metrics document is sorted by expected date and active week item status.\"\"\"\n    \n    print('Beginning column comparison...')\n    \n    metrics.book.save()\n\n    metrics.extract()\n    \n    cc.preliminary_update(mcap,metrics) # updates 0s to ints or pending and flags ?s\n    \n    metrics.xl_col_sort() # sorts all data by date ascending\n    \n    print('Column comparison complete.')\n    \n    return\n    \ndef stage_five(mcap,metrics):\n    \n    report = mo.Report(mcap,metrics)\n    \n    ts.duplicate_items(mcap,metrics,report)\n    \n    ts.resolved_items(mcap,metrics,report)\n    \n    ts.unresolved_items(mcap,metrics,report)\n    \n    return report","sub_path":"PyBev/Old Versions/pyBev_0-4/pyBev_0-4-1/pybev/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"63171697","text":"import cv2\nimport numpy as np\n\n#read in the image\n#to change to video just change the image with a video file\n#REMEMBER if you convert this to a video you have to create a while loop to keep the program running while the video plays\nimg = cv2.imread('crimson_image.jpg', 1)\n\n#converts the image into color scheme HSV\nhsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\n#sets the upper and lower color values that the program is looking for\n#REMEMBER THESE BOUNDS ARE IN HSV (HUE, SATURATION, VALUE) ORDER, SINCE THE IMAGE WAS CONVERTED TO HSV ABOVE\nlower_crimson = np.array([60,60,0])\nupper_crimson = np.array([255,255,255])\n\n#creates a mask that uses the two thresholds created above to find specific color\nmask = cv2.inRange(hsv, lower_crimson,upper_crimson)\n\n#this uses the mask and displays only the parts of the image(img) that the mask(mask) picks up\nres = cv2.bitwise_and(img,img, mask = mask)\n\n#shows the original image\ncv2.imshow('img', img)\n\n#shows the mask\ncv2.imshow('mask',mask)\n\n#shows the result\ncv2.imshow('res',res)\n\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"working_code/Color_Filter_test.py","file_name":"Color_Filter_test.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"68437017","text":"\"\"\"DativeTop Constants\"\"\"\n\nimport os\n\n\nHERE = os.path.dirname(os.path.dirname(__file__))\nCONFIG_PATH = os.path.join(HERE, 'dativetop', 'config.json')\n\nAPP_FORMAL_NAME = 'DativeTop'\nAPP_NAME = 'dativetop'\nAPP_ID = 'org.dativebase.dativetop'\nICONS_FILE_NAME = 'OLDIcon.icns'\nICONS_FILE_PATH = os.path.join('icons', ICONS_FILE_NAME)\nIP = '127.0.0.1'\n\nDATIVE_PORT = 5678\nDATIVE_ROOT = os.path.join(HERE, 'src', 'dative', 'dist')\nDATIVE_URL = 'http://{}:{}/'.format(IP, DATIVE_PORT)\nDATIVE_WEB_SITE_URL = 'http://www.dative.ca/'\nDATIVE_WEBSITE_URL = 'http://www.dative.ca/'\n\nDATIVETOP_GUI_PORT = 5677\nDATIVETOP_GUI_ROOT = os.path.join(HERE, 'src', 'dativetop', 'gui', 'target')\nDATIVETOP_GUI_URL = 'http://{}:{}/'.format(IP, DATIVETOP_GUI_PORT)\n\nDATIVETOP_SERVER_PORT = 5676\nDATIVETOP_SERVER_DIR = os.path.join(HERE, 'src', 'dativetop', 'server')\nDATIVETOP_SERVER_URL = 'http://{}:{}/'.format(IP, DATIVETOP_SERVER_PORT)\n\nOLD_DIR = os.path.join(HERE, 'src', 'old')\nOLD_LOGO_PNG_PATH = 'private_resources/icons-originals/OLD-logo.png'\nOLD_PORT = 5679\nOLD_URL = 'http://{}:{}/'.format(IP, OLD_PORT)\nOLD_WEB_SITE_URL = 
'http://www.onlinelinguisticdatabase.org/'\n","sub_path":"dativetop/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"552317668","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass place_cells():\n    '''\n    Clopath 2019 place cell pcdev = sig**2, sig=0.4, lam0 = 4000Hz\n    Foster 2000 Place cell pcdev = 2sig**2 sig=0.16, lam0=1000Hz\n    '''\n    def __init__(self,vpcn=11,hpcn=11,lam0=4000,pcdev=0.4, tstep=1,devv=1,au=2):\n        self.v = devv\n        self.lam0 = lam0*(tstep/1000)  # init lam0 in hertz, tstep in ms. convert output to 1 ms\n        self.npc = vpcn*hpcn\n        self.pcdev = pcdev\n        hori = np.linspace(-au/2,au/2,hpcn)\n        vert = np.linspace(-au/2,au/2,vpcn)\n\n        '''\n        hori = np.zeros(hpcn)\n        for i in range(len(hori) - 1):\n            hori[i + 1] = hori[i] + pcdev\n        hori -= hori[hpcn//2]\n        vert = np.zeros(vpcn)\n        for i in range(len(vert) - 1):\n            vert[i + 1] = vert[i] + pcdev\n        vert -= vert[vpcn // 2]'''\n\n        self.au = np.round(hori.max()-hori.min(),2)\n        self.pcs = np.zeros([vpcn*hpcn,2])  # coordinates go from top left to bottom right with centre being origin\n        i = 0\n        for x in hori[::-1]:\n            for y in vert:\n                self.pcs[i] = np.array([y, x])\n                i+=1\n\n    def check_pc(self,showpc='n'):\n        if showpc=='y':\n            plt.figure()\n            plt.scatter(self.pcs[:, 0], self.pcs[:, 1], s=20, c='r')\n            plt.axis((-self.au / 2, self.au / 2, -self.au / 2, self.au / 2))\n            for i in range(self.npc):\n                circ = plt.Circle(self.pcs[i], self.pcdev, color='g', fill=False)\n                plt.gcf().gca().add_artist(circ)\n            plt.show()\n\n    def sense(self,s):\n        norm = np.sum((s-self.pcs)**2,axis=1)\n        pcact = self.lam0*np.exp(-norm/(self.v*self.pcdev**2))\n        return pcact\n\n\ndef weightclip(neww,wmin,wmax):\n    w = neww.copy()\n    w[w>wmax]=wmax\n    w[w<wmin]=wmin\n    return w\n\"))\n\n    plt.subplot(236)\n    plt.title('Critic')\n    plt.imshow(mw[-2].reshape(npc, npc))\n    plt.colorbar()\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"653145238","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404\nfrom .models import Book\n\nfrom django.contrib.auth.decorators import login_required\n\nimport json\n\n# Create your views here.\n\n@login_required(login_url='/admin/login')\ndef listbooks_view(request):\n    listbooks = Book.objects.all()\n    #return HttpResponse('OK')\n    return render(request, 'books/listbook.html', {'listbooks': listbooks})\n\ndef books_view(request, author):\n    book = get_object_or_404(Book, author__first_name=author)\n    \n    data = {\n        'title': book.title,\n        'order': book.order,\n        'cover_image': \" %s \" % book.cover_image,\n        'author': \"%s %s\" % (book.author.first_name, book.author.last_name),\n        'editorial': {\n            'name': book.editorial.name,\n            'logo': \" %s \" % book.editorial.logo,\n        }\n    } \n\n    json_data = json.dumps(data)\n    return HttpResponse(json_data, content_type='application/json')\n\n\nfrom django.core import serializers\ndef books_serial_view(request):\n    data = serializers.serialize(\"json\", Book.objects.all())\n    return HttpResponse(data, content_type='application/json')\n\nfrom .forms import SumaForm\ndef suma_view(request):\n    \n    resultado = 0\n    if request.method == \"POST\":\n        form = SumaForm(request.POST) \n        if form.is_valid():\n            valor1 = form.cleaned_data['valor1']\n            valor2 = form.cleaned_data['valor2']\n            valor3 = form.cleaned_data['valor3']\n\n            resultado 
= float(valor1) + float(valor2) + float(valor3)\n        else:\n            form = SumaForm()\n\n    return render(request, 'books/suma.html', {\"form\":form, \"resultado\":resultado})\n\n\n","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"99087342","text":"import logging\nimport tempfile\nfrom urllib.request import Request, urlopen\n\nfrom optimus.helpers.raiseit import RaiseIt\nfrom optimus.spark import Spark\n\n\nclass Load:\n\n    def url(self, path=None, type_of=\"csv\"):\n        \"\"\"\n        Reads a dataset from a URL.\n        :param path: string for URL to read\n        :param type_of: type of the URL backend (can be csv, json, parquet or avro)\n        :return: pyspark dataframe from URL.\n        \"\"\"\n\n        if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n            return self.data_loader(str(path), type_of)\n        else:\n            print(\"Unknown sample data identifier. Please choose an id from the list below\")\n\n    def data_loader(self, url, type_of):\n        \"\"\"\n        Load data in from a url\n        :param url: url string\n        :param type_of: format data type\n        :return:\n        \"\"\"\n\n        data_loader = None\n        if type_of == \"csv\":\n            data_loader = self.csv\n        elif type_of == \"json\":\n            data_loader = self.json\n        elif type_of == \"parquet\":\n            data_loader = self.parquet\n        elif type_of == \"avro\":\n            data_loader = self.avro\n        else:\n            RaiseIt.type_error(data_loader, [\"csv\", \"json\", \"parquet\", \"avro\", ])\n\n        i = url.rfind('/')\n        data_name = url[(i + 1):]\n        data_def = {\n            \"displayName\": data_name,\n            \"url\": url\n        }\n        return Downloader(data_def).download(data_loader, type_of)\n\n    @staticmethod\n    def json(path):\n        \"\"\"\n        Return a dataframe from a json file.\n        :param path:\n        :return:\n        \"\"\"\n        try:\n            # TODO: Check a better way to handle this Spark.instance.spark. Very verbose.\n            df = Spark.instance.spark.read.json(path)\n        except IOError as error:\n            logging.error(error)\n            raise\n        return df\n\n    @staticmethod\n    def csv(path, sep=',', header='true', infer_schema='true', *args, **kwargs):\n        \"\"\"\n        Return a dataframe from a csv file. It is the same as the Spark read.csv function with some predefined\n        params\n\n        :param path: Path or location of the file.\n        :param sep: Usually delimiter mark are ',' or ';'.\n        :param header: Tell the function whether dataset has a header row. 'true' default.\n        :param infer_schema: Infers the input schema automatically from data.\n        It requires one extra pass over the data. 'true' default.\n\n        :return dataFrame\n        \"\"\"\n        try:\n            df = (Spark.instance.spark.read\n                  .options(header=header)\n                  .options(mode=\"DROPMALFORMED\")\n                  .options(delimiter=sep)\n                  .options(inferSchema=infer_schema)\n                  .csv(path, *args, **kwargs))\n        except IOError as error:\n            logging.error(error)\n            raise\n        return df\n\n    @staticmethod\n    def parquet(path, *args, **kwargs):\n        \"\"\"\n        Return a dataframe from a parquet file.\n        :param path: Path or location of the file. 
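(e.g. 'data/example.parquet'; this sample path is illustrative.) 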
Must be a string.\n        :return dataFrame\n        \"\"\"\n\n        try:\n            df = Spark.instance.spark.read.parquet(path, *args, **kwargs)\n        except IOError as error:\n            logging.error(error)\n            raise\n\n        return df\n\n    @staticmethod\n    def avro(path, *args, **kwargs):\n        try:\n            df = Spark.instance.spark.read.format(\"com.databricks.spark.avro\").load(path, *args, **kwargs)\n        except IOError as error:\n            logging.error(error)\n            raise\n\n        return df\n\nclass Downloader(object):\n    def __init__(self, data_def):\n        self.data_def = data_def\n        self.headers = {\"User-Agent\": \"Optimus Data Downloader/1.0\"}\n\n    def download(self, data_loader, ext):\n        display_name = self.data_def[\"displayName\"]\n        bytes_downloaded = 0\n        if \"path\" in self.data_def:\n            path = self.data_def[\"path\"]\n        else:\n            url = self.data_def[\"url\"]\n            req = Request(url, None, self.headers)\n\n            logging.info(\"Downloading %s from %s\", display_name, url)\n\n            # It seems that avro need a .avro extension file\n            with tempfile.NamedTemporaryFile(suffix=\".\" + ext, delete=False) as f:\n                bytes_downloaded = self.write(urlopen(req), f)\n                path = f.name\n                self.data_def[\"path\"] = path = f.name\n        if path:\n            try:\n                if bytes_downloaded > 0:\n                    logging.info(\"Downloaded %s bytes\", bytes_downloaded)\n                logging.info(\"Creating DataFrame for %s. Please wait...\", display_name)\n                return data_loader(path)\n            finally:\n                logging.info(\"Successfully created DataFrame for '%s'\", display_name)\n\n    @staticmethod\n    def write(response, file, chunk_size=8192):\n        \"\"\"\n        Load the data from the http request and save it to disk\n        :param response: data returned\n        :param file: file object to write the downloaded chunks to\n        :param chunk_size: size of each chunk of data\n        :return:\n        \"\"\"\n        total_size = response.headers['Content-Length'].strip() if 'Content-Length' in response.headers else 100\n        total_size = int(total_size)\n        bytes_so_far = 0\n\n        while 1:\n            chunk = response.read(chunk_size)\n            bytes_so_far += len(chunk)\n            if not chunk:\n                break\n            file.write(chunk)\n        total_size = bytes_so_far if bytes_so_far > total_size else total_size\n\n        return bytes_so_far\n","sub_path":"optimus/io/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"296062412","text":"\"\"\"\n{\n    \"author\": \"Yucheng Huang\",\n    \"difficulty\": \"medium\",\n    \"link\": \"https://leetcode.com/problems/array-nesting/\",\n    \"beats\": 0.1728,\n    \"category\": [\"array\"],\n    \"tags\": [],\n    \"questions\": []\n}\n\"\"\"\n\nclass Solution:\n    def arrayNesting(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        visited = [0 for __ in range(len(nums))]\n        maxCount = 0\n        for i in range(len(nums)):\n            count = 0\n            j = i\n            while visited[j]==0:\n                visited[j] = 1\n                count += 1\n                j = nums[j]\n            maxCount = max(maxCount, count)\n        return maxCount\n\n","sub_path":"solutions/565.second.py","file_name":"565.second.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"83793420","text":"import sys\n\nN,l,v = [int(x) for x in sys.stdin.readline().split(\" \")]\ntreads = []\n\nfor i in range(N):\n    cur = [int(x) for x in sys.stdin.readline().split(\" \")] \n    cur.append(cur[2]/(cur[1]-cur[0]))\n    treads.append(cur)\n\nrecord = l*v\n\ndef go(x,time,new_list):\n    global record\n    if len(new_list) == 0:\n        time += v*(l-x)\n        if time < record:\n            record = time\n        return\n\n    lowest = min([p[1] for p in new_list])\n    first_layer = [p for p in new_list if p[0]<= lowest]\n 
\n    for tread in first_layer:\n        if tread[3] < v:\n            go(tread[1],time + (tread[0]-x)*v + tread[2],[p for p in new_list if p[0] >= tread[1]])\n    \n\ngo(0,0,treads)\n    \n\nprint(record)\n","sub_path":"Python/code_comp/Programmeringsolympiaden/Rullband/app copy.py","file_name":"app copy.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"89684419","text":"\"\"\"\n    Reads the xml file and produces the following files:\n    orig_file_count.txt, orig_freq_word.txt, orig_freq_tag.txt, orig_alldataset_section_stats.txt\n\"\"\"\n\nimport xml.sax, fnmatch, os, errno, sys\n\nglobal finalname, exception_ctr, total_ctr, punc_ctr, s_tok_ctr, psuedo_total_ctr, print_flag, s_ctr, tag_ctr\nglobal s_excep_ctr, tok_excep_ctr, tok_used_ctr, s_used_ctr\nd_word = {}\nd_tag = {}\nd_section_tag = {} \nd_word_tag = {}\n\n# Function to check whether the path exists or not\ndef make_sure_path_exists(path):\n    try:\n        os.makedirs(path)\n    except OSError as exception:\n        if exception.errno != errno.EEXIST:\n            raise\n\nclass ANC_ContentHandler(xml.sax.ContentHandler):\n\n    def __init__(self):\n        xml.sax.ContentHandler.__init__(self)\n\n    \"\"\"\n    2 lists are defined, viz. pubList and fieldList\n    pubList contains all the other headers\n    fieldList contains the main header which we want to parse, here \n    tok has an attribute 'msd' which contains the POS tag as a part of its data\n    separated by '+' sign\n    \"\"\"\n\n    def startElement(self, name, attrs):\n        global total_ctr, s_tok_ctr, psuedo_total_ctr, print_flag, s_ctr, tag_ctr\n        #print \"SE current name is: \", name, \"attrs ? \", attrs.getNames()\n        ANC_ContentHandler.endflag = 0 \n        if name == \"doc\":\n            ##print \"File:\", finalname\n            ANC_ContentHandler.pubList = [\"doc\",\"xcesHeader\",\"text\",\"body\",\"div\",\"head\",\"p\",\"s\",\"closer\",\"opener\"]\n            ANC_ContentHandler.fieldList = [\"tok\"]\n            ANC_ContentHandler.endflag = 0\n            ANC_ContentHandler.content = \"\"\n\n        if name in ANC_ContentHandler.pubList:\n            if name == \"s\":\n                s_ctr += 1\n                s_tok_ctr = 0\n                print_flag = 1\n                ##print \"new sentence start, buffer_file initiated\"\n            ANC_ContentHandler.content = \"\"\n\n        if name in ANC_ContentHandler.fieldList:\n            total_ctr += 1\n            psuedo_total_ctr += 1\n            s_tok_ctr += 1\n            if \"msd\" in attrs.getNames():\n                tag = ''\n                for i in attrs.getValue(\"msd\"):\n                    if i != '+': tag += i.upper()\n                    else: break\n                if tag:\n                    tag_ctr += 1\n                    ANC_ContentHandler.tag = tag\n            else:\n                print_flag = 0\n            ANC_ContentHandler.field = name\n            ANC_ContentHandler.content = \"\"\n    \n    def endElement(self, name):\n        global print_flag, s_excep_ctr, tok_used_ctr, s_used_ctr\n        #global d_word, d_tag, d_word_tag\n        #print \"EE current name is: \", name\n        if name in ANC_ContentHandler.fieldList:\n            if name == \"tok\":\n                #print \"endflag set here\"\n                ANC_ContentHandler.endflag = 1\n\n                if not ANC_ContentHandler.content in d_word:\n                    d_word[ANC_ContentHandler.content] = 1\n                else:\n                    d_word[ANC_ContentHandler.content] = d_word.get(ANC_ContentHandler.content) + 1\n                #print \"Word\",d_word\n\n                ctag = ANC_ContentHandler.tag.encode('ascii','ignore')\n                if not ctag in d_tag:\n                    d_tag[ctag] = 1\n                else:\n                    d_tag[ctag] = d_tag.get(ctag) + 1\n                #print \"tag\",d_tag \n\n                word_tag = ANC_ContentHandler.content + '_' + ctag\n                if not word_tag in d_word_tag:\n                    d_word_tag[word_tag] = 1\n                else:\n                    d_word_tag[word_tag] = d_word_tag.get(word_tag) + 1\n                #print \"word_tag\",d_word_tag\n\n\n                if not ctag in d_section_tag:\n                    d_section_tag[ctag] = 1\n                else:\n                    d_section_tag[ctag] = d_section_tag.get(ctag) + 1\n        \n    \"\"\"\n    Character encoding in utf-8 format\n    Some ambiguous files exist which have text data after the closing tok tag\n    A count of no. of such ambiguous lines in the xml file is maintained\n    in the following Function\n    If the text data following the closing tag is a punctuation, then a punctuation counter\n    is incremented accordingly\n    The statistics.txt file represents net % ambiguity for each xml file\n    \"\"\"\n\n    def characters(self, content):\n        global exception_ctr, punc_ctr, psuedo_total_ctr, print_flag, tok_excep_ctr\n        #print \"content passed\", content.strip() , \";\"\n        ANC_ContentHandler.content += content.encode('utf-8').replace('\\\\','\\\\\\\\')\n        if not content.isspace():\n            pass\n            ##print \"content passed\", content.strip() , \";\"\n        if ANC_ContentHandler.endflag == 1 and not content.isspace():\n            ##print \"in if\"\n            exception_ctr += 1\n            punc_list = [\".\", \",\", \"-\", \"_\", \"/\", \"(\", \")\", \"'\"]\n            if content.strip() in punc_list:\n                punc_ctr += 1\n                #print content ,\"in punc_list\"\n            else:\n                ##print \"Discard the whole sentence\"\n                tok_excep_ctr += 1\n                psuedo_total_ctr -= 1\n                print_flag = 0\n    \ndef update_progress(progress):\n    barLength = 10  # Modify this to change the length of the progress bar\n    status = \"\" \n    if progress >= 1:\n        progress = 1\n        status = \"Done...\\n\"\n    block = int(round(barLength*progress))\n    text = \"\\rPercent: [{0}] {1}% {2}\".format( \"#\"*block + \"-\"*(barLength-block), round(progress*100), status)\n    sys.stdout.write(text)\n    sys.stdout.flush()\n\ndef main(sourceFileName):\n    source = open(sourceFileName)\n    xml.sax.handler.feature_external_pes = False\n    xml.sax.parse(source, ANC_ContentHandler())\n    \nif __name__ == \"__main__\":\n    #main(\"./test.xml\")\n    \"\"\" rootPath for input files and pattern : *.xml \"\"\"\n\n    rootPath = './POS_tagging'\n    pattern = '*.xml' \n    make_sure_path_exists(\"./Statistics/Original\")\n    deeppath = './POS_tagging/nytimes'\n    prefix = \"./POS_tagging/\"\n\n\n    \"\"\" xml_list.txt for storing the list of xml files to be parsed \"\"\"\n    \n    xml_ctr = sum_sentences = 0\n    f = open(\"./Statistics/Original/xml_list.txt\",\"w\")\n    for root, dirs, files in os.walk(rootPath):\n        for filename in fnmatch.filter(files, pattern):\n            if not \"header\" in filename:\n                f.write( os.path.join(root, filename) + '\\n')\n                xml_ctr += 1\n    f.close()\n\n\n    \"\"\"\n    orig_freq_word ==> word frequency in original dataset\n    orig_freq_tag ==> tag frequency in original dataset\n    orig_freq_word_tag ==> word_tag frequency in original dataset\n    orig_file_count ==> files, words, sentences in original dataset\n    orig_alldataset_section_stats ==> files, words, sentences, tags section-wise\n    \"\"\"\n\n    orig_freq_word = open(\"./Statistics/Original/orig_freq_word.txt\",\"w\")\n    orig_freq_tag = open(\"./Statistics/Original/orig_freq_tag.txt\",\"w\")\n    orig_freq_word_tag = open(\"./Statistics/Original/orig_freq_word_tag.txt\",\"w\")\n    orig_file_count = open(\"./Statistics/Original/orig_file_count.txt\",\"w\")\n    alldataset_section_stats = open(\"./Statistics/Original/orig_alldataset_section_stats.txt\",\"w\")\n\n    orig_file_count.write(\"\\t\\tXML FILE NAME\" + \"%28s\" % \"SENTENCES\" + \"%12s\" % \"WORDS\" + '\\n')\n\n    d_word = {}\n    d_tag = {}\n    d_word_tag = {}\n    \n\n    subdir_list = os.listdir(deeppath)\n    subdir_list.sort()\n\n    last_line = file(\"./Statistics/Original/xml_list.txt\", \"r\").readlines()[-1]\n    last_line = last_line[:len(last_line)-1]\n    f = open(\"./Statistics/Original/xml_list.txt\",\"r\")\n    num_lines = sum(1 for line in f)\n    line_ctr = 0\n    done_sections 
=[]\n prev_section = -1\n f = open(\"./Statistics/Original/xml_list.txt\",\"r\")\n for line in f:\n exception_ctr = total_ctr = punc_ctr = psuedo_total_ctr = s_ctr = s_excep_ctr = tok_excep_ctr = tok_used_ctr = s_used_ctr = tag_ctr = 0\n strlen = len(line)\n xmlname = line[0:strlen-1]\n filename = line[2:strlen-5] + \".txt\"\n main(xmlname)\n orig_file_count.write(xmlname + \"%10s\" % str(s_ctr) + \"%13s\" % str(total_ctr) + '\\n')\n section = line.split(\"/\")[3]\n\n if (section != prev_section or line.strip() == last_line.strip()) and prev_section != -1:\n if line.strip() == last_line.strip():\n alldataset_section_stats.write(xmlname[len(prefix):] + \"%10s\" % str(s_ctr) + \"%13s\" % str(total_ctr) + '\\n')\n alldataset_section_stats.write(\"\\n\\nNumber of Files in Section \" + str(prev_section) + \" is: \" + str(section_files) + '\\n')\n alldataset_section_stats.write(\"Number of Sentences in this section is: \" + str(section_sum_sentences) + '\\n')\n alldataset_section_stats.write(\"Number of Words in this section is: \" + str(section_sum_words) + '\\n')\n alldataset_section_stats.write(\"Total no. of unique tags in this section: \" + str(len(d_section_tag.keys())) + '\\n')\n prev_section = section\n\n if section not in done_sections:\n d_section_tag = {}\n section_sum_sentences = section_sum_words = section_files = 0\n done_sections.append(section)\n alldataset_section_stats.write(\"\\n\\n-------------------------------------------------------\" + '\\n')\n alldataset_section_stats.write(\"\\t\\t\\t\\tSection: \" + str(section) + '\\n\\n\\n')\n alldataset_section_stats.write(\"\\t\\tXML FILE NAME\" + \"%14s\" % \"SENTENCES\" + \"%12s\" % \"WORDS\" + '\\n')\n \n if line.strip() != last_line.strip():\n alldataset_section_stats.write(xmlname[len(prefix):] + \"%10s\" % str(s_ctr) + \"%13s\" % str(total_ctr) + '\\n')\n section_sum_sentences += s_ctr\n section_sum_words += total_ctr\n section_files += 1\n\n\n sum_sentences += s_ctr\n line_ctr += 1\n update_progress(line_ctr*1.0/num_lines)\n f.close()\n\n\n orig_freq_word.write(\"Total no. of unique words present in the dataset: \" + str(len(d_word.keys())) + '\\n')\n sum_words = sum(d_word.itervalues())\n d_word = [ (v,k) for k,v in d_word.iteritems() ]\n d_word.sort(reverse=True)\n for v,k in d_word:\n #orig_freq_word.write(\"%s\\t:\\t %d\" % (k,v) + '\\n')\n orig_freq_word.write(\"%20s\" % k + \":\" \"%7s\" % v + '\\n')\n orig_freq_word.write(\"Total no. of words present in the dataset: \" + str(sum_words))\n orig_freq_word.close()\n\n orig_freq_tag.write(\"Total no. of unique tags present in the dataset: \" + str(len(d_tag.keys())) + '\\n')\n unique_tags = len(d_tag.keys())\n sum_tags = sum(d_tag.itervalues())\n d_tag = [ (va,ke) for ke,va in d_tag.iteritems() ]\n d_tag.sort(reverse=True)\n for va,ke in d_tag:\n orig_freq_tag.write(\"%s\\t:\\t %d\" % (ke,va) + '\\n')\n orig_freq_tag.write(\"Total no. of tags present in the dataset: \" + str(sum_tags))\n orig_freq_tag.close()\n\n d_word_tag = [ (val,key) for key,val in d_word_tag.iteritems() ]\n d_word_tag.sort(reverse=True)\n for val,key in d_word_tag:\n orig_freq_word_tag.write(\"%20s:\\t %d\" % (key,val) + '\\n')\n orig_freq_word_tag.close() \n\n\n orig_file_count.write(\"Total no. of files: \" + str(xml_ctr) + '\\n')\n orig_file_count.write(\"Total no. of sentences present in the dataset: \" + str(sum_sentences) + '\\n')\n orig_file_count.write(\"Total no. 
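The report-writing code above leans on Python 2 idioms (`iteritems`, the `file()` builtin, swapping each dict into `(value, key)` tuples before sorting). Under Python 3 the same descending frequency listing can be produced directly with `sorted` and a key function; a sketch with sample counts:

```python
# Python 3 replacement for: d = [(v, k) for k, v in d.iteritems()]; d.sort(reverse=True)
freqs = {"NN": 120, "DT": 85, "VB": 97}

for tag, count in sorted(freqs.items(), key=lambda kv: kv[1], reverse=True):
    print("%s\t:\t %d" % (tag, count))   # NN first, then VB, then DT
```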
of words present in the dataset: \" + str(sum_words))\n\n alldataset_section_stats.write(\"\\n\\n\\n\\n-------------------------- Dataset Statitics --------------------------\" + '\\n\\n\\n')\n alldataset_section_stats.write(\"Total Number of Sections in the dataset is: \" + str(len(done_sections)) + '\\n')\n alldataset_section_stats.write(\"Total Number of Data files in the dataset is: \" + str(xml_ctr) + '\\n')\n alldataset_section_stats.write(\"Total number of Sentences in the dataset is: \" + str(sum_sentences) + '\\n')\n alldataset_section_stats.write(\"Total no. of Words present in the dataset: \" + str(sum_words) + '\\n')\n alldataset_section_stats.write(\"Total no. of Unique Tags present in the dataset: \" + str(unique_tags) + '\\n')\n alldataset_section_stats.write(\"Total no. of Tags present in the dataset: \" + str(sum_tags))","sub_path":"orig_stats_generator.py","file_name":"orig_stats_generator.py","file_ext":"py","file_size_in_byte":11121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"123178286","text":"from django.conf import settings\n\nfrom cassandra.cqlengine.management import delete_keyspace\n\nfrom djangocassandra.db.backends.cassandra.base import DatabaseWrapper\n\n\ndef connect_db():\n connection = DatabaseWrapper(settings.DATABASES['default'])\n connection_params = connection.get_connection_params()\n connection.get_new_connection(connection_params)\n return connection\n\n\ndef create_model(connection, model):\n connection.creation.sql_create_model(\n model,\n None\n )\n\n\ndef populate_db(connection, values):\n for value in values:\n value.save()\n\n\ndef destroy_db(connection):\n if None is not connection:\n keyspace_names = [\n key for key in settings.DATABASES['default']['KEYSPACES'].keys()\n ]\n for keyspace in keyspace_names:\n delete_keyspace(keyspace)\n","sub_path":"tests/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"8506046","text":"import os\nimport time\nfrom tensorflow.keras.layers import LSTM\n\n# Window size or the sequence length\nN_STEPS = 5\n# Lookup step, 1 is the next day\nLOOKUP_STEP = 1\n\n# test ratio size, 0.2 is 20%\nTEST_SIZE = 0.1\n# features to use\nFEATURE_COLUMNS = [\"Open\", 'Close', \"OpenMax\", \"OpenMin\", \"Day\"]\n# date now\ndate_now = time.strftime(\"%Y-%m-%d\")\n\n### model parameters\nTRAIN_RAW = True\nN_LAYERS = 4\n# LSTM cell\nCELL = LSTM\n# 256 LSTM neurons\nUNITS = 12\n# 40% dropout\nDROPOUT = 0.3\n\n### training parameters\n\n# mean squared error loss\nLOSS = \"mse\"\nOPTIMIZER = \"rmsprop\"\nBATCH_SIZE = 16\nEPOCHS = 300\n\n# Apple stock market\nticker = \"WIG20\"\nticker_data_filename = os.path.join(\"data\", f\"WIG20_d.csv\")\n# model name to save\nmodel_name = f\"{date_now}_{ticker}-{LOSS}-{CELL.__name__}-seq-{N_STEPS}-step-{LOOKUP_STEP}-layers-{N_LAYERS}-units-{UNITS}\"\n\n# Row data name\nglobal row_data\n","sub_path":"lstm/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"109638932","text":"import pandas as pd\r\nimport json\r\nimport os\r\nimport numpy as np\r\nfrom ZibiDB.core.attribute import Attribute\r\nfrom BTrees.OOBTree import OOBTree\r\n\r\nclass Table:\r\n # info = {}\r\n # need a load() outside\r\n def __init__(self, attrls, info):\r\n self.data = {}\r\n self.datalist = []\r\n self.df = 
pd.DataFrame()\r\n self.name = info['name']\r\n self.attrls = attrls\r\n self.attrs = {} #{name: attributeobj}\r\n self.primary = info['primary']\r\n self.foreign = info['foreign']\r\n self.uniqueattr = {} # {attribute_name: {attibute_value: primarykey_value}}\r\n self.index={} #{attr: idex_name}\r\n self.BTree={} #{idex_name: BTree}\r\n self.flag = 0\r\n\r\n for attr in info['attrs']:\r\n temp = Attribute(attr)\r\n self.attrs[attr['name']] = temp\r\n if temp.unique:\r\n self.uniqueattr[attr['name']] = {}\r\n\r\n def add_index(self, attr, idex_name):\r\n if attr[0] not in self.uniqueattr.keys():\r\n raise Exception('ERROR: The attr is not unique and cannot create index')\r\n # If unique:\r\n if attr[0] not in self.index:\r\n self.index[attr[0]]=idex_name\r\n # Get pairs {v1:p1, v2:p2,...}\r\n nodes=self.uniqueattr[attr[0]]\r\n\r\n # Create a b tree\r\n t=OOBTree()\r\n t.update(nodes)\r\n self.BTree[idex_name]=t\r\n return t\r\n \r\n def drop_index(self, idex_name):\r\n # TABLE is a obj\r\n # index name must in index attrs\r\n if idex_name in self.index.keys():\r\n del self.index[idex_name]\r\n del self.BTree[idex_name]\r\n else:\r\n raise Exception('ERROR: The index does not exist')\r\n def index_search(self, attrs, condition):\r\n '''\r\n attrs: [attr1, attr2, ...]\r\n condition:{attr: , value:, symbol, }\r\n '''\r\n attr=condition['attr']\r\n value=condition['value']\r\n symbol=condition['symbol']\r\n idex_name=self.index[attr]\r\n BTree=self.BTree[idex_name]\r\n\r\n min_key=BTree.minKey()\r\n max_key=BTree.max_key()\r\n\r\n if symbol=='=':\r\n pks=[BTree[value]]\r\n content=[]\r\n for pk in pks:\r\n content.append(self.data[pk])\r\n \r\n elif symbol=='<':\r\n pks=list(BTree.values(min=min_key, max=value, excludemin=False, excludemax=True)) #[pk1, pk2,...]\r\n content=[]\r\n for pk in pks:\r\n content.append(self.data[pk])\r\n\r\n elif symbol=='>':\r\n pks=list(BTree.values(min=value, max=max_key, excludemin=True, excludemax=False)) #[pk1, pk2,...]\r\n content=[]\r\n for pk in pks:\r\n content.append(self.data[pk])\r\n\r\n elif symbol=='<=':\r\n pks=list(BTree.values(min=min_key, max=value, excludemin=False, excludemax=False)) #[pk1, pk2,...]\r\n content=[]\r\n for pk in pks:\r\n content.append(self.data[pk])\r\n\r\n elif symbol=='>=':\r\n pks=list(BTree.values(min=value, max=max_key, excludemin=True, excludemax=False)) #[pk1, pk2,...]\r\n content=[]\r\n for pk in pks:\r\n content.append(self.data[pk])\r\n\r\n elif symbol=='<>':\r\n pk1=list(BTree.values(min=min_key, max=value, excludemin=False, excludemax=True)) #[pk1, pk2,...]\r\n pk2=list(BTree.values(min=value, max=max_key, excludemin=True, excludemax=False)) #[pk1, pk2,...]\r\n content=[]\r\n for pk in pk1:\r\n content.append(self.data[pk]) \r\n for pk in pk2:\r\n content.append(self.data[pk]) \r\n att=self.attrls[len(pks):]\r\n\r\n df=pd.DataFrame(content, columns=att)\r\n my_df=pd.DataFrame()\r\n for i in attrs:\r\n my_df[i]=df[i]\r\n return my_df\r\n\r\n def insert(self, attrs: list, data: list) -> None:\r\n \"\"\"\r\n Add data into self.data as a hash table.\r\n TODO:\r\n -Put data into self.data{} as hash table. Key is prmkvalue, and value is attvalue = [].\r\n -Use attribute_object.typecheck to check every value, and if the value is invalid, raise error. 
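One caveat on `index_search` above: the `BTrees` package spells its bound methods in camelCase (`minKey`/`maxKey`), so the `BTree.max_key()` call would raise `AttributeError` at runtime. A minimal range-query sketch against an `OOBTree` built the way `add_index` builds one (the sample value-to-primary-key pairs are made up):

```python
from BTrees.OOBTree import OOBTree

# value -> primary key, the same shape Table.uniqueattr stores
t = OOBTree()
t.update({10: ("pk1",), 20: ("pk2",), 30: ("pk3",)})

lo, hi = t.minKey(), t.maxKey()   # camelCase: there is no min_key()/max_key()

# primary keys of all rows whose indexed value is < 30
pks = list(t.values(min=lo, max=30, excludemin=False, excludemax=True))
assert pks == [("pk1",), ("pk2",)]
```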
If not put into attvalue.\n            -Check primary key value, if the value already in prmkvalue, raise error.\n            -Print essential information\n        \"\"\"\r\n        # TODO: typecheck?\r\n        prmkvalue = []\r\n        attvalue = []\r\n        if attrs==[]:\r\n            # TODO: typecheck\r\n            # Must enter full-attr values by order\r\n            if len(data)!=len(self.attrls):\r\n                raise Exception('ERROR: Full-attr values is needed')\r\n\r\n            dat = data[::]\r\n            # Get primary-key values\r\n            for _ in range(len(self.primary)):\r\n                prmkvalue.append(dat.pop(0))\r\n            # the remaining data is attr data\r\n            attvalue=dat\r\n\r\n            # TODO: typecheck\r\n            # NOTE: iterate over indices so data[i] and self.attrls[i] line up\r\n            for i in range(len(data)):\r\n                value = data[i]\r\n                attname = self.attrls[i]\r\n                # typecheck()\r\n                # If false, raise error in typecheck()\r\n                # If true, nothing happens and continue\r\n                # If unique, call self uniquecheck()\r\n                if attname in self.uniqueattr.keys():\r\n                    if value in self.uniqueattr[attname].keys():\r\n                        raise Exception('ERROR: Unique attribute values are in conflict! ' + attname + \" : \" + str(value))\r\n                    self.uniqueattr[attname][value] = prmkvalue\r\n                self.attrs[attname].typecheck(value)\r\n                # If it is not unique, raise error in the function\r\n                # Else, continue\r\n\r\n            # Hash data\r\n            self.data[tuple(prmkvalue)]=attvalue\r\n        else:\r\n            # Reorder by the order of self.attrs\r\n            attrs_dict=dict()\r\n            for name in self.attrls:\r\n                attrs_dict[name]=None\r\n\r\n            # Get primary-key values\r\n            for name in self.primary:\r\n                if name not in attrs:\r\n                    raise Exception('ERROR: Primary key cannot be NULL.')\r\n                prmkvalue.append(data[attrs.index(name)])\r\n\r\n            for i in range(len(attrs)):\r\n                value = data[i]\r\n                attname = attrs[i]\r\n\r\n                if attname in self.uniqueattr.keys():\r\n                    if value in self.uniqueattr[attname].keys():\r\n                        raise Exception('ERROR: Unique attribute values are in conflict! 
' + attname + \" : \" + str(value))\r\n self.uniqueattr[attname][value] = prmkvalue\r\n #self.attrs[attname].typecheck(value)\r\n self.attrs[attname].typecheck(value)\r\n\r\n attrs_dict[attname] = value\r\n # Get primary-key values\r\n for name in self.primary:\r\n # Pop primary-key value from the full-attr dict\r\n attrs_dict.pop(name)\r\n # The remaining data is attr data\r\n attvalue=list(attrs_dict.values())\r\n\r\n # Hash data\r\n if tuple(prmkvalue) not in self.data.keys():\r\n self.datalist = self.datalist + [prmkvalue + attvalue]\r\n self.data[tuple(prmkvalue)] = attvalue\r\n else:\r\n raise Exception('ERROR: Primary key value collision')\r\n \r\n def serialize(self):\r\n pass\r\n \r\n def deserialize(self):\r\n pass\r\n \r\n def delete(self, table_name, where):\r\n if where == []:\r\n self.data = {}\r\n self.datalist = []\r\n for a in self.uniqueattr.keys():\r\n self.uniqueattr[a] = {}\r\n #self.BTree\r\n elif len(where) > 1:\r\n raise Exception('ERROR: Mutiple where conditions is coming soon')\r\n elif len(where) == 1:\r\n if where[0]['attr'] not in self.primary:\r\n raise Exception('ERROR: You should delete by one of the primary key!')\r\n else:\r\n if where[0]['symbol']=='=':\r\n value=where[0]['value']\r\n try:\r\n value=int(value)\r\n except:\r\n pass\r\n\r\n del self.data[tuple([value])]\r\n self.df=self.df[self.df[where[0]['attr']]!=value]\r\n elif where[0]['symbol']=='<>':\r\n try:\r\n value=int(value)\r\n except:\r\n pass\r\n self.data={self.data[value]}\r\n self.df=self.df[self.df[where[0]['attr']]==value]\r\n\r\n def search(self, attr, sym, tag, condition, gb):\r\n # attr: [] or *\r\n # situation: number means different conditions\r\n # gb: true/false have group by\r\n # condition: [], base on situation\r\n # df = pd.DataFrame(self.datalist, columns = self.attrls)\r\n if self.flag == 0:\r\n self.df = pd.DataFrame(self.datalist, columns = self.attrls)\r\n symbols = {\r\n '=': 1,\r\n '>': 2,\r\n '>=': 3,\r\n '<': 4,\r\n '<=': 5,\r\n 'LIKE': 6,\r\n 'NOT LIKE': 7,\r\n '<>': 8\r\n }\r\n if len(sym) == 0:\r\n situation = 0\r\n else:\r\n situation = symbols[sym]\r\n if gb:\r\n temp = self.group_by(condition[2], condition[3], attr, df)\r\n else:\r\n temp = self.df\r\n\r\n if situation == 0: # no where\r\n if attr == ['*']:\r\n return temp\r\n else:\r\n return temp.loc[:, attr]\r\n\r\n if situation == 1:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] == temp[condition[1]]]\r\n return temp.loc[temp[condition[0]] == temp[condition[1]], attr]\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] == condition[1]]\r\n return temp.loc[temp[condition[0]] == condition[1], attr]\r\n if situation == 2:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] > temp[condition[1]]]\r\n return temp.loc[temp[condition[0]] > temp[condition[1]], attr]\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] > condition[1]]\r\n return temp.loc[temp[condition[0]] > condition[1], attr]\r\n if situation == 3:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] >= temp[condition[1]]]\r\n return temp.loc[temp[condition[0]] >= temp[condition[1]], attr]\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] >= condition[1]]\r\n return temp.loc[temp[condition[0]] >= condition[1], attr]\r\n if situation == 4:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] < temp[condition[1]]]\r\n return temp.loc[temp[condition[0]] < temp[condition[1]], attr]\r\n if attr == ['*']:\r\n return 
temp.loc[temp[condition[0]] < condition[1]]\r\n return temp.loc[temp[condition[0]] < condition[1], attr]\r\n if situation == 5:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] <= temp[condition[1]]]\r\n return temp.loc[temp[condition[0]] <= temp[condition[1]], attr]\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] <= condition[1]]\r\n return temp.loc[temp[condition[0]] <= condition[1], attr]\r\n if situation == 8:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] != temp[condition[1]]]\r\n return temp.loc[temp[condition[0]] != temp[condition[1]], attr]\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]] != condition[1]]\r\n return temp.loc[temp[condition[0]] != condition[1], attr]\r\n\r\n if situation == 6:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]].str.contains(temp[condition[1]])]\r\n return temp.loc[temp[condition[0]].str.contains(temp[condition[1]]), attr]\r\n if attr == ['*']:\r\n return temp.loc[temp[condition[0]].str.contains(condition[1])]\r\n return temp.loc[temp[condition[0]].str.contains(condition[1]), attr]\r\n if situation == 7:\r\n if tag:\r\n if attr == ['*']:\r\n return temp.loc[~temp[condition[0]].str.contains(temp[condition[1]])]\r\n return temp.loc[~temp[condition[0]].str.contains(temp[condition[1]]), attr]\r\n if attr == ['*']:\r\n return temp.loc[~temp[condition[0]].str.contains(condition[1]), attr]\r\n return temp.loc[~temp[condition[0]].str.contains(condition[1]), attr]\r\n\r\n\r\n def group_by(self, agg, attr_gr, attr, df):\r\n \"\"\"\r\n :param situation: calculation of group by\r\n :param attr_gr: the attrs for group\r\n :param attr: attrs for calculation\r\n :param df: dataframe for group by\r\n :return: a dataframe\r\n \"\"\"\r\n agg_funcs = {\r\n 'MAX': 0,\r\n 'MIN': 1,\r\n 'AVG': 2,\r\n 'SUM': 3,\r\n 'COUNT': 4\r\n }\r\n situation = agg_funcs[agg]\r\n\r\n if attr == '*':\r\n raise Exception('ERROR: Invalid search.')\r\n gb = df.groupby(attr_gr)\r\n if situation == 0:\r\n return gb[attr].max()\r\n if situation == 1:\r\n return gb[attr].min()\r\n if situation == 2:\r\n return gb[attr].mean()\r\n if situation == 3:\r\n return gb[attr].sum()\r\n if situation == 4:\r\n return gb[attr].value_counts()\r\n\r\n def table_join(self, table, attr):\r\n df1 = pd.DataFrame(self.data)\r\n df2 = pd.DataFrame(table.data)\r\n return pd.merge(df1, df2, on=attr)\r\n\r\n\"\"\"\r\nif __name__ == '__main__':\r\n data = []\r\n for i in range(100):\r\n if i > 50:\r\n data.append([i,2])\r\n else:\r\n data.append([i,1])\r\n attr1 = {'name': 'id', 'type': 'INT', 'notnull': False, 'unique': False}\r\n attr2 = {'name': 'num', 'type': 'INT', 'notnull': False, 'unique': False}\r\n info = {'name': 'test', 'attrs': [attr1, attr2], 'primary': '', 'foreign': []}\r\n table = Table(['id', 'num'], info)\r\n table.df = pd.DataFrame(data, columns=['id', 'num'])\r\n res = table.search('*', '=', False, ['id', 5], False)\r\n print(res)\r\n\"\"\"","sub_path":"core/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":14735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"385521832","text":"from __future__ import annotations\n\nimport datetime\nimport functools\nimport json\nimport subprocess\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport click\nimport matplotlib.pyplot as pp\nimport pandas as pd\nimport seaborn as sns\n\n\n@click.group()\ndef cli():\n pass\n\n\n@dataclass\nclass Stats:\n name: str\n run: str\n 
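The `search` method above dispatches each comparison symbol through a long `if situation == ...` ladder. The same filtering can be table-driven with the `operator` module and a boolean mask; a sketch over a toy frame (the column names are illustrative, not from the project):

```python
import operator
import pandas as pd

OPS = {"=": operator.eq, "<>": operator.ne, ">": operator.gt,
       ">=": operator.ge, "<": operator.lt, "<=": operator.le}

df = pd.DataFrame({"id": [1, 2, 3], "num": [10, 20, 30]})

def search(frame, columns, attr, symbol, value):
    mask = OPS[symbol](frame[attr], value)        # boolean Series
    return frame.loc[mask] if columns == ["*"] else frame.loc[mask, columns]

print(search(df, ["id"], "num", ">=", 20))        # rows with num 20 and 30
```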
pattern: str # mixed, random, linear\n operation: str # read, write\n speed: float # in mb/s\n\n @staticmethod\n def from_run(new, resultfile: Path) -> list[Stats]:\n\n results = json.loads(resultfile.read_text())\n jobs = results[\"jobs\"]\n\n def get(name, value):\n job = next(j for j in jobs if j[\"jobname\"] == name)\n # NOTE there are also bw_dev and other stats that could be interesting\n return job[value][\"bw_mean\"] / 1e3 # in mb/s\n\n return [\n new(\"mixed\", \"read\", get(\"mixed\", \"read\")),\n new(\"mixed\", \"write\", get(\"mixed\", \"write\")),\n new(\"random\", \"read\", get(\"random-read\", \"read\")),\n new(\"random\", \"write\", get(\"random-write\", \"write\")),\n new(\"linear\", \"read\", get(\"linear-read\", \"read\")),\n new(\"linear\", \"write\", get(\"linear-write\", \"write\")),\n ]\n\n @staticmethod\n def from_runs(folder: Path) -> list[Stats]:\n return [\n stat\n for name in folder.iterdir()\n if name.is_dir()\n for run in name.glob(\"*.json\")\n for stat in Stats.from_run(\n functools.partial(Stats, name.name, run.stem),\n run,\n )\n ]\n\n\n@cli.command()\n@click.option(\"--basefolder\", default=\"./runs\")\ndef summary(basefolder):\n\n basefolder = Path(basefolder).expanduser()\n\n stats = Stats.from_runs(basefolder)\n df = pd.DataFrame(stats)\n df = df.rename(columns={\"speed\": \"speed [mb/s]\"})\n\n sns.set_theme(\"paper\", palette=\"colorblind\")\n sns.catplot(\n data=df,\n kind=\"bar\",\n col=\"pattern\",\n hue=\"operation\",\n x=\"name\",\n y=\"speed [mb/s]\",\n legend=True,\n legend_out=True,\n )\n pp.savefig(str(basefolder / \"plots.png\"), bbox_inches=\"tight\")\n\n\n@cli.command()\n@click.option(\"--basefolder\", default=\"./runs\")\n@click.option(\"--name\", default=\"debug\")\n@click.option(\n \"--testfile\",\n default=\"./disk-test-file\",\n help=\"The testfile has to be on the filesystem you want to benchmark. 
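`Stats.from_runs` above pre-binds the `name` and `run` fields with `functools.partial`, so the `new` callable handed to `from_run` only needs the remaining three fields. A stripped-down sketch of that construction pattern (the field values are made up):

```python
import functools
from dataclasses import dataclass

@dataclass
class Stat:
    name: str
    run: str
    pattern: str
    operation: str
    speed: float

# Bind the first two fields once per results file...
new = functools.partial(Stat, "ssd-ext4", "2024-01-01")  # values are made up

# ...then each call only supplies pattern/operation/speed.
stats = [new("mixed", "read", 512.3), new("mixed", "write", 498.7)]
assert stats[0].name == "ssd-ext4" and stats[1].operation == "write"
```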
It needs to be writeable by you.\",\n)\n@click.option(\"--repeat\", default=1)\ndef run(basefolder, name, testfile, repeat):\n\n basefolder = Path(basefolder).expanduser()\n basefolder.mkdir(parents=True, exist_ok=True)\n (basefolder / name).mkdir(parents=True, exist_ok=True)\n\n for i in range(repeat):\n\n now = datetime.datetime.now().isoformat(timespec=\"seconds\")\n resultsfile = basefolder / name / f\"{now}.json\"\n assert resultsfile.parent.exists()\n assert not resultsfile.exists()\n\n testfile = Path(testfile).expanduser()\n assert not testfile.exists()\n\n description = describe_location(testfile.parent)\n\n print(f\"run test for {testfile}\")\n print(f\"with results in {resultsfile}\")\n print(description)\n\n run_fio(testfile, resultsfile, description)\n\n\ndef describe_location(path) -> str:\n\n mount = (\n subprocess.run(\n [\"findmnt\", \"--noheadings\", \"--output\", \"SOURCE\", \"--target\", str(path)],\n capture_output=True,\n text=True,\n check=True,\n )\n .stdout.split(\"\\n\")[0]\n .strip()\n )\n\n devices = subprocess.run(\n [\"lsblk\", \"--inverse\", \"--output\", \"+model\", mount],\n capture_output=True,\n text=True,\n check=True,\n ).stdout.strip()\n\n return devices\n\n\ndef run_fio(testfile, resultsfile, description):\n\n try:\n # see https://fio.readthedocs.io/en/latest/\n command = [\n \"fio\",\n \"--filename\",\n str(testfile),\n \"--output-format=json\",\n \"--output\",\n str(resultsfile),\n \"--description\",\n json.dumps(description).strip('\"'),\n # settings inspired by https://arstech.net/how-to-measure-disk-performance-iops-with-fio-in-linux/\n \"fio-job\",\n ]\n subprocess.run(command, check=True)\n print()\n\n finally:\n testfile.unlink(missing_ok=True)\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"187209292","text":"from pathlib import Path\nimport json\nimport os\n\nroot = Path(__file__).parent.parent.parent / \"assets\" / \"datafiles\"\nhabitat_json = root / \"habitat.json\"\npokemons_json = root / \"pokemon.json\"\npokedex_extra_json = root / \"pokedex_extra.json\"\npokemon_order_json = root / \"pokemon_order.json\"\nmoves_json = root / \"moves.json\"\ntm_json = root / \"move_machines.json\"\nabilities_json = root / \"abilities.json\"\nfeats_json = root / \"feats.json\"\n\n\ndef habitat():\n with open(habitat_json, \"r\") as fp:\n with open(pokemon_order_json, \"r\") as f:\n pokemon_data = json.load(f)\n habitat_data = json.load(fp)\n\n for _, pokemon_list in habitat_data.items():\n for poke in pokemon_list:\n pokemon_data[\"number\"].remove(poke)\n print(pokemon_data[\"number\"])\n\ndef pokedex_extra():\n with open(pokedex_extra_json, \"r\", encoding=\"utf8\") as fp:\n with open(pokemon_order_json, \"r\") as f:\n pokemon_order_data = json.load(f)\n pokedex_extra_data = json.load(fp)\n\n for species in pokemon_order_data[\"unique\"]:\n try:\n pokedex_extra_data[species]\n except:\n print(\"Can't find\", species)\n\ndef moves():\n with open(pokemons_json, \"r\") as fp:\n with open(moves_json, \"r\") as f:\n move_data = json.load(f)\n pokemon_data = json.load(fp)\n \n for _, data in pokemon_data.items():\n for move in data[\"Moves\"][\"Starting Moves\"]:\n if not move in move_data:\n print(\"Can't find move: \", move)\n\n\ndef tm():\n with open(tm_json, \"r\") as fp:\n with open(moves_json, \"r\") as f:\n move_data = json.load(f)\n tm_data = json.load(fp)\n\n for 
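`describe_location` above repeats the same `subprocess.run(..., capture_output=True, text=True, check=True)` boilerplate for `findmnt` and `lsblk`. A small helper capturing that pattern; the commented call is illustrative and assumes `findmnt` is installed on the machine:

```python
import subprocess

def run_stdout(cmd):
    """Run cmd, raise CalledProcessError on non-zero exit, return stripped stdout."""
    return subprocess.run(
        cmd, capture_output=True, text=True, check=True
    ).stdout.strip()

# Illustrative call, assuming findmnt exists:
# mount = run_stdout(["findmnt", "--noheadings", "--output", "SOURCE",
#                     "--target", "/"]).split("\n")[0]
```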
num, move in tm_data.items():\n if not move in move_data:\n print(\"Can't find TM: \", num, move)\n\ndef abilities():\n with open(pokemons_json, \"r\") as fp:\n with open(abilities_json, \"r\") as f:\n ability_data = json.load(f)\n pokemon_data = json.load(fp)\n\n for _, data in pokemon_data.items():\n for ability in data[\"Abilities\"]:\n if not ability in ability_data:\n print(\"Can't find ability \", ability)\n if \"Hidden Ability\" in data and data[\"Hidden Ability\"] not in ability_data:\n print(\"Can't find hidden ability \", data[\"Hidden Ability\"])\n\ndef images():\n with open(pokemons_json, \"r\") as fp:\n pokemon_data = json.load(fp)\n for p, data in pokemon_data.items():\n for x in [\"pokemons\", \"sprites\"]:\n file_path = r\"D:\\Repo\\Pokedex\\assets\\textures/{}/{}{}.png\".format(x, data[\"index\"], p)\n if not os.path.exists(file_path):\n print(\"Can't find image: \", data[\"index\"],p, \"in atlas \", x)\npokedex_extra()","sub_path":"tools/scripts/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"259066490","text":"\"\"\"This module provides persistent global storage for other modules.\"\"\"\n\nfrom collections import defaultdict\n\nfrom .util import printf\nfrom .settings import Settings\n\n\nif 'plugin_is_loaded' not in globals():\n settings = Settings()\n\n scheme = None\n\n # A mapping between buffer ids and errors,\n # Dict[buffer_id, [error]]\n errors = defaultdict(list)\n\n # A mapping between linter class names and linter classes\n linter_classes = {}\n\n # A mapping between view ids and a set of linter instances\n view_linters = {}\n\n # A mapping between view ids and views\n views = {}\n\n # Every time a view is modified, this is updated with a mapping between a view id\n # and the time of the modification. This is checked at various stages of the linting\n # process. 
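The `errors` mapping above uses `defaultdict(list)` so call sites can append to a buffer id without initializing the key first; a tiny self-contained sketch of that pattern:

```python
from collections import defaultdict

errors = defaultdict(list)

# Appending to a missing buffer id creates the empty list on the fly.
errors[42].append("undefined name 'foo'")
errors[42].append("line too long")

assert errors[42] == ["undefined name 'foo'", "line too long"]
assert errors[7] == []   # merely reading a new key also materializes it
```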
If a view has been modified since the original modification, the\n # linting process stops.\n last_hit_times = {}\n\n edits = defaultdict(list)\n\n # Whether sys.path has been imported from the system.\n sys_path_imported = False\n\n # Set to true when the plugin is loaded at startup\n plugin_is_loaded = False\n\n\ndef edit(vid, edit):\n \"\"\"Perform an operation on a view with the given edit object.\"\"\"\n callbacks = edits.pop(vid, [])\n\n for c in callbacks:\n c(edit)\n\n\ndef debug_mode():\n \"\"\"Return whether the \"debug\" setting is True.\"\"\"\n return settings.get('debug', False)\n\n\ndef debug(*args):\n \"\"\"Print args to the console if the \"debug\" setting is True.\"\"\"\n if debug_mode():\n printf(*args)\n","sub_path":"lint/persist.py","file_name":"persist.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"61309060","text":"from ball import Ball\nfrom palette import Palette\nimport numpy as np\n\nclass Air_hockey_env():\n\tdef __init__(self, X, Y):\n\t\tself.X = X\n\t\tself.Y = Y\n\t\tself.palette1 = Palette(28, Y-2, 5) \n\t\tself.palette2 = Palette(28, 2, 5)\n\t\tself.ball = Ball(X, Y, self.palette1, self.palette2)\n\t\tself.points1 = 0\n\t\tself.points2 = 0\n\t\t#self.state = np.ones((self.Y, self.X), dtype=np.int8)\n\n\t\tself.actions = [\"LEFT\", \"RIGHT\", \"NONE\"]\n\t\tself.render_history_palette1 = []\n\t\tself.render_history_palette2 = []\n\n\t\tself.action_n = len(self.actions)\n\n\n\tdef sample(self):\n\t\treturn np.random.choice(self.actions, 1)[0]\n\n\tdef reset(self):\n\t\tself.palette1 = Palette(28, self.Y-2, 5) \n\t\tself.palette2 = Palette(28, 2, 5)\n\t\tself.ball = Ball(self.X, self.Y, self.palette1, self.palette2)\n\t\tself.points1 = 0\n\t\tself.points2 = 0\n\t\tself.render_history_palette1 = []\n\t\tself.render_history_palette2 = []\n\t\t#self.state_update()\n\t\tstate1 = np.array([self.palette1.x, self.palette1.y, self.ball.x, self.ball.y, self.ball.dx, self.ball.dy])\n\t\tstate2 = np.array([self.palette2.x, self.palette2.y, self.ball.x, self.ball.y, self.ball.dx, self.ball.dy])\n\n\t\treturn state1, state2\n\n\tdef step(self, action1, action2):\n\n\t\tif self.palette1.shoot:\n\t\t\t\tif self.palette1.shoot == 2:\n\t\t\t\t\tself.palette1.y += 2\n\t\t\t\t\tself.palette1.shoot = 0\n\t\t\t\telse:\n\t\t\t\t\tself.palette1.y -= 1\n\t\t\t\t\tself.palette1.shoot += 1\n\t\telif action1 == \"LEFT\":\n\t\t\tif self.palette1.x > 1:\n\t\t\t\tself.palette1.x -= 1\n\t\telif action1 == \"RIGHT\":\n\t\t\tif self.palette1.x < self.X-1 - self.palette1.len:\n\t\t\t\tself.palette1.x += 1\n\n\n\t\tif self.palette2.shoot:\n\t\t\tif self.palette2.shoot == 2:\n\t\t\t\tself.palette2.y -= 2\n\t\t\t\tself.palette2.shoot = 0\n\t\t\telse:\n\t\t\t\tself.palette2.y += 1\n\t\t\t\tself.palette2.shoot += 1\n\t\telif action2 == \"LEFT\":\n\t\t\tif self.palette2.x > 1:\n\t\t\t\tself.palette2.x -= 1\n\t\telif action2 == \"RIGHT\":\n\t\t\tif self.palette2.x < self.X-1 - self.palette2.len:\n\t\t\t\tself.palette2.x += 1\n\n\t\tself.ball.next_move()\n\t\tself.ball_clear()\n\n\t\treward1 = 0\n\t\treward2 = 0\n\n\t\tif self.ball.y == 1:\n\t\t\treward2 -= 10\n\t\t\tself.points1 += 1\n\n\t\telif self.ball.y == self.Y-2:\n\t\t\treward1 -= 10\n\n\t\t\tself.points2 += 1\n\n\t\tdone = False\n\t\tif self.points1 == 5 or self.points2 == 5:\n\t\t\tdone = True\n\n\t\t#self.state_update()\n\n\t\tself.render_history_palette1.append(action1)\n\t\tself.render_history_palette2.append(action2)\n\n\t\tstate1 = 
np.array([self.palette1.x, self.palette1.y, self.ball.x, self.ball.y, self.ball.dx, self.ball.dy])\n\t\tstate2 = np.array([self.palette2.x, self.palette2.y, self.ball.x, self.ball.y, self.ball.dx, self.ball.dy])\n\n\n\t\treturn state1, state2, reward1, reward2, done\n\n\tdef render(self):\n\t\treturn self.render_history_palette1, self.render_history_palette2\n\n\tdef ball_clear(self):\n\t#print(ball.x, ball.y, ball.dx, ball.dy)\n\n\t\tif self.ball.reset:\n\t\t\tself.ball.reset = False\n\t\t\tself.ball.y = 10\n\t\t\tself.ball.dy *= -1\n\t\t\tself.ball.dx = int(self.ball.dx / abs(self.ball.dx))\n\n\t\tif self.ball.y == 1 or self.ball.y == self.ball.dimY-1:\n\t\t\tself.ball.reset = True\n\n\t# def state_update(self):\n\t# \tself.state = np.zeros((self.Y, self.X), dtype=np.byte)\n\t\t\n\t# \tfor i in range(self.palette1.len):\n\t# \t self.state[self.palette1.y, self.palette1.x+i] = 1\n\t# \t self.state[self.palette2.y, self.palette2.x+i] = 1\n\n\t# \tself.state[self.ball.y, self.ball.x] = 1","sub_path":"model4_diff_reward_diff_shape_no_shooting_diff_model/air_hockey_env.py","file_name":"air_hockey_env.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133925080","text":"import setuptools\nimport shutil\nimport sys\nimport os\nimport subprocess\n\nsetuptools.setup(\n version=\"0.0.1\",\n license='mit',\n name='cli-aws',\n author='nathan todd-stone',\n author_email='me@nathants.com',\n url='http://github.com/nathants/cli-aws',\n packages=['aws'],\n python_requires='>=3.6',\n install_requires=['requests >2, <3',\n 'boto3 >1, <2',\n 'awscli >1, <2',\n 'argh >0.26, <0.27'],\n description='composable, succinct aws scripts',\n)\n\nparent = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bin')\n\nif 'develop' not in sys.argv:\n try:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])\n except:\n print('fatal: failure: pip install -r requirements.txt ')\n sys.exit(1)\n\nscripts = [path\n for service in os.listdir(parent)\n if service.startswith('aws')\n for script in os.listdir(os.path.join(parent, service))\n for path in [os.path.abspath(os.path.join(parent, service, script))]\n if os.path.isfile(path)]\n\ndst_path = os.path.dirname(os.path.abspath(sys.executable))\nfor src in scripts:\n name = os.path.basename(src)\n dst = os.path.join(dst_path, name)\n try:\n os.remove(dst)\n except FileNotFoundError:\n pass\n if 'develop' in sys.argv:\n os.symlink(src, dst)\n os.chmod(dst, 0o775)\n print('link:', dst, '=>', src, file=sys.stderr)\n else:\n shutil.copyfile(src, dst)\n os.chmod(dst, 0o775)\n print('copy:', src, '=>', dst, file=sys.stderr)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133005373","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 15 16:36:27 2018\n\n@author: Kazuki\n\"\"\"\n\nimport gc, os\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count, Pool\nfrom sklearn.model_selection import StratifiedKFold\n#from glob import glob\nimport count\nimport utils, utils_cat\n#utils.start(__file__)\n#==============================================================================\n\nPREF = 
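The `Air_hockey_env` class above follows a gym-style contract: `reset()` returns both players' initial states, `step(a1, a2)` returns states, per-player rewards and a done flag, and `sample()` draws a random action. A random-rollout driver under that contract; this is a sketch only, since it assumes the `ball` and `palette` modules the environment imports are importable, and the board size passed in is an illustrative guess:

```python
# Hypothetical driver; needs the ball/palette modules from the same project.
from air_hockey_env import Air_hockey_env

env = Air_hockey_env(56, 40)          # board size is an illustrative guess
state1, state2 = env.reset()

done = False
while not done:
    a1, a2 = env.sample(), env.sample()
    state1, state2, r1, r2, done = env.step(a1, a2)

print("final score:", env.points1, "-", env.points2)
```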
'f052_'\n\nos.system(f'rm ../feature/t*_{PREF}*')\n\nSEED = 71\nFOLD = 5\nlabel_name = 'f001_EXT_SOURCE_3'\n\nfeature1 = 'EXT_SOURCE_3_test-imputation'\nfeature2 = 'EXT_SOURCE_3_diff'\n\n\n\nparams = {\n 'objective': 'regression',\n 'metric': 'rmse',\n 'learning_rate': 0.01,\n 'max_depth': 6,\n 'num_leaves': 63,\n 'max_bin': 255,\n \n 'min_child_weight': 10,\n 'min_data_in_leaf': 150,\n 'reg_lambda': 0.5, # L2 regularization term on weights.\n 'reg_alpha': 0.5, # L1 regularization term on weights.\n \n 'colsample_bytree': 0.9,\n 'subsample': 0.9,\n 'nthread': 21,\n# 'nthread': cpu_count(),\n 'bagging_freq': 1,\n 'verbose':-1,\n 'seed': SEED\n }\n\n\nnp.random.seed(SEED)\n# =============================================================================\n# load\n# =============================================================================\n\nuse_files = [\n 'f001', \n 'f002', \n ]\n\nfiles = utils.get_use_files(use_files, True)\n\nX_train = pd.concat([\n pd.read_feather(f) for f in tqdm(files, mininterval=60)\n ], axis=1)\ny_train = X_train[label_name]\nX_train.drop(label_name, axis=1, inplace=True)\n\n\nCAT = list( set(X_train.columns) & set(utils_cat.ALL) )\n\nif X_train.columns.duplicated().sum()>0:\n raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')\nprint('no dup :) ')\nprint(f'X_train.shape {X_train.shape}')\n\ngc.collect()\n\n\n\nfiles = utils.get_use_files(use_files, False)\n\nX_test = pd.concat([\n pd.read_feather(f) for f in tqdm(files, mininterval=60)\n ], axis=1)\ny_test = X_test[label_name]\nX_test.drop(label_name, axis=1, inplace=True)\n\n\n\n#X_train_train, y_train_train = X_train[~y_train.isnull()], y_train[~y_train.isnull()]\n#X_train_test, y_train_test = X_train[y_train.isnull()], y_train[y_train.isnull()]\n\nX_test_train, y_test_train = X_test[~y_test.isnull()], y_test[~y_test.isnull()]\nX_test_test, y_test_test = X_test[y_test.isnull()], y_test[y_test.isnull()]\n\n# =============================================================================\n# \n# =============================================================================\n\n\ndtrain = lgb.Dataset(X_test_train, y_test_train, categorical_feature=CAT)\ngc.collect()\n\nret = lgb.cv(params, dtrain, 99999, nfold=FOLD, stratified=False,\n early_stopping_rounds=100, verbose_eval=50,\n seed=SEED)\n\n\n# =============================================================================\n# \n# =============================================================================\nNROUND = int(len(ret['rmse-mean'])*1.3) # 12234\nprint(f'NROUND: {NROUND}')\n\ndtrain = lgb.Dataset(X_test_train, y_test_train, categorical_feature=CAT)\n\nmodel = lgb.train(params, dtrain, NROUND)\n\ntrain = pd.DataFrame(model.predict(X_train), \n columns=[feature1])\n\ntrain[feature2] = y_train - train[feature1]\n\n\n# =============================================================================\n# otuput\n# =============================================================================\nutils.to_feature(train.add_prefix(PREF), '../feature/train')\n#utils.to_feature(sub_test.add_prefix(PREF), '../feature/test')\n\n#==============================================================================\nutils.end(__file__)\n\n\n\n","sub_path":"Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/052.py","file_name":"052.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"523581035","text":"import os\nimport argparse\nimport 
glob\n\n\ndef setup_args():\n parser = argparse.ArgumentParser(prog='post_stats.py',description=\"Jekyll Workflow Enhancer: Statistics on Total Posts\")\n\n parser.add_argument(\"--verbose\", \"-v\", default=False, action=\"store_true\")\n parser.add_argument(\"--incremential\", \"-i\", default=False, action=\"store_true\")\n\n return parser.parse_args()\n\ndef main(args=None):\n ## Storage Dict\n aprox = {\n 'char':0,\n 'words':0,\n 'sentances':0,\n 'paragraphs':0\n }\n\n ## Get Files In Target Directory\n for file_d in os.listdir(os.path.dirname(__file__)+\"\\\\..\\\\_posts\\\\\"):\n ## Fix Filename with local offset\n file_d = os.path.dirname(__file__)+\"\\\\..\\\\_posts\\\\\" + file_d\n\n ## Open Current File\n with open(file_d) as fin:\n ## Read Data\n cfile = fin.read()\n\n ## Store Information\n aprox[\"char\"] += len(cfile)\n aprox[\"words\"] += cfile.count(' ')+1\n aprox[\"sentances\"] += cfile.count('.')\n aprox[\"paragraphs\"] += cfile.count('\\n\\n')\n\n ## Print Incremential Information\n if args.verbose or args.incremential:\n print(aprox)\n\n ## Print Totals\n print(\"\\n\\n{separator}\\nTotal Approximate Amount of:\\nCharacters: {char}\\nWords: {words}\\nSentances: {sentances}\\nParagraphs: {paragraphs}\\n{separator}\".format(\n separator=\"=\"*80,\n **aprox\n )\n )\nif __name__ == '__main__':\n main(setup_args())\n","sub_path":"utils/post_stats.py","file_name":"post_stats.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"519066114","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/10/23 20:04\n# @Author : Wang Xin\n# @Email : wangxin_buaa@163.com\n\n\nimport torch\nimport torch.nn as nn\n\n\nclass MaskedMSELoss(nn.Module):\n def __init__(self):\n super(MaskedMSELoss, self).__init__()\n\n def forward(self, pred, target):\n assert pred.dim() == target.dim(), \"inconsistent dimensions\"\n valid_mask = (target > 0).detach()\n diff = target - pred\n diff = diff[valid_mask]\n self.loss = (diff ** 2).mean()\n return self.loss\n\n\nclass MaskedL1Loss(nn.Module):\n def __init__(self):\n super(MaskedL1Loss, self).__init__()\n\n def forward(self, pred, target):\n assert pred.dim() == target.dim(), \"inconsistent dimensions\"\n valid_mask = (target > 0).detach()\n diff = target - pred\n diff = diff[valid_mask]\n self.loss = diff.abs().mean()\n return self.loss\n\n\nclass berHuLoss(nn.Module):\n def __init__(self):\n super(berHuLoss, self).__init__()\n\n def forward(self, pred, target):\n assert pred.dim() == target.dim(), \"inconsistent dimensions\"\n\n huber_c = torch.max(pred - target)\n huber_c = 0.2 * huber_c\n\n valid_mask = (target > 0).detach()\n diff = target - pred\n diff = diff[valid_mask]\n diff = diff.abs()\n\n huber_mask = (diff > huber_c).detach()\n\n diff2 = diff[huber_mask]\n diff2 = diff2 ** 2\n\n self.loss = torch.cat((diff, diff2)).mean()\n\n return self.loss\n\ndef compute_scale_and_shift(prediction, target, mask):\n # system matrix: A = [[a_00, a_01], [a_10, a_11]]\n a_00 = torch.sum(mask * prediction * prediction, (1, 2))\n a_01 = torch.sum(mask * prediction, (1, 2))\n a_11 = torch.sum(mask, (1, 2))\n\n # right hand side: b = [b_0, b_1]\n b_0 = torch.sum(mask * prediction * target, (1, 2))\n b_1 = torch.sum(mask * target, (1, 2))\n\n # solution: x = A^-1 . b = [[a_11, -a_01], [-a_10, a_00]] / (a_00 * a_11 - a_01 * a_10) . 
b\n x_0 = torch.zeros_like(b_0)\n x_1 = torch.zeros_like(b_1)\n\n det = a_00 * a_11 - a_01 * a_01\n valid = det.nonzero()\n\n x_0[valid] = (a_11[valid] * b_0[valid] - a_01[valid] * b_1[valid]) / det[valid]\n x_1[valid] = (-a_01[valid] * b_0[valid] + a_00[valid] * b_1[valid]) / det[valid]\n\n return x_0, x_1\n\n\ndef reduction_batch_based(image_loss, M):\n # average of all valid pixels of the batch\n\n # avoid division by 0 (if sum(M) = sum(sum(mask)) = 0: sum(image_loss) = 0)\n divisor = torch.sum(M)\n\n if divisor == 0:\n return 0\n else:\n return torch.sum(image_loss) / divisor\n\n\ndef reduction_image_based(image_loss, M):\n # mean of average of valid pixels of an image\n\n # avoid division by 0 (if M = sum(mask) = 0: image_loss = 0)\n valid = M.nonzero()\n\n image_loss[valid] = image_loss[valid] / M[valid]\n\n return torch.mean(image_loss)\n\n\ndef mse_loss(prediction, target, mask, reduction=reduction_batch_based):\n\n M = torch.sum(mask, (1, 2))\n res = prediction - target\n image_loss = torch.sum(mask * res * res, (1, 2))\n\n return reduction(image_loss, 2 * M)\n\n\ndef gradient_loss(prediction, target, mask, reduction=reduction_batch_based):\n\n M = torch.sum(mask, (1, 2))\n\n diff = prediction - target\n diff = torch.mul(mask, diff)\n\n grad_x = torch.abs(diff[:, :, 1:] - diff[:, :, :-1])\n mask_x = torch.mul(mask[:, :, 1:], mask[:, :, :-1])\n grad_x = torch.mul(mask_x, grad_x)\n\n grad_y = torch.abs(diff[:, 1:, :] - diff[:, :-1, :])\n mask_y = torch.mul(mask[:, 1:, :], mask[:, :-1, :])\n grad_y = torch.mul(mask_y, grad_y)\n\n image_loss = torch.sum(grad_x, (1, 2)) + torch.sum(grad_y, (1, 2))\n\n return reduction(image_loss, M)\n\n\nclass MSELoss(nn.Module):\n def __init__(self, reduction='batch-based'):\n super().__init__()\n\n if reduction == 'batch-based':\n self.__reduction = reduction_batch_based\n else:\n self.__reduction = reduction_image_based\n\n def forward(self, prediction, target, mask):\n return mse_loss(prediction, target, mask, reduction=self.__reduction)\n\n\nclass GradientLoss(nn.Module):\n def __init__(self, scales=4, reduction='batch-based'):\n super().__init__()\n\n if reduction == 'batch-based':\n self.__reduction = reduction_batch_based\n else:\n self.__reduction = reduction_image_based\n\n self.__scales = scales\n\n def forward(self, prediction, target, mask):\n total = 0\n\n for scale in range(self.__scales):\n step = pow(2, scale)\n\n total += gradient_loss(prediction[:, ::step, ::step], target[:, ::step, ::step],\n mask[:, ::step, ::step], reduction=self.__reduction)\n\n return total\n\n\nclass ScaleAndShiftInvariantLoss(nn.Module):\n def __init__(self, alpha=0.5, scales=4, reduction='batch-based'):\n super().__init__()\n\n self.__data_loss = MSELoss(reduction=reduction)\n self.__regularization_loss = GradientLoss(scales=scales, reduction=reduction)\n self.__alpha = alpha\n\n self.__prediction_ssi = None\n\n def forward(self, prediction, target):\n\n mask = torch.ones(target.size()).bool().cuda() # no need for mask\n\n scale, shift = compute_scale_and_shift(prediction, target, mask)\n self.__prediction_ssi = scale.view(-1, 1, 1) * prediction + shift.view(-1, 1, 1)\n\n total = self.__data_loss(self.__prediction_ssi, target, mask)\n if self.__alpha > 0:\n total += self.__alpha * self.__regularization_loss(self.__prediction_ssi, target, mask)\n\n return total\n\n def __get_prediction_ssi(self):\n return self.__prediction_ssi\n\n prediction_ssi = 
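`compute_scale_and_shift` above solves, per image, the 2x2 least-squares system for the scale `s` and shift `t` minimizing the masked residual sum of `(s*p + t - g)^2`, via Cramer's rule. A quick numeric sanity check that it recovers a known affine transform, assuming the `compute_scale_and_shift` function defined above is in scope:

```python
import torch

# Target is an exact affine transform of the prediction: g = 2*p + 0.5,
# so the recovered (scale, shift) should come back as (2.0, 0.5).
prediction = torch.rand(1, 8, 8)
target = 2.0 * prediction + 0.5
mask = torch.ones_like(prediction)

scale, shift = compute_scale_and_shift(prediction, target, mask)
assert torch.allclose(scale, torch.tensor([2.0]), atol=1e-4)
assert torch.allclose(shift, torch.tensor([0.5]), atol=1e-4)
```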
property(__get_prediction_ssi)","sub_path":"criteria.py","file_name":"criteria.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"184761254","text":"\"\"\" Below, see a sample of /var/log/messages.\n\nJan 20 03:25:08 fakehost logrotate: ALERT exited abnormally with [1]\nJan 20 03:25:08 fakehost logrotate: ALERT exited abnormally with [1]\nJan 20 03:25:09 fakehost run-parts(/etc/cron.daily)[20447]: finished logrotate\n\nWrite a script which parses /var/log/messages and generates a\nCSV with two columns: minute, number_of_messages in sorted time order.\n---------- begin sample output ----------\nminute, number_of_messages\nJan 20 03:25,2\nJan 20 03:26,2\nJan 20 03:30,2\nJan 20 05:03,1\nJan 20 05:20,1\nJan 20 05:22,6\n---------- end sample output ------------\n\"\"\"\n\nimport csv\n\ndef Parsing():\n logs = open(\"fakelog1.txt\", \"r\")\n \n output = {}\n\n minute = len(\"Jan 20 03:25\")\n\n for log in logs:\n k = log[:minute]\n v = output.get(k, 0) + 1\n output[k] = v\n\n print(output)\n\n with open(\"output.csv\", \"w\") as f:\n for key in output.keys():\n f.write(\"%s, %s\\n\"%(key, output[key]))\n\nParsing()","sub_path":"Programming/Python/log_parsing.py","file_name":"log_parsing.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2927261","text":"import os\nimport sys\nimport subprocess\nimport stat\nimport platform\nimport time\nimport distro\n\n\nclass Utility:\n \"\"\"\n Utility class holds all the working functions!\n \"\"\"\n\n @staticmethod\n def is_root():\n \"\"\"\n if the current user has root privileges or not!\n :return: boolean\n \"\"\"\n is_root = False\n try:\n if not os.path.exists('/etc/foo'):\n os.makedirs('/etc/foo')\n else:\n os.rename('/etc/foo', '/etc/bar')\n is_root = True\n except IOError as e:\n if 'Permission denied' in str(e):\n print(\"You need root permissions to do this (O_o)\")\n Utility().terminate(\"Permission Denied!\")\n finally:\n if os.path.exists('/etc/foo'):\n os.rmdir('/etc/foo')\n if os.path.exists('/etc/bar'):\n os.rmdir('/etc/bar')\n return is_root\n\n @staticmethod\n def system_upgrade(is_test=False):\n \"\"\"\n :param is_test -> if true, upgrade command will run in force yes mode.\n run system update upgrade before doing anything.\n :return:\n \"\"\"\n return_code = 200\n error_message = \"\"\n try:\n file_status = os.stat('bash_scripts/updater.sh')\n os.chmod('bash_scripts/updater.sh', file_status.st_mode | stat.S_IEXEC)\n result = subprocess.Popen(['./bash_scripts/updater.sh', str(is_test)])\n result.communicate()\n return_code = result.returncode\n del file_status\n del result\n except IOError as error:\n return_code = 255\n error_message = str(error)\n except subprocess.CalledProcessError as error:\n return_code = 255\n error_message = str(error)\n except OSError as error:\n return_code = 255\n error_message = str(error)\n except KeyboardInterrupt as error:\n return_code = 255\n error_message = str(error)\n finally:\n if return_code != 0:\n # error occurred in the process\n Utility().terminate(\"Error Code --> \" + str(return_code) + \" \" + error_message)\n return return_code\n\n @staticmethod\n def install_psql(is_test=False):\n \"\"\"\n Install Postgresql and Metasploit Framework related stuff\n :return: 0 on success\n \"\"\"\n return_code = 200\n error_message = \"\"\n try:\n file_status = os.stat('bash_scripts/psql.sh')\n 
os.chmod('bash_scripts/psql.sh', file_status.st_mode | stat.S_IEXEC)\n result = subprocess.Popen(['./bash_scripts/psql.sh', str(is_test)])\n result.communicate()\n return_code = result.returncode\n del result\n del file_status\n except IOError as error:\n return_code = 255\n error_message = str(error)\n except subprocess.CalledProcessError as error:\n return_code = 255\n error_message = str(error)\n except OSError as error:\n return_code = 255\n error_message = str(error)\n except KeyboardInterrupt as error:\n return_code = 255\n error_message = str(error)\n finally:\n if return_code != 0:\n Utility().terminate(\"Error Code --> \" + str(return_code) + \" \" + error_message)\n return return_code\n\n @staticmethod\n def install_metasploit(is_test=False):\n \"\"\"\n installs metasploit framework in the system.\n :param is_test: :bool\n :return: :Integer | exit value\n \"\"\"\n return_code = 200\n error_message = \"\"\n try:\n metasploit = os.stat('bash_scripts/metasploit.sh')\n os.chmod('bash_scripts/metasploit.sh', metasploit.st_mode | stat.S_IEXEC)\n\n if distro.linux_distribution(False)[0] == 'kali':\n result = subprocess.Popen(['./bash_scripts/metasploit.sh', 'Kali', str(is_test)])\n else:\n result = subprocess.Popen(['./bash_scripts/metasploit.sh', 'Linux', str(is_test)])\n result.communicate()\n return_code = result.returncode\n del result\n del metasploit\n except IOError as error:\n return_code = 255\n error_message = str(error)\n except subprocess.CalledProcessError as error:\n return_code = 255\n error_message = str(error)\n except OSError as error:\n return_code = 255\n error_message = str(error)\n except KeyboardInterrupt as error:\n return_code = 255\n error_message = str(error)\n finally:\n if return_code != 0:\n Utility().terminate(\"Error Code --> \" + str(return_code) + \" \" + error_message)\n return return_code\n\n @staticmethod\n def chmod_scripts():\n \"\"\"\n Installing scripts and apps inside /opt/\n :return:\n \"\"\"\n return_code = 0\n error_message = \"\"\n try:\n file_status = os.stat('bash_scripts/install_scripts.sh')\n os.chmod('bash_scripts/install_scripts.sh', file_status.st_mode | stat.S_IEXEC)\n del file_status\n except IOError as error:\n return_code = 255\n error_message = str(error)\n except subprocess.CalledProcessError as error:\n return_code = 255\n error_message = str(error)\n except OSError as error:\n return_code = 255\n error_message = str(error)\n except KeyboardInterrupt as error:\n return_code = 255\n error_message = str(error)\n finally:\n if return_code != 0:\n Utility().terminate(\"Error Code --> \" + str(return_code) + \" \" + error_message)\n return return_code\n\n @classmethod\n def terminate(cls, message=None):\n \"\"\"\n force the python script before exiting\n :param message: exit message\n :return: none\n \"\"\"\n sys.exit(message)\n\n @classmethod\n def baby_step(cls, is_test=False):\n \"\"\"\n baby step takes care of the installing scripts\n :param is_test: boolean\n :return:\n \"\"\"\n return_code = 0\n error_message = \"\"\n try:\n while True:\n print(\"Choose your options:\"\n \"\\n** Press [ 1 ] to install Discover Script (Former Backtrack Script)\"\n \"\\n** Press [ 2 ] to install SMBExec (grab hashes out of Domain Controller and reverse shells)\"\n \"\\n** Press [ 3 ] to install Veil 3.0 (to create Python based Metepreter executable)\"\n \"\\n** Press [ 4 ] to install PeepingTom (to take snapshots of web pages) (NOT AVAILABLE NOW***)\"\n \"\\n** Press [ 5 ] to install Eye Witness (to take snapshots of web pages\"\n \"\\n** Press 
[ 6 ] to install Powersploit (to create Powershell script)\"\n \"\\n** Press [ 7 ] to install Responder (to gain NTLM challenge/hashes)\"\n \"\\n** Press [ 8 ] to install Social Engineering Toolkit\"\n \"\\n** Press [ 9 ] to install bypassUAC (NOT AVAILABLE NOW***)\"\n \"\\n** Press [ 10 ] to install beEF for cross site scripting\"\n \"\\n** Press [ 11 ] to install Fuzzing Lists (for Social Engineering Campaign)\"\n \"\\n** Press [ 12 ] to download & install other necessary scripts like\\n - WCE (Windows \"\n \"Credential \"\n \"Editor), \"\n \"\\n - Mimikatz (to recover password from memory),\\n - Custom password list from Skull \"\n \"Security and \"\n \"Crackstation,\\n - & NMap scripts (for quicker scanning and smarter identification\"\n \"\\n** Press [ 13 ] to download ** all scripts **\"\n \"\\n\\n** Press [ 14 ] to terminate the script\")\n\n user_input = input(\"Your option:: \")\n if user_input == \"14\":\n # terminate the loop\n time.sleep(3)\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~That's a wrap baby!~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n sys.exit(0) # The END\n elif user_input == \"13\":\n # install all\n return_code = Utility.__do_it_all(is_test)\n if return_code != 0:\n Utility().terminate(\"System terminated!\")\n else:\n time.sleep(3)\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~That's a wrap baby!~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n else:\n time.sleep(5)\n result = subprocess.Popen(['./bash_scripts/install_scripts.sh', str(is_test), user_input])\n result.communicate()\n return_code = result.returncode\n del result\n if return_code != 0:\n Utility().terminate(\"System terminated!\")\n except subprocess.CalledProcessError as error:\n return_code = 255\n error_message = str(error)\n except OSError as error:\n return_code = 255\n error_message = str(error)\n except KeyboardInterrupt as error:\n return_code = 255\n error_message = str(error)\n finally:\n if return_code != 0:\n Utility().terminate(\"System terminated! \" + str(error_message))\n\n @classmethod\n def __do_it_all(cls, is_test=False):\n \"\"\"\n do it install all the scripts in a linear fashion.\n :param is_test: bool\n :return: integer (success = 0, failure = 255)\n \"\"\"\n return_code = 0\n for user_input in range(1, 14):\n try:\n time.sleep(2)\n result = subprocess.Popen(['./bash_scripts/install_scripts.sh', str(is_test), str(user_input)])\n result.communicate()\n return_code = result.returncode\n del result\n if return_code != 0:\n Utility().terminate(\"System terminated!\")\n except subprocess.CalledProcessError as error:\n return_code = 255\n error_message = str(error)\n except OSError as error:\n return_code = 255\n error_message = str(error)\n except KeyboardInterrupt as error:\n return_code = 255\n error_message = str(error)\n finally:\n if return_code != 0:\n Utility().terminate(\"System terminated! 
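`baby_step` above routes every numeric choice into one bash script, with the sentinel options (13 = install all, 14 = quit) special-cased inline. When options map to distinct Python actions, a dispatch dict keeps the loop flat; a generic, hypothetical sketch (the option names and the list-driven input are assumptions made so it runs without interaction):

```python
def install_discover():
    print("installing discover ...")

def install_veil():
    print("installing veil ...")

MENU = {"1": install_discover, "3": install_veil}

def menu_loop(choices):
    # `choices` stands in for interactive input(); assumption for testability.
    for choice in choices:
        if choice == "q":
            break
        action = MENU.get(choice)
        if action is None:
            print("unknown option:", choice)
        else:
            action()

menu_loop(["1", "3", "q"])
```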
\" + str(error_message))\n return return_code\n\n\ndef main():\n util = Utility()\n if util.is_root():\n if platform.system() != 'Linux':\n print(\"###############################################\")\n print(\"###############################################\")\n sys.stderr.write(\"This script must run in a Kali or any Linux distro.\\nGood bye! :)\\n\")\n print(\"###############################################\")\n print(\"###############################################\")\n sys.exit(255)\n\n if sys.version_info < (3, 0, 0):\n print(\"###############################################\")\n print(\"###############################################\")\n sys.stderr.write(\"Need python3 to run this script\\nGood bye! :)\\n\")\n print(\"###############################################\")\n print(\"###############################################\")\n sys.exit(255)\n\n if distro.linux_distribution(False)[0] != 'kali':\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"You are not using Kali OS! Please at least use these in a VirtualBOX so that you can roll back more \"\n \"easily!\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n time.sleep(5)\n # call the utility functions\n util.system_upgrade()\n util.install_psql()\n util.install_metasploit()\n util.chmod_scripts()\n util.baby_step()\n else:\n print(\"###############################################\")\n print(\"###############################################\")\n print(\"You need to be a superuser or run this script in super user mode.\\nGood bye. :)\")\n sys.exit(\"Permission Denied!\\n###############################################\\n\"\n \"###############################################\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"HP_Utility.py","file_name":"HP_Utility.py","file_ext":"py","file_size_in_byte":13372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"649561390","text":"import io\nimport re\nimport csv\nimport magic\nimport shutil\nimport hashlib\nimport pathlib\nimport mimetypes\n\nfrom airtable import Airtable\n\n\nclass Base:\n \"\"\"\n Represents an Airtable Base and all its Tables.\n \"\"\"\n\n def __init__(self, base_id, api_key, schema):\n self.id = base_id\n self.tables = {}\n self.api_key = api_key\n self.schema = schema\n self.load()\n\n def load(self):\n for table_name, relations in self.schema.items():\n self.tables[table_name] = Table(self, table_name, relations)\n for table in self.tables.values():\n table.link()\n \n def wipe(self):\n \"\"\"\n Empty all the tables but leave the schema intact.\n \"\"\"\n # safety to only ever wipe this airtable base\n assert self.id == 'appqn0kIOXRo00kdN'\n\n # first get the latest data\n self.load()\n\n for table in self.tables.values():\n table.wipe()\n\n self.load()\n\n\nclass Table:\n \"\"\"\n Represents an Airbase table.\n \"\"\"\n\n def __init__(self, base, table_name, relations):\n self.base = base\n self.table_name = table_name\n self.relations = relations\n self.airtable = Airtable(base.id, table_name, base.api_key)\n self.load()\n\n def load(self):\n self.data = self.airtable.get_all()\n\n def insert(self, row):\n result = self.airtable.insert(row)\n self.data.append(result)\n self.link()\n return result\n\n def update(self, id, row):\n return self.airtable.update(id, row)\n\n def get(self, id):\n for row in self.data:\n if row['id'] == id:\n return row\n return None\n\n def find(self, fields, first=False):\n results = []\n for row in self.data:\n match = True\n for k, v in fields.items():\n 
if k not in row['fields']:\n if v is not None:\n match = False\n elif row['fields'][k] != v:\n match = False\n if match:\n results.append(row)\n if first:\n if len(results) == 0:\n return None\n else:\n return results[0]\n return results\n\n def get_or_insert(self, fields, extra=None):\n \"\"\"\n Get or insert and get the first record that matches the fields.\n When inserting you can add additional things using the extras value\n which should be a dictionary of names and values to set in addition\n to the supplied fields.\n \"\"\"\n r = self.find(fields, first=True)\n if not r:\n f = fields\n if extra:\n f.update(extra)\n r = self.insert(f)\n return r\n\n def link(self):\n \"\"\"\n Use the table's schema relations to turn IDs into objects.\n \"\"\"\n for row in self.data:\n for prop, other_table_name in self.relations.items():\n other_table = self.base.tables[other_table_name]\n if prop in row['fields']:\n value = row['fields'][prop]\n if type(value) == list:\n new_value = []\n for v in value:\n new_value.append(other_table.get(v))\n row['fields'][prop] = new_value\n else:\n row['fields'][prop] = other_table.get(value)\n\n def wipe(self):\n \"\"\"\n Remove all rows from the table.\n \"\"\"\n self.load()\n ids = [row['id'] for row in self.data]\n self.airtable.batch_delete(ids)\n\n\ndef parse_name(s):\n \"\"\"\n Parse a name string into its parts.\n \"\"\"\n parts = s.split(' ')\n f = parts.pop(0)\n l = parts.pop() if len(parts) > 0 else None\n s = None\n if l and re.match(r'^(sr)|jr|[iv]+$', l, re.IGNORECASE):\n s = l\n l = parts.pop()\n m = ' '.join(parts) if len(parts) > 0 else None\n return f, m, l, s\n\ndef get_sha256(f):\n d = hashlib.sha256()\n fh = open(f, 'rb')\n while True:\n chunk = fh.read(512 * 1024)\n if not chunk:\n break\n d.update(chunk)\n return d.hexdigest()\n\ndef csv_list(s):\n \"Parse a CSV row into a list\"\n if not s or s == '':\n return []\n elif \",\" in s and '\"' not in s:\n return [s]\n else:\n return next(csv.reader(io.StringIO(s)))\n\ndef csv_str(l):\n \"Turn a list into a CSV row\"\n out = io.StringIO()\n csv.writer(out).writerow(l)\n return out.getvalue().strip()\n\ndef save_file(src, accession_dir, sha256, ext):\n ext = ext.lstrip('.')\n filename = \"{}.{}\".format(sha256, ext)\n rel_path = pathlib.Path(str(accession_dir)) / filename\n abs_path = pathlib.Path(\"/mnt/data\") / rel_path\n\n # make the directory if needed\n if not abs_path.parent.is_dir():\n abs_path.parent.mkdir(parents=True)\n\n # copy the file\n shutil.copyfile(src, abs_path)\n\n return str(rel_path)\n\ndef get_ext(mimetype):\n result = mimetypes.guess_extension(mimetype)\n if not result and mimetype == 'image/vnd.adobe.photoshop':\n result = '.psd'\n elif result is None:\n result = ''\n return result\n","sub_path":"schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":5244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"632763551","text":"from sqlalchemy import create_engine\nimport pymysql\nimport pandas as pd\nimport db_config\nimport db_utils\n\ndef get_latest_update_covid(date: str) -> pd.DataFrame:\n dbConnection = db_utils.db_connect()\n\n # Gets the last valid date's data if exists and no data exists on the actual date\n sql = '''SELECT t.index, t.date, t.county, t.state, t.fips, t.cases, t.deaths \n FROM (\n SELECT county, MAX(date) as MaxDate\n FROM (\n select date, county, state,cases, deaths from main_covid_data where DATE(date)<= %(var)s) as A\n GROUP BY county\n ) r \n INNER JOIN main_covid_data t \n ON 
t.county = r.county AND t.date = r.MaxDate'''\n\n return pd.read_sql(sql=sql, con=dbConnection, params={\"var\": date})\n\ndef get_latest_update_prison(date: str) -> pd.DataFrame:\n dbConnection = db_utils.db_connect()\n sql = '''SELECT t.index, t.name, t.date, t.address, t.county, t.residents_confirmed, t.staff_confirmed, t.residents_active, t.staff_active, t.residents_deaths, t.staff_deaths\n FROM (\n SELECT name, MAX(date) as MaxDate\n FROM (\n select * from main_prison_data where DATE(date)<= %(var)s) as A \n GROUP BY name \n ) r \n INNER JOIN main_prison_data t\n ON t.name = r.name AND t.date = r.MaxDate'''\n return pd.read_sql(sql=sql, con=dbConnection, params={\"var\": date})\n\ndef prepare_one(return_type: str, prepname: str, tbl_name: str, where_clause: str, var_a: str):\n dbConnection = db_utils.db_connect()\n\n # Set variable :: Ex: varA = \"'2020-04-07'\"\n setup = \"SET @a = \" + str(var_a) + \";\"\n result = dbConnection.execute(setup)\n\n # Do query\n result = dbConnection.execute(\n \"PREPARE \" + str(prepname) + \" from 'SELECT * from \" + str(tbl_name) + \" \" + str(where_clause) + \";';\"\n )\n covid_data_df = pd.read_sql(\"EXECUTE \" + str(prepname) + \" using @a;\", dbConnection)\n\n # Display\n pd.set_option('display.expand_frame_repr', False)\n if return_type == \"csv\":\n return_this = covid_data_df.to_csv()\n elif return_type == \"print\":\n print(covid_data_df.to_csv())\n print(covid_data_df)\n return 0\n else:\n return_this = covid_data_df\n dbConnection.close()\n return return_this\n\n\ndef prepare_two(return_type: str, prepname: str, tbl_name: str, where_clause: str, var_a: str, var_b):\n dbConnection = db_utils.db_connect()\n\n # Set variable :: Ex: varA = \"'2020-04-07'\"\n setup = \"SET @a = \" + str(var_a) + \";\"\n print(setup)\n result = dbConnection.execute(setup)\n\n # Set variable :: Ex: var_b =\n setup = \"SET @b = \" + str(var_b) + \";\"\n print(setup)\n result = dbConnection.execute(setup)\n\n # Do query\n result = dbConnection.execute(\n \"PREPARE \" + str(prepname) + \" from 'SELECT * from \" + str(tbl_name) + \" \" + str(where_clause) + \";';\"\n )\n covid_data_df = pd.read_sql(\"EXECUTE \" + str(prepname) + \" using @a, @b;\", dbConnection)\n\n # Display\n pd.set_option('display.expand_frame_repr', False)\n if return_type == \"csv\":\n return_this = covid_data_df.to_csv()\n elif return_type == \"print\":\n print(covid_data_df.to_csv())\n print(covid_data_df)\n return 0\n else:\n return_this = covid_data_df\n dbConnection.close()\n return return_this\n\n\n# Examples below\nif __name__ == \"__main__\":\n # Return type can return a csv (csv) or print (print) or a dataframe (anything else)\n # prepname needs to be unique for the query -- TODO: actually prepare all queries and delete the prep creation lines\n # tbl_name is the name of the table in the database\n # where_clause is remaining sql to be executed -- variables will be assigned as ?s\n # var_a is the first variable (?) to be replaced\n # var_b is the second variable (?) to be replaced\n prepare_one(return_type=\"print\",\n prepname=\"sdtest\",\n tbl_name=\"main_covid_data\",\n where_clause=\"where county=? limit 10\",\n var_a='\"Alameda\"')\n prepare_two(return_type=\"print\",\n prepname=\"sdtest2\",\n tbl_name=\"main_covid_data\",\n where_clause=\"where county=? and date=? limit 10\",\n var_a='\"Alameda\"',\n var_b=\"'2020-12-15'\")\n # prepare_two(return_type=\"print\",\n # prepname=\"prison_data\",\n # tbl_name=\"main_vaccine_by_cty\",\n # where_clause=\"where county=? 
and administered_date=? limit 10\",\n # var_a='\"Alameda\"',\n # var_b=\"'2020-12-15'\") # Note, dates must be like \"'2020-04-07'\"\n\n\n","sub_path":"project/database_queries_covid.py","file_name":"database_queries_covid.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"255950880","text":"#!/usr/bin/python\n\nimport math\n\n# # should return 0 since we don't have enough butter!\n# recipe_batches(\n# { 'milk': 100, 'butter': 50, 'flour': 5 },\n# { 'milk': 138, 'butter': 48, 'flour': 51 }\n# )\n\n\ndef recipe_batches(recipe, ingredients):\n batches = None\n for ingredient in recipe:\n if ingredient in ingredients:\n batch = ingredients[ingredient] // recipe[ingredient]\n if batches is None:\n batches = batch\n elif batch < batches:\n batches = batch\n else:\n batches = 0\n return batches\n\n\nif __name__ == '__main__':\n # Change the entries of these dictionaries to test\n # your implementation with different inputs\n recipe = {'milk': 100, 'butter': 50, 'cheese': 10}\n ingredients = {'milk': 198, 'butter': 52, 'cheese': 10}\n print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(\n batches=recipe_batches(recipe, ingredients), ingredients=ingredients))\n","sub_path":"recipe_batches/recipe_batches.py","file_name":"recipe_batches.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"1539916","text":"import os\nfrom collections import defaultdict\n\n\nclass dynamicdict(defaultdict):\n def __missing__(self, key):\n if self.default_factory:\n self.__setitem__(key, self.default_factory(key))\n return self[key]\n else:\n return super(dynamicdict, self).__missing__(key)\n\n\nLOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO')\n\nTESTING = False\nDEBUG = True\n\nPORT = 1218\n\n# Clickhouse Options\nCLICKHOUSE_SERVER = os.environ.get('CLICKHOUSE_SERVER', 'localhost:9000')\nCLICKHOUSE_CLUSTER = None\nCLICKHOUSE_TABLE = 'dev'\nCLICKHOUSE_MAX_POOL_SIZE = 25\n\n# Dogstatsd Options\nDOGSTATSD_HOST = 'localhost'\nDOGSTATSD_PORT = 8125\n\n# Redis Options\nUSE_REDIS_CLUSTER = False\nREDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')\nREDIS_PORT = 6379\nREDIS_DB = 1\n\n# Query Recording Options\nRECORD_QUERIES = False\nQUERIES_TOPIC = 'snuba-queries'\n\n# Runtime Config Options\nCONFIG_MEMOIZE_TIMEOUT = 10\n\n# Sentry Options\nSENTRY_DSN = None\n\n# Snuba Options\nTIME_GROUPS = dynamicdict(\n lambda sec: 'toDateTime(intDiv(toUInt32(timestamp), {0}) * {0})'.format(sec),\n {\n 3600: 'toStartOfHour(timestamp)',\n 60: 'toStartOfMinute(timestamp)',\n 86400: 'toDate(timestamp)',\n }\n)\n\nTIME_GROUP_COLUMN = 'time'\n\n# Processor/Writer Options\nDEFAULT_BROKERS = ['localhost:9093']\nDEFAULT_MAX_BATCH_SIZE = 50000\nDEFAULT_MAX_BATCH_TIME_MS = 2 * 1000\nDEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 50000\nDEFAULT_QUEUED_MIN_MESSAGES = 20000\nDISCARD_OLD_EVENTS = True\nKAFKA_TOPICS = {\n 'raw-events': {\n 'topic': 'events',\n 'replication_factor': 1,\n 'num_partitions': 1,\n },\n 'replacements': {\n 'topic': 'event-replacements',\n 'replication_factor': 1,\n 'num_partitions': 1,\n },\n 'commit-log': {\n 'topic': 'snuba-commit-log',\n 'replication_factor': 1,\n 'num_partitions': 1,\n },\n}\n\n# project_id and timestamp are included for queries, event_id is included for ReplacingMergeTree\nDEFAULT_SAMPLE_EXPR = 'cityHash64(toString(event_id))'\nDEFAULT_ORDER_BY = '(project_id, 
toStartOfDay(timestamp), %s)' % DEFAULT_SAMPLE_EXPR\nDEFAULT_PARTITION_BY = '(toMonday(timestamp), if(equals(retention_days, 30), 30, 90))'\nDEFAULT_VERSION_COLUMN = 'deleted'\nDEFAULT_SHARDING_KEY = 'cityHash64(toString(event_id))'\nDEFAULT_LOCAL_TABLE = 'sentry_local'\nDEFAULT_DIST_TABLE = 'sentry_dist'\nDEFAULT_RETENTION_DAYS = 90\n\nRETENTION_OVERRIDES = {}\n\n# the list of keys that will upgrade from a WHERE condition to a PREWHERE\nPREWHERE_KEYS = ['project_id']\nMAX_PREWHERE_CONDITIONS = 1\n\nSTATS_IN_RESPONSE = False\n\nPAYLOAD_DATETIME_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nREPLACER_MAX_BLOCK_SIZE = 512\nREPLACER_MAX_MEMORY_USAGE = 10 * (1024**3) # 10GB\n# TLL of Redis key that denotes whether a project had replacements\n# run recently. Useful for decidig whether or not to add FINAL clause\n# to queries.\nREPLACER_KEY_TTL = 12 * 60 * 60\nREPLACER_MAX_GROUP_IDS_TO_EXCLUDE = 256\n\nTURBO_SAMPLE_RATE = 0.1\n","sub_path":"snuba/settings_base.py","file_name":"settings_base.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"5830641","text":"import os\r\n\r\ndir_name = [\"C:\\Shared_Folder\\Builds\\Banker12\"]\r\nfor i in dir_name:\r\n a = os.listdir(i)\r\n for item in a:\r\n if item.endswith(\".txt\"):\r\n os.remove(os.path.join(i,item))\r\n print(\"Removed - \" + os.path.join(i,item))\r\n if item.endswith(\"Validation\"):\r\n x = os.listdir(i + '\\Validation')\r\n for val_item in x:\r\n if val_item.endswith(\".txt\") or val_item.endswith(\".log\"):\r\n os.remove(os.path.join(i + '\\Validation', val_item))\r\n print (\"Removed - \" + os.path.join(i + '\\Validation', val_item))","sub_path":"PycharmProjects/PythonRulz/Folder_main/PersonalProjects/Work_related/clean_projects.py","file_name":"clean_projects.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"154432755","text":"import pymel.core as pm\nimport maya.mel as mel\nimport datetime\nimport os\n\nimport petfactory.util.verify as pet_verify\n\ndef get_current_camera():\n \n try:\n camera_unicode = pm.modelPanel(pm.getPanel(wf=True), q=True, cam=True)\n return pet_verify.to_pynode(camera_unicode)\n\n except RuntimeError as e:\n print('Could not get the camera', e)\n return None\n \n\ndef get_time():\n \n return pm.currentTime(query=True)\n\n \ndef do_playblast(current_camera, file_name, start_time, end_time, dir_path, width, height):\n \n if not pet_verify.verify_pynode(current_camera, pm.nodetypes.Camera):\n pm.warning('Not a valid camera!')\n return None\n \n \n # look through the camera\n mel.eval('lookThroughModelPanel {0} modelPanel4'.format(current_camera)) \n \n # do the playblast \n pm.playblast(startTime=start_time, endTime=end_time, format=\"avfoundation\", viewer=False, compression=\"H.264\", percent=100, widthHeight=(width,height), showOrnaments=True, filename='{0}/{1}'.format(dir_path, file_name), offScreen=True)\n \n print('Playblast created in directory:\\n{0}'.format(dir_path))\n\n\ndef create_playblast_directory(root_path):\n \n time_string = datetime.datetime.now().strftime(\"%Y-%m-%d %H.%M.%S\")\n dir_name = 'playblasts {0}'.format(time_string)\n\n dir_path = os.path.join(root_path, dir_name)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return dir_path\n\n else:\n print('Exists!')\n return None\n\n\n'''\ncam_1 = pm.PyNode('camera1')\ncam_2 = pm.PyNode('camera2')\n\ndir = create_playblast_directory()\n\nif 
dir is not None:\n do_playblast(cam_1, start_time=40, end_time=80, dir_path=dir, width=960, height=540)\n'''\n\n#pm.playblast(startTime=1, endTime=40, format=\"avfoundation\", viewer=False, compression=\"H.264\", widthHeight=(960,540), showOrnaments=True, filename='/Users/johan/Desktop/playblasts 2015-04-06 23.01.59/{0}'.format(cam_1), offScreen=True)\n","sub_path":"petfactory/animation/playblast/playblast_creator.py","file_name":"playblast_creator.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"206113194","text":"import os\nimport datetime\nimport persistence.save as persave\nimport persistence.load as perload\nimport persistence.print as perprint\nimport persistence.roundclass as perround\nimport persistence.database as database_module\n\n\nos.system(\"Clear\")\n\npeople = []\nhot_drinks = []\nsoft_drinks = []\nalcoholic_drinks = []\nhot_drinks_pref = {}\nsoft_drinks_pref = {}\nalcoholic_drinks_pref = {}\nnew_round = {}\ncurrentDT = datetime.datetime.now()\ntime = currentDT.strftime(\"%H:%M\")\n\n# inital_options -> display_inital_options\n# refactored display_inital_options\n# perddb to database_module\n\n# Asks for user input which equals result, returns a string\ndef Inital_options_displayer():\n header(\"Welcome to the BrIW app!\")\n result = input(f\"\"\"Please select which option you would like to use:\n [1] Search People List\n [2] Search Drinks List\n [3] Add Person\n [4] Add Drink\n [5] Delete Person\n [6] Delete Drinks\n [7] Choose Preferences\n [8] Print Functions\"\n [9] Take Round\n [10] Exit App \n Enter Here: \"\"\")\n initial_function(result)# evokes the if function (menu) within the \"Initial_Function\"\n\n\ndef initial_function(number): # Defines what outcome is associated with each input from initial options\n if number == \"1\": # Search person\n s1_search_people()\n elif number == \"2\": # Search drinks\n s2_search_drinks()\n elif number == \"3\": # Create Person\n s3_create_person()\n elif number == \"4\": # Create Drink\n s4_add_drinks()\n elif number == \"5\": # Delete Person\n s5_delete_person()\n elif number == \"6\": # Delete Drinks\n s6_delete_drinks_type()\n elif number == \"7\": # Choose Drinks Prefs\n s7_drinks_pref_menu()\n elif number == \"8\": # Print stored info\n s8_print_functions()\n elif number == \"9\": # Round Builder\n s9_round_type()\n elif number == \"10\": # Exit App\n os.system(\"Clear\")\n print(\"Catch you later\")\n SystemExit()\n\n else:\n print(\"This is an invalid input, what would you like to do?\")\n Inital_options_displayer()\n\n\ndef sn_nav_options(): # Once operation is complete what will the user want to do next? 
(if function)\n print_line()\n numbertwo = input(\"\"\"Please select your next option:\\n[1] Return to main menu\\n[2] Exit the app\\nEnter Here: \"\"\")\n if numbertwo == \"1\":\n Inital_options_displayer()\n elif numbertwo == \"2\":\n os.system(\"Clear\")\n print(\"Catch you later\")\n SystemExit()\n\n\ndef s1_search_people():\n people = database_module.load_tables(\"people\")\n header(\" Search People\")\n search_entry = str(input(f\"Enter first name, surname or age (Hit ENTER to quit) \\nEnter here: \").capitalize())\n try:\n for person in people:\n if search_entry == \"\":\n sn_nav_options()\n elif search_entry in (person.first_name, person.surname, person.age):\n print(f\"{person.first_name} {person.surname}, {person.age}\")\n else:\n continue\n except:\n print(\"Sorry\")\n not_found = input(f\"\\nWould you like to add someone new? Y/N (Hit ENTER to quit) \\nEnter here: \")\n if not_found == \"Y\":\n s3_create_person()\n else:\n sn_nav_options()\n sn_nav_options()\n\n\ndef s2_search_drinks():\n header(\" Search Drinks\")\n drink_type = input(f\"Which type of drink are you looking for? (Hit ENTER to quit)\\n[1] Hot Drinks \\n[2] \"\n f\"Soft Drinks \\n[3] Alcoholic Drinks \\nEnter here: \")\n if drink_type == \"\":\n sn_nav_options()\n search_entry = str(input(f\"\\nWhat drink would you like to search for? (Hit ENTER to quit)\\nEnter here: \").capitalize())\n if drink_type == \"\":\n sn_nav_options()\n elif drink_type == \"1\":\n hot_drinks = database_module.load_tables(\"hot_drinks\")\n for drinks in hot_drinks:\n if search_entry in drinks.drink_choice:\n print(f\"{drinks.drink_choice}, {drinks.milk_choice}, {drinks.strength_choice}, {drinks.sugar_choice}\")\n else:\n continue\n not_found = input(\"\\nWould you like to add a new drink? Y/N (Hit ENTER to quit) \\nEnter here: \")\n if not_found == \"Y\":\n s4_add_drinks()\n else:\n sn_nav_options()\n elif drink_type == \"2\":\n soft_drinks = database_module.load_tables(\"soft_drinks\")\n for drink in soft_drinks:\n if search_entry in drink.drink_choice:\n print(f\"{drink.drink_choice}, {drink.drink_quantity} ml\")\n else:\n continue\n not_found = input(\"\\nDid you find what you were after, would you like to add a new drinks? Y/N \\nEnter here: \")\n if not_found == \"Y\":\n s4_add_drinks()\n else:\n sn_nav_options()\n elif drink_type == \"3\":\n alcoholic_drinks = database_module.load_tables(\"alcoholic_drinks\")\n for drink in alcoholic_drinks:\n if search_entry in drink.drink_choice:\n print(f\"{drink.drink_choice}, {drink.drink_quantity} ml\")\n else:\n continue\n not_found = input(\"\\nDid you find what you were after, would you like to add a new drinks? Y/N \\nEnter here: \")\n if not_found == \"Y\":\n s4_add_drinks()\n else:\n sn_nav_options()\n else:\n sn_nav_options()\n\n\ndef s3_create_person():\n header(\" Add Drinker\")\n try:\n while True:\n first_name = input(\"What is the new drinkers first name?: (Hit ENTER to quit) \\nEnter here:\")\n if first_name == \"\": # Break out from the loop when user hits ENTER\n sn_nav_options()\n surname = input(f\"What is {first_name}'s surname? \")\n age = int(input(f\"What is {first_name}'s age? 
\"))\n new_person = database_module.Person(first_name, surname, age)\n people.append(new_person)\n database_module.save_people(new_person)\n sn_nav_options()\n except:\n sn_nav_options()\n\n\ndef s4_add_drinks():\n header(\" Add Drinks\")\n try:\n drink_type = input(f\"What type of drink would you like to input:\\n[1] Hot Drink\\n[2] Soft Drink\\n[3] Alcoholic \"\n f\"\\n(Hit ENTER to quit) \")\n if drink_type == \"\": # Break out from the loop when user hits ENTER\n sn_nav_options()\n elif drink_type == \"1\":\n header(f\"Hot drinks in BrIW\")\n perprint.print_hot_drinks(database_module.load_tables(\"hot_drinks\"))\n print_line()\n drink_choice = str(input(\"\\nWhat hot drink would you like to add?: \").title())\n if drink_choice == \"\":\n sn_nav_options()\n else:\n milk_choice = str(input(f\"Is that {drink_choice} white or black?: \").lower())\n strength_choice = str(input(f\"What strength should the {drink_choice} be?: \").lower())\n sugar_choice = int(input(f\"How many sugars does the {drink_choice} have? (Enter number of teaspoons): \"))\n new_hot_drink = database_module.HotDrinks(drink_choice, milk_choice, strength_choice, sugar_choice)\n database_module.save_drinks([new_hot_drink], \"Hot\")\n hot_drinks.append(new_hot_drink)\n elif drink_type == \"2\":\n header(f\"Soft drinks in BrIW\")\n perprint.print_soft_or_alcy_drinks(database_module.load_tables(\"soft_drinks\"))\n print_line()\n drink_choice = str(input(f\"\\nWhat soft drink would you like to add?: \").title())\n if drink_choice == \"\":\n sn_nav_options()\n else:\n drink_quantity = input(f\"What {drink_choice} quantity would you like to save (in ml)?: \")\n new_soft_drink = database_module.SoftDrinks(drink_choice, drink_quantity)\n database_module.save_drinks([new_soft_drink], \"Soft\")\n soft_drinks.append(new_soft_drink)\n elif drink_type == \"3\":\n header(f\"Alcoholic drinks in BrIW\")\n perprint.print_soft_or_alcy_drinks(database_module.load_tables(\"alcoholic_drinks\"))\n print_line()\n drink_choice = str(input(f\"\\nWhat alcoholic drink would you like to save?: \").title())\n if drink_choice == \"\":\n sn_nav_options()\n else:\n drink_quantity = input(f\"What {drink_choice} quantity would you like to save (in ml)?: \")\n new_alcy_drink = database_module.AlcyDrinks(drink_choice, drink_quantity)\n database_module.save_drinks([new_alcy_drink], \"Alcy\")\n alcoholic_drinks.append(new_alcy_drink)\n else:\n sn_nav_options()\n except:\n sn_nav_options()\n\n\ndef s5_delete_person():\n header(\" Delete People\")\n people_list = database_module.load_tables(\"people\")\n delete_person = str(input(\"Type the first and last name of the who you want to delete\\nEnter here: \").capitalize().strip())\n split_delete_person = delete_person.split(\" \")\n first_name = split_delete_person[0]\n surname = split_delete_person[1].capitalize()\n for person in people_list:\n if delete_person == \"\":\n break\n elif (first_name.upper(), surname.upper()) == (person.first_name.upper(), person.surname.upper()):\n delete_confirmation = input(f\"{person.first_name} {person.surname}, {person.age} was located, are you sure \"\n f\"you want to delete them? 
Enter Y/N \\nEnter here: \")\n if delete_confirmation == \"Y\":\n database_module.delete_person(person)\n else:\n continue\n else:\n continue\n sn_nav_options()\n\n\ndef s6_delete_drinks_type():\n header(\" Delete Drinks\")\n drink_type = input(f\"What type of drink would you like to delete?\\n(Hit ENTER to quit)\\n[1] Hot Drink\\n[2] Soft \"\n f\"Drink\\n[3] Alcoholic Drink\\nEnter here: \")\n if drink_type == \"1\":\n hot_drinks = database_module.load_tables(\"hot_drinks\")\n delete_drink = str(input(\"What hot drink would you like to delete?\").capitalize())\n for hotdrinks in hot_drinks:\n if delete_drink == \"\":\n break\n elif delete_drink == hotdrinks.drink_choice:\n database_module.delete_drinks(\"Hot\", hotdrinks)\n elif drink_type == \"2\":\n soft_drinks = database_module.load_tables(\"soft_drinks\")\n delete_drink = str(input(\"What soft drink would you like to delete?\").capitalize())\n for softdrinks in soft_drinks:\n if delete_drink == \"\":\n break\n elif delete_drink == softdrinks.drink_choice:\n database_module.delete_drinks(\"Soft\", softdrinks)\n elif drink_type == \"3\":\n delete_drink = str(input(\"What alcoholic drink would you like to delete?\").capitalize())\n for alcydrinks in alcoholic_drinks:\n if delete_drink == \"\":\n break\n elif delete_drink == alcydrinks.drink_choice:\n database_module.delete_drinks(\"Alcy\", alcydrinks)\n else:\n sn_nav_options()\n sn_nav_options()\n\n\ndef s7_drinks_pref_menu():\n header(\" Drinks Preferences\")\n preferences_selection = input(f\"What type of drinks preference would you like to add? \\n[A] Hot Drink \\n[B] Soft Drink\"\n f\"\\n[C] Alcoholic Drink \\nEnter Here: \")\n try:\n if preferences_selection == \"A\":\n header(\"Stored Hot Drinks\")\n print(\"To overwrite a preference simply retype their preference as below.\")\n perprint.print_hot_dicts(hot_drinks_pref)\n s7a_add_hot_drink_prefs()\n elif preferences_selection == \"B\":\n os.system(\"Clear\")\n header(\" Stored Soft Drinks\")\n print(\"To overwrite a preference simply retype their preference as below.\")\n perprint.print_soft_or_alcy_dicts(soft_drinks_pref)\n s7b_add_soft_or_alcy_prefs(soft_drinks_pref)\n persave.save_csv_dictionary(\"persistence/softdrinksprefs.csv\", soft_drinks_pref)\n sn_nav_options()\n elif preferences_selection == \"C\":\n header(\"Stored Alcoholic Drinks\")\n print(\"To overwrite a preference simply retype their preference as below.\")\n perprint.print_soft_or_alcy_dicts(alcoholic_drinks_pref)\n s7b_add_soft_or_alcy_prefs(alcoholic_drinks_pref)\n persave.save_csv_dictionary(\"persistence/alcoholicdrinksprefs.csv\", alcoholic_drinks_pref)\n sn_nav_options()\n else:\n os.system(\"Clear\")\n print(\"Sorry, this is an invalid input\")\n sn_nav_options()\n except:\n sn_nav_options()\n\n\ndef s7a_add_hot_drink_prefs():\n while True:\n person = input(\"\\nWhose hot drink preferences would you like to save to the app?: \\n(Hit ENTER to exit) Enter here: \")\n if person == \"\": # Break out from the loop when user hits ENTER\n break\n else:\n hot_drink_choice = input(f\"What hot drink would {person} like?: \")\n hot_drink_milk = input(f\"Do you take that black or white?: \")\n hot_drink_strength = input(f\"What strength would you like that?: \")\n hot_drink_sugar = input(f\"How many sugars would you like?: \")\n hot_drinks_pref[person] = [hot_drink_choice, hot_drink_milk, hot_drink_strength, hot_drink_sugar]\n persave.save_hot_drinks(\"persistence/hotdrinksprefs.csv\", hot_drinks_pref)\n return hot_drinks_pref\n sn_nav_options()\n\n\ndef 
s7b_add_soft_or_alcy_prefs(dictionary):\n while True:\n preference = input(f\"\\nAdd a person and preference in format of 'person: drink'\\n(Hit ENTER to exit) Enter here: \")\n if preference == \"\": # Break out from the loop when user hits ENTER\n break\n else:\n split_preference = preference.strip().split(\": \")\n person = split_preference[0]\n drink = split_preference[1]\n dictionary[person] = drink\n return dictionary\n\ndef s8_print_functions():\n header(\" Print Functions\")\n print_choice = str(input(f\"| What would you like to print:\\n[A] People list\\n[B] Drinks list\\n[C] Hot Drinks Prefs\"\n f\"\\n[D] Soft Drinks Prefs\\n[E] Alcoholic Drinks Prefs\\n[F] Current Round\\nEnter Here: \").\n upper())\n if print_choice == \"A\":\n os.system(\"Clear\")\n perprint.print_person(people)\n sn_nav_options()\n elif print_choice == \"B\":\n os.system(\"Clear\")\n s8a_print_drinks_lists()\n sn_nav_options()\n elif print_choice == \"C\":\n header(\"Hot Drinks Preferences\")\n perprint.print_hot_dicts(hot_drinks_pref)\n sn_nav_options()\n elif print_choice == \"D\":\n header(\"Soft Drinks Preferences\")\n perprint.print_soft_or_alcy_dicts(soft_drinks_pref)\n sn_nav_options()\n elif print_choice == \"E\":\n header(\"Alcoholic Drinks Preferences\")\n perprint.print_soft_or_alcy_dicts(alcoholic_drinks_pref)\n sn_nav_options()\n elif print_choice == \"F\":\n os.system(\"Clear\")\n header(\" Drinks Round\")\n count = 1\n for key, value in new_round.items():\n print(f\" | {count}. {key}'s would like a {value}\")\n count += 1\n sn_nav_options()\n else:\n os.system(\"Clear\")\n print(\"This is an invalid input\")\n sn_nav_options()\n\n\ndef s8a_print_drinks_lists():\n header(\" Saved Drinks\")\n print(\" Hot Drinks\")\n print_line()\n perprint.print_hot_drinks(database_module.load_tables(\"hot_drinks\"))\n print_line()\n print(\" Soft Drinks\")\n print_line()\n perprint.print_soft_or_alcy_drinks(database_module.load_tables(\"soft_drinks\"))\n print_line()\n print(\" Alcoholic Drinks\")\n print_line()\n perprint.print_soft_or_alcy_drinks(database_module.load_tables(\"alcoholic_drinks\"))\n sn_nav_options()\n\n\ndef s9_round_type():\n header(\" Round Builder 1.0\")\n try:\n brewer = input(\"Who would like to create a round? 
\\nEnter Here: \")\n if currentDT.hour <= 15:\n print(f\"It is currently {time}, therefore you should have a hot drink.\")\n perround.Drinks(brewer).hot_drinks()\n sn_nav_options()\n elif 16 < currentDT.hour < 18:\n print(f\"It is currently {time}, therefore you should have a soft drink.\")\n perround.Drinks(brewer).soft_drinks()\n sn_nav_options()\n else:\n print(f\"It is currently: {time}, therefore you should have an alcoholic drink.\")\n perround.Drinks(brewer).alcoholic_drinks()\n sn_nav_options()\n except:\n sn_nav_options()\n\n\ndef print_line():\n print(\"+-------------------------------------------------------------+\")\n\n\ndef header(header_text):\n os.system(\"Clear\")\n print(\"+-------------------------------------------------------------+\")\n print(f\" {header_text} \")\n print(\"+-------------------------------------------------------------+\")\n\n\nif __name__ == \"__main__\":\n perload.load_hot_drinks_prefs(\"persistence/hotdrinksprefs.csv\", hot_drinks_pref)\n perload.load_csv_dictionary(\"persistence/alcoholicdrinksprefs.csv\", alcoholic_drinks_pref)\n perload.load_csv_dictionary(\"persistence/softdrinksprefs.csv\", soft_drinks_pref)\n people = database_module.load_tables(\"people\")\n hot_drinks = database_module.load_tables(\"hot_drinks\")\n soft_drinks = database_module.load_tables(\"soft_drinks\")\n alcoholic_drinks = database_module.load_tables(\"alcoholic_drinks\")\n\n\n Inital_options_displayer()\n\n SystemExit","sub_path":"MainApp.py","file_name":"MainApp.py","file_ext":"py","file_size_in_byte":17968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"450102215","text":"import os.path\nimport random\nimport torchvision.transforms as transforms\nimport torch\nfrom data.base_dataset import BaseDataset, get_transform\nfrom data.image_folder import make_dataset,make_dataset_label\nfrom PIL import Image\nimport numpy as np\n\nclass AlignedDatasetMultiView(BaseDataset):\n def initialize(self, opt):\n self.opt = opt\n self.root = opt.dataroot\n self.dirs = []\n self.paths = []\n\n if self.opt.isTrain:\n self.nv = 0\n for d in os.listdir(self.root):\n if os.path.isdir(os.path.join(self.root,d)):\n self.nv += 1\n\n for i in range(self.nv):\n self.dirs.append(os.path.join(opt.dataroot, \"%d\" %i) )\n self.paths.append(sorted(make_dataset(self.dirs[i]) ) )\n else:\n self.dir = (os.path.join(opt.dataroot))\n self.paths = (sorted(make_dataset(self.dir)))\n\n self.transform = get_transform(opt)\n\n def __getitem__(self, index):\n\n if self.opt.phase == 'test' :\n bg_color = (64, 64, 64)\n A = Image.open(self.paths[index]).convert('RGB')\n A, _ = self.remapping_background(A, bg_color)\n A = self.transform(A)\n return {'A': A, 'A_paths': self.paths[index], }\n\n\n training_view_indexes = range(0,self.nv)\n\n idx_A = np.random.choice(training_view_indexes)\n idx_B = np.random.choice(training_view_indexes)\n delta_choices = [2,1,-1,-2]\n idx_C = idx_B + np.random.choice(delta_choices)\n\n # print idx_A, idx_B, idx_C\n yaw1 = -(idx_B-idx_A) * np.pi/9\n yaw2 = -(idx_B-idx_C) * np.pi/9\n idx_C = np.mod(idx_C,self.nv)\n\n\n\n bg_color = (64,64,64)\n A = Image.open(self.paths[idx_A][index]).convert('RGB')\n A,_ = self.remapping_background(A, bg_color)\n A = self.transform(A)\n\n B = Image.open(self.paths[idx_B][index]).convert('RGB')\n B,_ = self.remapping_background(B, bg_color)\n B = self.transform(B)\n\n C = Image.open(self.paths[idx_C][index]).convert('RGB')\n C,_ = self.remapping_background(C, bg_color)\n C = 
self.transform(C)\n\n return {'A': A, 'B': B, 'C': C, 'YawAB': torch.Tensor([yaw1]),'YawCB': torch.Tensor([yaw2]), 'A_paths': self.paths[int(self.nv/2)][index], }\n\n def __len__(self):\n if self.opt.phase == 'train':\n return len(self.paths[int(self.nv/2)])\n else:\n return len(self.paths)\n\n def name(self):\n return 'AlignedDatasetMultiView'\n\n def remapping_background(self, image, bg_color):\n data = np.array(image)\n\n r1, g1, b1 = bg_color # Original value\n r2, g2, b2 = 128, 128, 128 # Value that we want to replace it with\n\n red, green, blue = data[:, :, 0], data[:, :, 1], data[:, :, 2]\n mask = (red == r1) & (green == g1) & (blue == b1)\n data[:, :, :3][mask] = [r2, g2, b2]\n\n return Image.fromarray(data),mask\n\n","sub_path":"data/aligned_dataset_multi_view.py","file_name":"aligned_dataset_multi_view.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"240244593","text":"import theano\nimport theano.tensor as T\nfrom theano import printing\nimport numpy as np\nimport gym\nimport cPickle\n\nrng = np.random.RandomState(23455)\ninput_dim = 80*80\nhidden_dim = 200\ngamma = 0.99\nlearning_rate = 1e-4\ndecay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2\n\n\nclass PolicyGradient(object):\n def __init__(self, init_file=None):\n w_bound = np.sqrt(80*80)\n W1 = np.asarray( rng.uniform(\n low=-1.0 / w_bound,\n high=1.0 / w_bound,\n size=(input_dim, hidden_dim)),\n dtype=theano.config.floatX)\n W2 = np.asarray( rng.uniform( low=-1.0 / w_bound, high=1.0 / w_bound,\n size=(hidden_dim, 1)), dtype=theano.config.floatX)\n print(W1)\n print(W2)\n if init_file is not None:\n W3, W4 = self.load_params(init_file)\n print(W3)\n print(W4)\n self.W1 = theano.shared(\n value=W1,\n name=\"W1\", borrow=True\n )\n # self.b1 = theano.shared(\n # value=np.zeros(hidden_dim, dtype=theano.config.floatX),\n # name=\"b1\", borrow=True\n # )\n self.W2 = theano.shared(\n value=W2,\n name=\"W2\", borrow=True\n )\n # self.b2 = theano.shared(\n # value=np.zeros(1, dtype=theano.config.floatX),\n # name=\"b2\", borrow=True\n # )\n\n #self.params = [self.W1, self.b1, self.W2, self.b2]\n self.params = [self.W1, self.W2]\n self.x = T.dvector(\"x\")\n\n def load_params(self, init_file):\n return cPickle.load(open(init_file, 'rb'))\n\n def get_hidden_values(self, x):\n return T.nnet.relu(T.dot(x, self.W1))# + self.b1)\n\n def get_output_values(self, x):\n logP = T.dot(x, self.W2)# + self.b2\n return logP\n\n# def get_cost_updates(self):\n# z = get_hidden_values(self.x)\n# dlogP, y = get_output_values(z)\n#\n# loss = sum(reward*)\n#\n def RMSprop(self, cost, params, lr=0.001, rho=0.9, epsilon=1e-6):\n grads = T.grad(cost=cost, wrt=params)\n updates = []\n for p, g in zip(params, grads):\n acc = theano.shared(p.get_value() * 0.)\n acc_new = rho * acc + (1 - rho) * g ** 2\n gradient_scaling = T.sqrt(acc_new + epsilon)\n g = g / gradient_scaling\n updates.append((acc, acc_new))\n updates.append((p, p - lr * g))\n return updates\n\n def print_weights(self):\n print(\"First layer weights:\")\n print(self.W1)\n print(\"Second layer weights\")\n print(self.W2)\n\n def get_train_fn(self):\n X = T.dmatrix(\"X\")\n Y = self.get_output_values(self.get_hidden_values(X))\n self.f1 = theano.function(\n [X], Y\n )\n\n advantage = T.dmatrix(\"advantage\")\n loss = -T.sum(advantage*Y)\n #updates = self.get_cost_updates(X, loss)\n\n grad_params = T.grad(loss, self.params)\n param_printing_op = printing.Print(\"Param\")\n 
param_printing = param_printing_op(grad_params[0])\n updates = [\n (param, param - learning_rate*grad_param) for param, grad_param in zip(self.params, grad_params)\n ]\n #updates = self.RMSprop(loss, self.params)\n\n self.f2 = theano.function(\n [X, advantage], [param_printing], updates=updates\n )\n\n\n\ndef prepro(I):\n \"\"\" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector \"\"\"\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()\n\ndef init_env():\n env = gym.make('Pong-v0')\n return env\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef discount_rewards(r):\n \"\"\" take 1D float array of rewards and compute discounted reward \"\"\"\n discounted_r = np.zeros_like(r)\n running_add = 0\n print(np.sum(np.asarray(r)))\n for t in reversed(xrange(0, r.size)):\n if r[t] != 0:\n running_add = 0 # reset the sum, since this was a game boundary (pong specific!)\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n\ndef main():\n env = init_env()\n obs = env.reset()\n prev_x = None\n reward_sum = 0\n episode_num = 0\n\n #pg = PolicyGradient(\"models/simple_model.save\")\n pg = PolicyGradient()\n\n num_games = 100\n\n pg.get_train_fn()\n for game in range(num_games):\n print(\"Game number: %d\" % game)\n if game % 5 == 0:\n f = file('models/simple_model.save', 'wb')\n cPickle.dump([param.get_value() for param in pg.params], f, protocol=cPickle.HIGHEST_PROTOCOL)\n\n Action, Reward, X = [], [], []\n obs = env.reset()\n prev_x = None\n done = 0\n while not done:\n env.render()\n # Preprocessing the input\n cur_x = prepro(obs)\n x = cur_x - prev_x if prev_x is not None else np.zeros(input_dim)\n prev_x = cur_x\n\n X.append(x)\n\n ret = pg.f1(x.reshape((1, input_dim)))[0]\n aprob = sigmoid(ret)\n action = 2 if np.random.uniform() < aprob else 3 # roll the dice!\n\n obs, reward, done, info = env.step(action)\n Reward.append(reward)\n Reward = np.asarray(Reward)\n discounted_rewards = discount_rewards(Reward)\n discounted_rewards -= np.mean(discounted_rewards)\n discounted_rewards /= np.std(discounted_rewards)\n #print(discounted_rewards)\n\n pg.f2(X, discounted_rewards.reshape((len(discounted_rewards), 1)))\n\n\nmain()\n","sub_path":"pong_from_pixel_theano.py","file_name":"pong_from_pixel_theano.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"71444631","text":"# -*- coding: UTF-8 -*-\nimport unittest\nfrom datetime import date\nimport numpy as np\n\nfrom landscape.finance import consts\nfrom landscape.finance.volatility import math\nfrom landscape.finance.quotes.options import OptionQuote\n\n\nclass VolatilityMathTest(unittest.TestCase):\n\n def test_norm_cdf(self):\n self.assertAlmostEqual(math.norm_cdf(-3), 0.00134990, places=6)\n self.assertAlmostEqual(math.norm_cdf(-2), 0.02275013, places=6)\n self.assertAlmostEqual(math.norm_cdf(-1), 0.15865525, places=6)\n self.assertAlmostEqual(math.norm_cdf(0), 0.5, places=6)\n self.assertAlmostEqual(math.norm_cdf(1), 0.84134474, places=6)\n self.assertAlmostEqual(math.norm_cdf(2), 0.97724987, places=6)\n self.assertAlmostEqual(math.norm_cdf(3), 0.99865010, places=6)\n\n def test_calc_vol(self):\n series = np.array([\n 1995.83, 2013.43, 2014.89, 2017.46, 2003.69, 
1994.24, 2023.86])\n self.assertAlmostEqual(math.calc_vol(series, 5), 13.3629, places=3)\n\n def test_calc_time_to_expiration(self):\n quote = OptionQuote('ABC', consts.CALL, date(2015, 3, 2), 80,\n date(2015, 1, 1), None, None, None, 81, None, None)\n time_to_expiration = math.calc_time_to_expiration(quote)\n self.assertAlmostEqual(time_to_expiration, 0.1587, places=3)\n\n def test_calc_price_call(self):\n quote = OptionQuote('ABC', consts.CALL, date(2015, 3, 2), 80,\n date(2015, 1, 1), None, None, None, 81, None, None)\n price = math.calc_price(quote, 0.3, interest_rate=0.06)\n self.assertAlmostEqual(price, 4.7614, places=3)\n\n def test_calc_price_put(self):\n quote = OptionQuote('ABC', consts.PUT, date(2015, 3, 2), 80,\n date(2015, 1, 1), None, None, None, 81, None, None)\n price = math.calc_price(quote, 0.3, interest_rate=0.06)\n self.assertAlmostEqual(price, 3.0031, places=3)\n\n def test_calc_iv_call(self):\n quote = OptionQuote('ABC', consts.CALL, date(2015, 1, 31), 60.,\n date(2015, 1, 1), None, None, None, 52, None, None)\n iv = math.calc_iv(quote, 3.00, interest_rate=0.05)\n self.assertAlmostEqual(iv, 0.9780, places=3)\n\n def test_calc_iv_put(self):\n quote = OptionQuote('ABC', consts.PUT, date(2015, 1, 31), 60.,\n date(2015, 1, 1), None, None, None, 52, None, None)\n iv = math.calc_iv(quote, 7.86, interest_rate=0.05)\n self.assertAlmostEqual(iv, 0.2998, places=3)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"finance/volatility/tests/test_math.py","file_name":"test_math.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"338706019","text":"print(\"Read 10 integers, store them in a vector and determine at which position in the vector the largest prime number read is located.\")\r\n\r\ncount=1\r\nlista=[]\r\n\r\nwhile count<11:\r\n num=int(input(\"Enter number %d:\" %(count)))\r\n lista.append(num)\r\n count=count+1\r\nlistprimo=[]\r\n\r\nfor x in lista: \r\n cousin=0\r\n for num in range(1,x):\r\n if x%num==0:\r\n cousin+=1\r\n if cousin==1:\r\n listprimo.append(x)\r\n\r\npivot=listprimo[0]\r\n\r\nfor element in listprimo:\r\n if element > pivot:\r\n pivot= element\r\n\r\nprint(\"The largest prime number read is at position %d \" %(lista.index(pivot)))\r\ninput()","sub_path":"Restantes1/ejercicio#3.py","file_name":"ejercicio#3.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"87147484","text":"from typing import cast\n\nfrom graphql.error import GraphQLError, print_error\nfrom graphql.language import parse, ObjectTypeDefinitionNode, Source, SourceLocation\nfrom graphql.pyutils import dedent\n\n\ndef describe_print_error():\n\n # noinspection PyArgumentEqualDefault\n def prints_line_numbers_with_correct_padding():\n single_digit = GraphQLError(\n \"Single digit line number with no padding\",\n None,\n Source(\"*\", \"Test\", SourceLocation(9, 1)),\n [0],\n )\n assert print_error(single_digit) == dedent(\n \"\"\"\n Single digit line number with no padding\n\n Test (9:1)\n 9: *\n ^\n \"\"\"\n )\n\n double_digit = GraphQLError(\n \"Left padded first line number\",\n None,\n Source(\"*\\n\", \"Test\", SourceLocation(9, 1)),\n [0],\n )\n\n assert print_error(double_digit) == dedent(\n \"\"\"\n Left padded first line number\n\n Test (9:1)\n 9: *\n ^\n 10:\\x20\n \"\"\"\n )\n\n def prints_an_error_with_nodes_from_different_sources():\n doc_a = parse(\n Source(\n dedent(\n 
\"\"\"\n type Foo {\n field: String\n }\n \"\"\"\n ),\n \"SourceA\",\n )\n )\n op_a = doc_a.definitions[0]\n op_a = cast(ObjectTypeDefinitionNode, op_a)\n assert op_a and op_a.kind == \"object_type_definition\" and op_a.fields\n field_a = op_a.fields[0]\n doc_b = parse(\n Source(\n dedent(\n \"\"\"\n type Foo {\n field: Int\n }\n \"\"\"\n ),\n \"SourceB\",\n )\n )\n op_b = doc_b.definitions[0]\n op_b = cast(ObjectTypeDefinitionNode, op_b)\n assert op_b and op_b.kind == \"object_type_definition\" and op_b.fields\n field_b = op_b.fields[0]\n\n error = GraphQLError(\n \"Example error with two nodes\", [field_a.type, field_b.type]\n )\n\n printed_error = print_error(error)\n assert printed_error == dedent(\n \"\"\"\n Example error with two nodes\n\n SourceA (2:10)\n 1: type Foo {\n 2: field: String\n ^\n 3: }\n\n SourceB (2:10)\n 1: type Foo {\n 2: field: Int\n ^\n 3: }\n \"\"\"\n )\n assert str(error) == printed_error\n","sub_path":"tests/error/test_print_error.py","file_name":"test_print_error.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"81667149","text":"# https://stepik.org/lesson/3373/step/6?unit=956\n\n\"\"\"\nПрограмма должна считывать одну строку со стандартного ввода и выводить для каждого уникального слова\nв этой строке число его повторений (без учёта регистра) в формате \"слово количество\" (см. пример вывода).\n\nПорядок вывода слов может быть произвольным, каждое уникальное слово должно выводиться только один раз.\n\"\"\"\n\ns = [i.lower() for i in input().split()]\nd = {}\nfor elem in s:\n if elem in d:\n d[elem] += 1\n else:\n d[elem] = 1\nfor key, value in d.items():\n print(key, value)","sub_path":"Task_3.2_6.py","file_name":"Task_3.2_6.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"166036533","text":"import rsa\nimport secrets\nimport json\nimport socket\nimport time\nimport math\n\nclass ObviousTransfer:\n def __init__(self, ip, port, keylen):\n self.ip = ip\n self.port = port\n self.keylen = keylen\n self.buffer = keylen * 8\n self.messageLen = int(keylen / 8)\n\n def send(self, option0, option1):\n # Generate rsa key\n # PublicKey(n, e)\n (pub, priv) = rsa.newkeys(self.keylen)\n\n # Generate two random message for placeholder for real message\n randomMessage0 = secrets.token_hex(self.messageLen)\n randomMessage1 = secrets.token_hex(self.messageLen)\n\n\n # Create message\n choiceMessage = {\n \"key\": {\n \"N\": pub.n,\n \"e\": pub.e,\n },\n \"0\": randomMessage0,\n \"1\": randomMessage1\n }\n\n\n # Establish connection to receiver\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((self.ip, self.port)) \n\n s.send(str.encode(json.dumps(choiceMessage), encoding=\"UTF-8\"))\n\n data = s.recv(self.buffer)\n v = int(str(data)[2:-1])\n\n # Generate k values and add to messages\n k_0 = pow(v - int(randomMessage0,base=16), priv.d, mod=priv.n)\n k_1 = pow(v - int(randomMessage1,base=16), priv.d, mod=priv.n)\n\n # Calcute messages\n mValues = {\n \"0\": int(option0) + k_0,\n \"1\": int(option1) + k_1\n }\n\n s.send(str.encode(json.dumps(mValues), encoding=\"UTF-8\"))\n\n s.close()\n\n def choose(self, decision):\n if decision != \"0\" and decision != \"1\":\n raise Exception(\"decision has to be either 0 or 1\")\n\n # Establish connection to sender\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((self.ip, self.port))\n s.listen(1)\n\n 
print(\"Waiting for start...\")\n\n conn, addr = s.accept()\n\n # Await options and public key data\n data = conn.recv(self.buffer)\n\n # Try to deserialize the data\n choice = json.loads(data)\n\n\n # Choose random number\n k = int(secrets.token_hex(self.messageLen), base=16)\n\n\n x_b = int(choice[decision], base=16)\n e = int(choice[\"key\"][\"e\"])\n N = int(choice[\"key\"][\"N\"])\n\n\n # Calculate response\n v = (x_b + k ** e) % N\n\n # Send v\n conn.send(bytes(str(v), encoding=\"UTF-8\"))\n\n # Await k values\n data = conn.recv(self.buffer)\n\n mValues = json.loads(data)\n\n # Get the m value of the selected input\n m_ = mValues[decision]\n\n # Get original message from it\n m = m_ - k\n\n return m","sub_path":"obviousTransfer.py","file_name":"obviousTransfer.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"66336787","text":"from zope.component import getUtility\nfrom Products.CMFCore.utils import UniqueObject\nfrom Globals import InitializeClass\nfrom OFS.SimpleItem import SimpleItem\nfrom AccessControl import ClassSecurityInfo\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.utils import safe_unicode\n\ntry:\n from zope.app.component.hooks import getSite\nexcept ImportError:\n from zope.component.hooks import getSite\n\nimport xlrd\nimport sqlite3\nimport Zope2\nimport re\nfrom DateTime import DateTime\nfrom plone.memoize import ram\nfrom time import time\n\n# Cache for 5 minutes\n@ram.cache(lambda *args: time() // (5*60))\n\ndef getCachedCourses(portal_catalog):\n\n results = portal_catalog.searchResults({'portal_type' : ['Topic', 'Folder'], 'Subject' : 'courses', 'sort_on' : 'sortable_title'})\n\n return sorted(list(set([x.Title for x in results])))\n\nclass ExtensionCourseTool(UniqueObject, SimpleItem):\n\n id = 'extension_course_tool'\n meta_type = 'Extension Course Tool'\n \n security = ClassSecurityInfo()\n\n @property\n def portal_catalog(self):\n site = getSite()\n portal_catalog = getToolByName(site, \"portal_catalog\")\n return portal_catalog\n\n security.declarePublic('getCourses')\n def getCourses(self):\n return getCachedCourses(self.portal_catalog)\n\n security.declarePublic('getCourseInfo')\n def getCourseInfo(self, course):\n sanitized_course = course.replace('(', '').replace(')', '')\n courses = []\n for r in self.portal_catalog.searchResults({'portal_type' : ['Topic', 'Folder'], 'Subject' : 'courses', 'sort_on' : 'sortable_title', 'Title' : sanitized_course}):\n if r.Title.lower().strip() == course.lower().strip():\n courses.append(r)\n return courses\n \n\n security.declarePublic('getCourseTopics')\n def getCourseTopics(self, course):\n\n results = self.getCourseInfo(course)\n\n topics = []\n \n for r in results:\n for t in r.extension_topics:\n if ':' in t:\n topics.append(t)\n\n return sorted(topics)\n\n\n security.declarePublic('getCourseSubtopics')\n def getCourseSubtopics(self, course):\n \n results = self.getCourseInfo(course)\n\n topics = []\n \n for r in results:\n for t in r.extension_subtopics:\n if ':' in t:\n topics.append(t)\n\n return sorted(list(set(topics)))\n\n security.declarePublic('getCourseForEvent')\n def getCourseForEvent(self, event_brain, skip_if_exists=True):\n\n if skip_if_exists and hasattr(event_brain, 'extension_courses') and event_brain.extension_courses:\n return event_brain.extension_courses[0]\n \n return self.getCourseForEventTitle(event_brain.Title)\n\n security.declarePublic('getCourseForEventTitle')\n 
def getCourseForEventTitle(self, event_title):\n\n abbr = {\n 'BKC' : 'Better Kid Care',\n 'Technology Tuesdays' : 'Technology Tuesday Series',\n 'StrongWomen' : u'StrongWomen\\u2122/Growing Stronger',\n 'Strong Women' : u'StrongWomen\\u2122/Growing Stronger',\n 'Growing Stronger' : u'StrongWomen\\u2122/Growing Stronger',\n 'Cooking for Crowds' : 'Cooking for Crowds-Volunteer Food Safety',\n 'Land Use Webinar Series' : 'Land Use Planning',\n 'Master Well Owner' : 'Master Well Owner Network (MWON) Volunteer Training',\n 'MWON' : 'Master Well Owner Network (MWON) Volunteer Training',\n 'Pesticide Testing' : 'Pennsylvania Pesticide Applicator Certification Training',\n 'Pesticide Update Meeting' : 'Pennsylvania Pesticide Applicator Certification Training',\n 'Pesticide Core Credit Recertification' : 'Pennsylvania Pesticide Applicator Certification Training',\n 'Agricultural Rescue Training' : 'PAgricultural Rescue Training',\n 'Fundamentals of HACCP' : 'Fundamentals of Hazard Analysis Critical Control Point (HACCP)',\n 'Principles of HACCP for Meat and Poultry Processors' : 'Hazard Analysis Critical Control Point (HACCP) for Meat and Poultry Processors',\n 'Shale' : 'Shale Gas 101',\n 'Safe Drinking Water Clinic' : 'Safe Drinking Water Clinics',\n 'Sheep Shearing Workshops' : 'Sheep Shearing Instruction',\n 'Six Steps to a Highly Effective Organization' : 'Six Steps to an Effective Organization',\n 'Tools for Equine Health & Soundness' : 'Tools for Equine Health and Soundness',\n 'Social Media Boot Camp' : 'Social Media Boot Camp for Agricultural Businesses',\n 'Raising Chickens' : 'Backyard Poultry',\n 'OMK' : 'Operation Military Kids',\n 'Home Canning Workshops' : 'Home Food Preservation',\n \n }\n\n title = safe_unicode(event_title).lower().strip()\n \n char_regex = re.compile(\"[^a-zA-Z0-9]\", re.I|re.M)\n \n def normalize(i):\n return char_regex.sub('', i).lower()\n \n courses = sorted([x.strip() for x in self.getCourses()], key=lambda x: len(x), reverse=True)\n \n # Check for exact title match\n for c in courses:\n if c.lower() in title:\n return c\n\n # Check for normalized title match\n for c in courses:\n if normalize(c) in normalize(title):\n return c\n\n # Check for abbreviated title match\n for c in courses:\n for a in sorted(abbr.keys(), key=lambda x: len(x), reverse=True):\n if a.lower() in title and abbr[a] == c:\n return c\n\n # Check for abbreviated normalized title match\n for c in courses:\n for a in sorted(abbr.keys(), key=lambda x: len(x), reverse=True):\n if normalize(a) in normalize(title) and abbr[a] == c:\n return c\n \n return ''\n\n def setCourseAttributes(self):\n\n now = DateTime()\n \n portal_catalog = getToolByName(self, \"portal_catalog\")\n\n # Find all upcoming events\n results = portal_catalog.searchResults({'portal_type' : 'Event', 'end' : {'query' : now, 'range' : 'min'}, 'review_state' : ['published', 'published-hidden']})\n \n # Set course for the events\n for r in results:\n \n # Automagically determine course\n course = self.getCourseForEvent(r, skip_if_exists=False)\n \n if course and r.extension_courses and course in r.extension_courses:\n # Skip if the course is already assigned\n continue\n\n title = safe_unicode(r.Title)\n\n if course:\n # Get object\n o = r.getObject()\n\n # Set course for event\n o.extension_courses = (course, )\n o.reindexObject()\n \n # Update the topics and subtopics\n for r in results:\n \n if r.extension_courses:\n # Get object\n o = r.getObject()\n \n course = o.extension_courses[0]\n \n # Automagically determine topics 
and subtopics\n topics = self.getCourseTopics(course)\n subtopics = self.getCourseSubtopics(course)\n \n # Get existing topics and subtopics\n course_topics = list(r.extension_topics)\n course_subtopics = list(r.extension_subtopics)\n\n # Set topics and subtopics\n for t in topics:\n if t not in course_topics:\n course_topics.append(t)\n\n for t in subtopics:\n if t not in course_subtopics:\n course_subtopics.append(t)\n\n if tuple(course_topics) != tuple(r.extension_topics) or tuple(course_subtopics) != tuple(r.extension_subtopics):\n o.extension_topics = course_topics\n o.extension_subtopics = course_subtopics\n \n o.reindexObject()\n\nInitializeClass(ExtensionCourseTool)\n","sub_path":"agsci/ExtensionExtender/tools/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":8186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"294441040","text":"\"\"\"Functional tests using the API with a fake DMAP Apple TV.\"\"\"\n\nimport asyncio\nimport ipaddress\n\nfrom aiohttp.test_utils import unittest_run_loop\n\nimport pyatv\nfrom pyatv import exceptions, interface\nfrom pyatv.conf import (AirPlayService, DmapService, AppleTV)\nfrom pyatv.const import MediaType, DeviceState, RepeatState\nfrom pyatv.dmap import pairing\nfrom tests.dmap.fake_dmap_atv import (FakeAppleTV, AppleTVUseCases)\nfrom tests.airplay.fake_airplay_device import DEVICE_CREDENTIALS\nfrom tests import (zeroconf_stub, common_functional_tests)\n\nHSGID = '12345-6789-0'\nPAIRING_GUID = '0x0000000000000001'\nSESSION_ID = 55555\nREMOTE_NAME = 'pyatv remote'\nPIN_CODE = 1234\n\nARTWORK_BYTES = b'1234'\nARTWORK_MIMETYPE = 'image/png'\nAIRPLAY_STREAM = 'http://stream'\n\n# This is valid for the PAIR in the pairing module and pin 1234\n# (extracted form a real device)\nPAIRINGCODE = '690E6FF61E0D7C747654A42AED17047D'\n\nHOMESHARING_SERVICE_1 = zeroconf_stub.homesharing_service(\n 'AAAA', b'Apple TV 1', '10.0.0.1', b'aaaa')\nHOMESHARING_SERVICE_2 = zeroconf_stub.homesharing_service(\n 'BBBB', b'Apple TV 2', '10.0.0.2', b'bbbb')\n\n\nclass DummyDeviceListener(interface.DeviceListener):\n\n def __init__(self):\n self.closed_sem = asyncio.Semaphore(0)\n self.lost_sem = asyncio.Semaphore(0)\n\n def connection_lost(self, exception):\n self.lost_sem.release()\n\n def connection_closed(self):\n self.closed_sem.release()\n\n\nclass DummyPushListener:\n\n @staticmethod\n def playstatus_update(updater, playstatus):\n updater.stop()\n\n @staticmethod\n def playstatus_error(updater, exception):\n pass\n\n\nclass DMAPFunctionalTest(common_functional_tests.CommonFunctionalTests):\n\n async def setUpAsync(self):\n await super().setUpAsync()\n self.atv = await self.get_connected_device(HSGID)\n\n # TODO: currently stubs internal method, should provide stub\n # for netifaces later\n pairing._get_private_ip_addresses = \\\n lambda: [ipaddress.ip_address('10.0.0.1')]\n\n async def tearDownAsync(self):\n await self.atv.close()\n await super().tearDownAsync()\n\n async def get_application(self, loop=None):\n self.fake_atv = FakeAppleTV(\n HSGID, PAIRING_GUID, SESSION_ID, self)\n self.usecase = AppleTVUseCases(self.fake_atv)\n return self.fake_atv.app\n\n async def get_connected_device(self, hsgid):\n self.dmap_service = DmapService(\n 'dmap_id', hsgid, port=self.server.port)\n self.airplay_service = AirPlayService(\n 'airplay_id', self.server.port, DEVICE_CREDENTIALS)\n self.conf = AppleTV('127.0.0.1', 'Apple TV')\n self.conf.add_service(self.dmap_service)\n 
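# the fake device config carries both a DMAP and an AirPlay service\n        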
self.conf.add_service(self.airplay_service)\n        return await pyatv.connect(self.conf, self.loop)\n\n    @unittest_run_loop\n    async def test_not_supported(self):\n        with self.assertRaises(exceptions.NotSupportedError):\n            await self.atv.remote_control.suspend()\n\n    @unittest_run_loop\n    async def test_connect_failed(self):\n        # Twice since the client will retry one time\n        self.usecase.make_login_fail()\n        self.usecase.make_login_fail()\n\n        with self.assertRaises(exceptions.AuthenticationError):\n            await self.atv.connect()\n\n    # This test verifies issue #2 (automatic re-login). It uses the artwork\n    # API, but it could have been any API since the login code is the same.\n    @unittest_run_loop\n    async def test_relogin_if_session_expired(self):\n        await self.atv.connect()\n\n        # Here, we are logged in and currently have a session id. These\n        # use cases will result in being logged out (HTTP 403) and forcing a\n        # re-login with a new session id (1234)\n        self.usecase.force_relogin(1234)\n        self.usecase.artwork_no_permission()\n        self.usecase.change_artwork(ARTWORK_BYTES, ARTWORK_MIMETYPE)\n\n        artwork = await self.atv.metadata.artwork()\n        self.assertEqual(artwork.bytes, ARTWORK_BYTES)\n\n    @unittest_run_loop\n    async def test_login_with_hsgid_succeed(self):\n        session_id = await self.atv.connect()\n        self.assertEqual(SESSION_ID, session_id)\n\n    @unittest_run_loop\n    async def test_login_with_pairing_guid_succeed(self):\n        await self.atv.close()\n        self.atv = await self.get_connected_device(PAIRING_GUID)\n        session_id = await self.atv.connect()\n        self.assertEqual(SESSION_ID, session_id)\n\n    @unittest_run_loop\n    async def test_connection_closed(self):\n        self.usecase.video_playing(paused=False, title='video1',\n                                   total_time=40, position=10,\n                                   revision=0)\n\n        self.atv.listener = DummyDeviceListener()\n        self.atv.push_updater.listener = DummyPushListener()\n        await self.atv.push_updater.start()\n\n        # Callback is scheduled on the event loop, so a semaphore is used\n        # to synchronize with the loop\n        await asyncio.wait_for(\n            self.atv.listener.closed_sem.acquire(), timeout=3.0)\n\n    @unittest_run_loop\n    async def test_connection_lost(self):\n        self.usecase.server_closes_connection()\n\n        self.atv.listener = DummyDeviceListener()\n        self.atv.push_updater.listener = DummyPushListener()\n        await self.atv.push_updater.start()\n\n        # Callback is scheduled on the event loop, so a semaphore is used\n        # to synchronize with the loop\n        await asyncio.wait_for(\n            self.atv.listener.lost_sem.acquire(), timeout=3.0)\n\n    # Common tests are below. 
Move tests that have been implemented to\n # common_functional_tests.py once implemented\n\n # TODO: This should check that device_id is one of the IDs\n # passed to the services into the device.\n def test_metadata_device_id(self):\n self.assertEqual(self.atv.metadata.device_id, 'dmap_id')\n\n @unittest_run_loop\n async def test_metadata_artwork(self):\n self.usecase.change_artwork(ARTWORK_BYTES, ARTWORK_MIMETYPE)\n\n artwork = await self.atv.metadata.artwork()\n self.assertIsNotNone(artwork)\n self.assertEqual(artwork.bytes, ARTWORK_BYTES)\n self.assertEqual(artwork.mimetype, ARTWORK_MIMETYPE)\n\n @unittest_run_loop\n async def test_metadata_artwork_none_if_not_available(self):\n self.usecase.change_artwork(b'', None)\n\n artwork = await self.atv.metadata.artwork()\n self.assertIsNone(artwork)\n\n @unittest_run_loop\n async def test_metadata_none_type_when_not_playing(self):\n self.usecase.nothing_playing()\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.media_type, MediaType.Unknown)\n self.assertEqual(playing.device_state, DeviceState.Idle)\n\n @unittest_run_loop\n async def test_metadata_video_playing(self):\n self.usecase.video_playing(paused=False, title='video',\n total_time=40, position=10)\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.media_type, MediaType.Video)\n self.assertEqual(playing.device_state, DeviceState.Playing)\n self.assertEqual(playing.title, 'video')\n self.assertEqual(playing.total_time, 40)\n self.assertEqual(playing.position, 10)\n\n @unittest_run_loop\n async def test_metadata_music_paused(self):\n self.usecase.music_playing(paused=True, title='music',\n artist='artist', album='album',\n total_time=222, position=49,\n genre='genre')\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.media_type, MediaType.Music)\n self.assertEqual(playing.device_state, DeviceState.Paused)\n self.assertEqual(playing.title, 'music')\n self.assertEqual(playing.artist, 'artist')\n self.assertEqual(playing.album, 'album')\n self.assertEqual(playing.genre, 'genre')\n self.assertEqual(playing.total_time, 222)\n self.assertEqual(playing.position, 49)\n\n @unittest_run_loop\n async def test_metadata_music_playing(self):\n self.usecase.music_playing(paused=False, title='music',\n artist='test1', album='test2',\n total_time=2, position=1,\n genre='genre')\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.media_type, MediaType.Music)\n self.assertEqual(playing.device_state, DeviceState.Playing)\n self.assertEqual(playing.title, 'music')\n self.assertEqual(playing.artist, 'test1')\n self.assertEqual(playing.album, 'test2')\n self.assertEqual(playing.genre, 'genre')\n self.assertEqual(playing.total_time, 2)\n self.assertEqual(playing.position, 1)\n\n @unittest_run_loop\n async def test_push_updates(self):\n\n class PushListener:\n def __init__(self):\n self.playing = None\n\n def playstatus_update(self, updater, playstatus):\n self.playing = playstatus\n updater.stop()\n\n @staticmethod\n def playstatus_error(updater, exception):\n pass\n\n # Prepare two playstatus updates in the fake device. Take note: every\n # time start() is called, revision 0 should be used first. This will\n # make sure that we always get a push update instantly. 
Otherwise we\n # might hang and wait for an update.\n self.usecase.video_playing(paused=False, title='video1',\n total_time=40, position=10,\n revision=0)\n self.usecase.video_playing(paused=True, title='video2',\n total_time=30, position=20,\n revision=0)\n\n # Poll the first one (\"video1\")\n await self.atv.metadata.playing()\n\n # Setup push updates which will instantly get the next one (\"video2\")\n listener = PushListener()\n self.atv.push_updater.listener = listener\n await self.atv.push_updater.start()\n\n # Check that we got the right one\n self.assertIsNotNone(listener.playing)\n self.assertEqual(listener.playing.title, 'video2')\n\n @unittest_run_loop\n async def test_shuffle_state(self):\n self.usecase.example_video(shuffle=False)\n self.usecase.example_video(shuffle=True)\n\n playing = await self.atv.metadata.playing()\n self.assertFalse(playing.shuffle)\n\n playing = await self.atv.metadata.playing()\n self.assertTrue(playing.shuffle)\n\n @unittest_run_loop\n async def test_repeat_state(self):\n self.usecase.example_video(repeat=RepeatState.Off)\n self.usecase.example_video(repeat=RepeatState.Track)\n self.usecase.example_video(repeat=RepeatState.All)\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.repeat, RepeatState.Off)\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.repeat, RepeatState.Track)\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.repeat, RepeatState.All)\n\n @unittest_run_loop\n async def test_set_shuffle(self):\n await self.atv.remote_control.set_shuffle(1)\n self.assertEqual(self.fake_atv.properties['dacp.shufflestate'], 1)\n\n await self.atv.remote_control.set_shuffle(0)\n self.assertEqual(self.fake_atv.properties['dacp.shufflestate'], 0)\n\n @unittest_run_loop\n async def test_set_repeat(self):\n await self.atv.remote_control.set_repeat(1)\n self.assertEqual(self.fake_atv.properties['dacp.repeatstate'], 1)\n\n await self.atv.remote_control.set_repeat(2)\n self.assertEqual(self.fake_atv.properties['dacp.repeatstate'], 2)\n\n @unittest_run_loop\n async def test_seek_in_playing_media(self):\n await self.atv.remote_control.set_position(60)\n self.assertEqual(self.fake_atv.properties['dacp.playingtime'], 60000)\n\n @unittest_run_loop\n async def test_metadata_loading(self):\n self.usecase.media_is_loading()\n\n playing = await self.atv.metadata.playing()\n self.assertEqual(playing.device_state, DeviceState.Loading)\n\n @unittest_run_loop\n async def test_button_unsupported_raises(self):\n buttons = ['home', 'volume_up', 'volume_down', 'suspend', 'wakeup']\n for button in buttons:\n with self.assertRaises(exceptions.NotSupportedError):\n await getattr(self.atv.remote_control, button)()\n","sub_path":"tests/dmap/test_dmap_functional.py","file_name":"test_dmap_functional.py","file_ext":"py","file_size_in_byte":12659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"498842497","text":"import sys\n\nimport cv2\nimport qdarkstyle\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtCore import QCoreApplication\nfrom PyQt5.QtGui import QImage, QPixmap\n\nfrom Forms.Parents.predict_c2d_gui_parent import Ui_PredictC2DWindow\nfrom Webcam_captures.Video_capture import Video_capture\n\n\nclass PredictC2DWindow(QtWidgets.QMainWindow, Ui_PredictC2DWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.capture = Video_capture()\n self.thread = QtCore.QThread()\n self.capture.moveToThread(self.thread)\n 
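# frames captured on the worker thread reach the GUI thread through this signal/slot connection\n        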
self.capture.signal.connect(self.getImages)\n        # connect the thread's started signal to the run() method of the object that must execute in the other thread\n        self.thread.started.connect(self.capture.run)\n        # start the thread\n        self.thread.start()\n\n    def closeEvent(self, a0: QtGui.QCloseEvent) -> None:\n        self.capture\n        self.thread.quit()\n\n    def open_c2d_model(self, path: str):\n        pass\n\n    @QtCore.pyqtSlot(object)\n    def getImages(self, image):\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        height, width, channel = image.shape\n        bytesPerLine = 3 * width\n        qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format_RGB888)\n        self.label.setPixmap(QPixmap.fromImage(qImg))\n        pass\n\n\n\nif __name__ == '__main__':\n    # New QApplication instance\n    # app = QtWidgets.QApplication(sys.argv)\n    QCoreApplication.setOrganizationName(\"QSoft\")\n    # QCoreApplication.setOrganizationDomain(\"Settings\")\n    QCoreApplication.setApplicationName(\"NN Family Creater\")\n\n    app = QtWidgets.QApplication(sys.argv)\n    app.setStyle('Fusion')\n    app.setStyleSheet(qdarkstyle.load_stylesheet())\n    # app.setSt\n    # app.setStyle('windowsvista')\n    # app.setStyle('Windows')\n    # print(QStyleFactory.keys())\n    # Create an instance of the class\n    # graphviz = GraphvizOutput(output_file='graph.png')\n    # with PyCallGraph(GraphvizOutput(output_file=\"graph1.png\")):\n    capture_window = PredictC2DWindow()\n    capture_window.show()\n    # Run\n    sys.exit(app.exec_())","sub_path":"Forms/predict_c2d_gui.py","file_name":"predict_c2d_gui.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"327909656","text":"'''6/12/2018 Plot pentad by pentad development of half_shallow Rossby no and precip\n'''\n\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport sh\nfrom windspharm.xarray import VectorWind\nimport matplotlib.patches as patches\nfrom data_handling_updates import gradients as gr, model_constants as mc\n\n\ndef plot_vort_dev(run, land_mask=None, lev=200, video=False, threed=True):\n    \n    data = xr.open_dataset('/scratch/rg419/Data_moist/climatologies/' + run + '.nc')\n    data['precipitation'] = (data.precipitation*86400.)\n    \n    zon_adv = data.ucomp * gr.ddx(data.ucomp)\n    merid_adv = data.vcomp * gr.ddy(data.ucomp)\n    vert_adv = data.omega * gr.ddp(data.ucomp)\n    \n    sinphi = np.sin(data.lat * np.pi/180.)\n    f = 2.* mc.omega*sinphi\n    if threed:\n        rossby = (zon_adv + merid_adv + vert_adv)/(f*data.vcomp)\n    else:\n        rossby = merid_adv/(f*data.vcomp)\n    # Start figure with 1 subplot\n    rcParams['figure.figsize'] = 10, 5\n    rcParams['font.size'] = 14\n\n    for i in range(72):\n        fig, ax1 = plt.subplots()\n        title = 'Pentad ' + str(int(data.xofyear[i]))\n        \n        f1 = rossby.sel(xofyear=i+1, pfull=lev).plot.contourf(x='lon', y='lat', ax=ax1, add_labels=False, add_colorbar=False, extend='both', zorder=1, levels = np.arange(0.,1.1,0.1))\n        \n        data.precipitation.sel(xofyear=i+1).plot.contour(x='lon', y='lat', ax=ax1, add_labels=False, extend='both', zorder=1, levels=np.arange(2.,21.,2.), colors='k') \n        ax1.grid(True,linestyle=':')\n        ax1.set_ylim(-60.,60.)\n        ax1.set_yticks(np.arange(-60.,61.,30.))\n        ax1.set_xticks(np.arange(0.,361.,90.))\n        ax1.set_title(title)\n        if land_mask is not None:\n            land = xr.open_dataset(land_mask)\n            land.land_mask.plot.contour(x='lon', y='lat', ax=ax1, levels=np.arange(-1.,2.,1.), add_labels=False, colors='k') \n        ax1.set_ylabel('Latitude')\n        ax1.set_xlabel('Longitude')\n        \n        plt.subplots_adjust(left=0.1, 
right=0.97, top=0.93, bottom=0.05, hspace=0.25, wspace=0.2)\n        cb1=fig.colorbar(f1, ax=ax1, use_gridspec=True, orientation = 'horizontal',fraction=0.05, pad=0.15, aspect=60, shrink=0.5)\n        \n        vidstr=''\n        if video:\n            vidstr='video/'\n        \n        if threed:\n            plot_dir = '/scratch/rg419/plots/zonal_asym_runs/gill_development/' + run +'/' + vidstr + '/rossby3d/' \n        else:\n            plot_dir = '/scratch/rg419/plots/zonal_asym_runs/gill_development/' + run +'/' + vidstr + '/rossby/' \n        mkdir = sh.mkdir.bake('-p')\n        mkdir(plot_dir)\n        \n        if video:\n            plt.savefig(plot_dir + 'rossby_and_precip_' + str(int(data.xofyear[i])) + '.png', format='png')\n        else:\n            plt.savefig(plot_dir + 'rossby_and_precip_' + str(int(data.xofyear[i])) + '.pdf', format='pdf')\n        plt.close()\n\n\nimport subprocess\ndef make_video(filepattern, output):\n    command = 'ffmpeg -framerate 5 -y -start_number 30 -i ' + filepattern + ' -vframes 45 -c:v libx264 -r 6 -pix_fmt yuv420p -vf scale=3200:-2 ' + output \n    subprocess.call([command], shell=True)\n    \n\nif __name__ == \"__main__\":\n\n    plot_vort_dev('half_shallow', land_mask = '/scratch/rg419/Experiments/asym_aquaplanets/input/half_shallow.nc', threed=True)\n    plot_vort_dev('half_shallow', land_mask = '/scratch/rg419/Experiments/asym_aquaplanets/input/half_shallow.nc', video=True, threed=True)\n\n    make_video('/scratch/rg419/plots/zonal_asym_runs/gill_development/half_shallow/video/rossby3d/rossby_and_precip_%02d.png', \n               '/scratch/rg419/plots/zonal_asym_runs/gill_development/half_shallow/video/rossby3d/rossby_and_precip.mp4')\n\n    \n    ","sub_path":"zonal_asym_runs/rossby_precip.py","file_name":"rossby_precip.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"585396589","text":"import socket\nimport threading\nPORT = 65432\nIP_Address = '192.168.10.1' # give the IP address of peer1\ndef sending (conn):\n    while True:\n        print(\"->\")\n        inp = input(\"\")\n\n        conn.send(bytes(inp,\"utf-8\"))\n        if inp == \"exit\":\n            break\ndef receiving(conn):\n    while True:\n        msg = conn.recv(1024)\n\n        if msg.decode(\"utf-8\") == \"exit\":\n            break\n        sender,text = msg.decode(\"utf-8\").split(\" \")\n        print(sender+\": \"+text)\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ntry:\n    s.connect((IP_Address,PORT))\n    print(\"Connected to Server!!\")\n    print(\"Enter Msg in this format: \")\n    t1 = threading.Thread(target=sending, args=(s,))\n    t2 = threading.Thread(target=receiving, args=(s,))\n\n    t1.start()\n    t2.start()\n\n    t1.join()\n    t2.join()\n\nexcept:\n    print(\"Connection is Lost!!\")\n","sub_path":"multiple/client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"542136220","text":"# Your task is to write a program that calculates whether a given purchased quantity\n# of dishwashing detergent bottles is enough to wash a certain number of dishes.\n# It is known that each bottle contains 750 ml of detergent,\n# washing 1 plate takes 5 ml,\n# and a pot takes 15 ml.
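\n# (Worked check: a load of 10 plates costs 10*5 = 50 ml and a load of 10 pots costs\n# 10*15 = 150 ml, so two plate loads plus one pot load use 2*50 + 150 = 250 ml,\n# well within a single 750 ml bottle.)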
\n# Assume that on every third load of dishes the dishwasher is filled only with pots,\n# and on the remaining loads with plates.\n# Until you receive the command \"End\", you keep receiving counts of dishes to be washed.\n\ndetergent = int(input()) * 750\nline = input()\ncounter = 0\ntotal_dishes = 0\ntotal_pots = 0\ndetergent_used = 0\ndiff = 0\nwhile line != \"End\":\n    dishes_count = int(line)\n    counter += 1\n    if counter % 3 == 0:\n        detergent_used += dishes_count * 15\n        total_pots += dishes_count\n    else:\n        detergent_used += dishes_count * 5\n        total_dishes += dishes_count\n    diff = detergent - detergent_used\n    if diff < 0 :\n        break\n    line = input()\nif diff >= 0:\n    print(f\"Detergent was enough!\\n{total_dishes} dishes and {total_pots} pots were washed.\\nLeftover detergent {diff} ml.\")\nelse:\n    print(f\"Not enough detergent, {abs(diff)} ml. more necessary!\")\n\n\n\n# Output\n# If the amount of detergent was enough to wash all the dishes:\n# \"Detergent was enough!\"\n# \"{number of clean plates} dishes and {number of clean pots} pots were washed.\"\n# \"Leftover detergent {amount of leftover detergent} ml.\"\n# If the amount of detergent was not enough to wash the dishes:\n# \"Not enough detergent, {amount of missing detergent} ml. more necessary!\"\n","sub_path":"Python Basics June 2020/5 while_loop_more_ex/5.3.1. dishwasher.py","file_name":"5.3.1. dishwasher.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"325698691","text":"import pysnooper\n@pysnooper.snoop()\ndef findDuplicates(nums):\n    \"\"\"\n    :type nums: List[int]\n    :rtype: List[int]\n    \"\"\"\n    res = []\n    for i in range(len(nums)):\n        while i != nums[i]-1:\n            if nums[i] != nums[nums[i]-1]:\n                tmp = nums[i]\n                nums[i] = nums[nums[i]-1]\n                nums[tmp-1] = tmp\n            else:\n                res.append(nums[i])\n                break\n    return res\n\n\nif __name__=='__main__':\n    findDuplicates([4,3,2,7,8,2,3,1])\n","sub_path":"debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"633608410","text":"from __future__ import absolute_import, unicode_literals\n\n\nimport os\nfrom datetime import timedelta\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'IAmBlog.settings')\n\nfrom celery import Celery\n\napp = Celery('iamblog')\n\napp.config_from_object('django.conf:settings', namespace=\"CELERY\")\n\napp.conf.update(\n    CELERYBEAT_SCHEDULE= {\n        'add_three_server': {\n            'task': 'blog.tasks.create',\n            'schedule' : timedelta(seconds=1),\n        }\n    }\n)\n\n\napp.autodiscover_tasks()\n\n\n\n\n","sub_path":"Iamblog/IAmBlog/celerys.py","file_name":"celerys.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"363305269","text":"import random\nclass Geboortedatum:\n    __aantal_data = 0\n\n    def __init__(self, par_dag, par_maand, par_jaar):\n        self.maand = par_maand\n        self.dag = par_dag\n        self.jaar = par_jaar\n        Geboortedatum.__aantal_data += 1\n\n    @property\n    def dag(self):\n        return self.__dag\n\n    @dag.setter\n    def dag(self, value):\n        if isinstance(value, int) and self.__controle_dag(value):\n            self.__dag = value\n        else:\n            self.__dag = -1\n\n    
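# the month must be in 1..12; any other value is stored as the sentinel -1\n    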
@property\n    def maand(self):\n        return self.__maand\n\n    @maand.setter\n    def maand(self, value):\n        if isinstance(value, int) and value in range(1, 13):\n            self.__maand = value\n        else:\n            self.__maand = -1\n\n    @property\n    def jaar(self):\n        return self.__jaar\n\n    @jaar.setter\n    def jaar(self, value):\n        if isinstance(value, int):\n            self.__jaar = value\n        else:\n            self.__jaar = -1\n\n    # helper method\n    # we make this private so it cannot be used outside the class\n    def __controle_dag(self, value):\n        if self.__maand in [1, 3, 5, 7, 8, 10, 12]:\n            if value > 0 and value <= 31:\n                return True\n        elif self.__maand in [4, 6, 9, 11]:\n            if value > 0 and value <= 30:\n                return True\n        else:\n            # now it is certainly February\n            if value > 0 and value <= 29:\n                return True\n        # if it never returned, this is not a valid date\n        return False\n\n    @staticmethod\n    def genereer_willerkeurig():\n        jaar = random.randint(1900, 2018)\n        maand = random.randint(1, 12)\n        dag = random.randint(1, 31)\n\n        res = Geboortedatum(dag, maand, jaar)\n        return res\n\n    def __str__(self):\n        return \"Geboortedatum : {0}/{1}/{2}\".format(self.dag, self.maand, self.jaar)\n\n\"\"\"\n >>> class Person:\n        def __init__(self,name,age):\n            self.name=name\n            self.age=age\n        def __gt__(self,other):\n            if self.age>other.age:\n                return True\n            return False\n        def __abs__(self):\n            return abs(self.age)\n        def __iadd__(self,other):\n            return self.age+other.age\n>>> Nick=Person('Nick',7)\n>>> Angela=Person('Angela',5)\n>>> Nick>Angela\n\"\"\"","sub_path":"Week08/model/ploeg/Geboortedatum.py","file_name":"Geboortedatum.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"401968110","text":"# coding: utf-8\n\nimport logging, struct, socket, re, os, json\nimport ConfigParser\n\nfrom lycustom import LyRequestHandler\nfrom tornado.web import authenticated, asynchronous\n\nfrom sqlalchemy.sql.expression import asc, desc\n\nfrom app.system.models import LuoYunConfig\nfrom app.system.forms import BaseinfoForm, DBForm, \\\n    CLCForm, NameserversForm, NetworkPoolForm, DomainForm, \\\n    NginxForm, RegistrationProtocolForm, WelcomeNewUserForm\n\n\nfrom lycustom import has_permission\n\nimport settings\n\n\n\nclass Index(LyRequestHandler):\n\n    @has_permission('admin')\n    def get(self):\n        self.render('system/index.html')\n\n\n\nclass DBEdit(LyRequestHandler):\n\n    @has_permission('admin')\n    def prepare(self):\n\n        self.cf = ConfigParser.ConfigParser()\n        self.cf.read( settings.LUOYUN_CONFIG_PATH )\n        if not self.cf.has_section('db'):\n            self.cf.add_section('db')\n\n    def get(self):\n\n        cf = self.cf\n\n        form = DBForm()\n        try:\n            form.dbname.data = cf.get('db', 'db_name')\n            form.dbuser.data = cf.get('db', 'db_user')\n            form.dbpass.data = cf.get('db', 'db_password')\n            form.dbhost.data = cf.get('db', 'db_host')\n            form.dbtype.data = cf.get('db', 'db_type')\n        except:\n            pass\n\n        self.render('system/db_edit.html', form=form)\n\n\n    def post(self):\n\n        cf = self.cf\n        saved = None\n\n        form = DBForm( self.request.arguments )\n        if form.validate():\n            cf.set('db', 'db_host', form.dbhost.data)\n            cf.set('db', 'db_type', form.dbtype.data)\n            cf.set('db', 'db_name', form.dbname.data)\n            cf.set('db', 'db_user', form.dbuser.data)\n            cf.set('db', 'db_password', form.dbpass.data)\n            cf.write(open(settings.LUOYUN_CONFIG_PATH, 'w'))\n            saved = True\n            # TODO: Important ! 
db settings should check for connect !\n\n self.render('system/db_edit.html', form=form, saved = saved)\n\n\n\nclass CLCEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.cf = ConfigParser.ConfigParser()\n self.cf.read( settings.LUOYUN_CONFIG_PATH )\n if not self.cf.has_section('clc'):\n self.cf.add_section('clc')\n\n def get(self):\n\n cf = self.cf\n\n form = CLCForm()\n try:\n form.ip.data = cf.get('clc', 'clc_ip')\n form.port.data = cf.get('clc', 'clc_port')\n except:\n pass\n\n self.render('system/clc_edit.html', form=form)\n\n\n def post(self):\n\n cf = self.cf\n saved = None\n\n form = CLCForm( self.request.arguments )\n if form.validate():\n cf.set('clc', 'clc_ip', form.ip.data)\n cf.set('clc', 'clc_port', form.port.data)\n cf.write(open(settings.LUOYUN_CONFIG_PATH, 'w'))\n saved = True\n\n self.render('system/clc_edit.html', form=form, saved = saved)\n\n\n\nclass BaseinfoEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.cf = ConfigParser.ConfigParser()\n self.cf.read( settings.LUOYUN_CONFIG_PATH )\n if not self.cf.has_section('base'):\n self.cf.add_section('base')\n\n\n def get(self):\n\n cf = self.cf\n\n form = BaseinfoForm()\n form.app_dir.data = settings.appliance_top_dir\n form.app_url.data = settings.appliance_top_url\n form.admin_email.data = settings.ADMIN_EMAIL\n\n self.render('system/baseinfo_edit.html', form=form)\n\n\n def post(self):\n\n cf = self.cf\n saved = None\n\n form = BaseinfoForm( self.request.arguments )\n if form.validate():\n\n cf.set('base', 'appliance_top_dir', form.app_dir.data)\n cf.set('base', 'appliance_top_url', form.app_url.data)\n cf.set('base', 'admin_email', form.admin_email.data)\n cf.write(open(settings.LUOYUN_CONFIG_PATH, 'w'))\n saved = True\n\n self.render('system/baseinfo_edit.html', form=form, saved=saved)\n\n\n\n\nclass NameserversEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.nameservers = self.db2.query(LuoYunConfig).filter_by( key = 'nameservers' ).first()\n\n\n def get(self):\n\n form = NameserversForm()\n if self.nameservers:\n form.nameservers.data = self.nameservers.value\n\n self.render('system/nameservers_edit.html',\n form = form)\n\n\n\n def post(self):\n\n saved = None\n form = NameserversForm( self.request.arguments )\n if form.validate():\n\n nameservers = form.nameservers.data\n\n if self.nameservers:\n self.nameservers.value = nameservers\n else:\n c = LuoYunConfig('nameservers', nameservers)\n self.db2.add( c )\n\n self.db2.commit()\n saved = True\n\n self.render('system/nameservers_edit.html',\n form = form, saved = saved)\n\n\n\nclass NetworkPool(LyRequestHandler):\n\n @has_permission('admin')\n def get(self):\n d = { 'title': _('Network pool of LuoYun'),\n 'NETWORK_POOL': settings.NETWORK_POOL[0], }\n\n if not d['NETWORK_POOL']:\n url = self.reverse_url('system:networkpool:edit')\n if self.get_argument('ajax', None):\n url += '?ajax=1'\n return self.redirect( url )\n\n from app.system.models import IpAssign\n def get_ipassign(ip):\n return self.db2.query(IpAssign).filter_by( ip = ip ).first()\n \n d['get_ipassign'] = get_ipassign\n\n self.render('system/networkpool.html', **d)\n\n\n\nclass NetworkPoolEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.networkpool = self.db2.query(LuoYunConfig).filter_by( key = 'networkpool' ).first()\n self.nameservers = self.db2.query(LuoYunConfig).filter_by( key = 'nameservers' ).first()\n\n\n def get(self):\n\n form = NetworkPoolForm()\n if 
self.networkpool:\n networkpool = json.loads(self.networkpool.value)\n if len(networkpool) > 0:\n networkpool = networkpool[0]\n form.start.data = networkpool['start']\n form.end.data = networkpool['end']\n form.netmask.data = networkpool['netmask']\n form.gateway.data = networkpool['gateway']\n if networkpool.has_key('nameservers'):\n form.nameservers.data = networkpool['nameservers']\n else:\n nameservers = self.db2.query(LuoYunConfig).filter_by( key = 'nameservers' ).first()\n if nameservers:\n form.nameservers.data = nameservers\n if networkpool.has_key('exclude_ips'):\n form.exclude_ips.data = networkpool['exclude_ips']\n\n self.render('system/networkpool_edit.html',\n form = form)\n\n\n\n def post(self):\n\n saved = None\n form = NetworkPoolForm( self.request.arguments )\n if form.validate():\n\n networkpool = {\n 'start': form.start.data,\n 'end': form.end.data,\n 'netmask': form.netmask.data,\n 'gateway': form.gateway.data,\n 'nameservers': form.nameservers.data,\n 'exclude_ips': form.exclude_ips.data,\n }\n\n\n nameservers = form.nameservers.data\n\n if self.nameservers:\n #self.nameservers.value = nameservers\n pass\n else:\n c = LuoYunConfig('nameservers', nameservers)\n self.db2.add( c )\n\n networkpool = json.dumps( [networkpool, ] )\n if self.networkpool:\n self.networkpool.value = networkpool\n else:\n c = LuoYunConfig('networkpool', networkpool)\n self.db2.add(c)\n\n self.db2.commit()\n from tool.network import set_network_pool\n # set settings.NETWORK_POOL\n set_network_pool(self.db2)\n saved = True\n # TODO: redirect to ?\n url = self.reverse_url('system:networkpool')\n self.redirect( url )\n\n self.render('system/networkpool_edit.html',\n form = form, saved = saved)\n\n\n\nclass DomainEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.domain = self.db2.query(LuoYunConfig).filter_by( key = 'domain' ).first()\n\n def get(self):\n\n form = DomainForm()\n if self.domain:\n domain = json.loads(self.domain.value)\n if domain > 0:\n form.topdomain.data = domain['topdomain']\n form.prefix.data = domain['prefix']\n form.suffix.data = domain['suffix']\n\n self.render('system/domain_edit.html', form = form)\n\n\n def post(self):\n\n saved = None\n form = DomainForm( self.request.arguments )\n if form.validate():\n\n domain = json.dumps( {\n 'topdomain': form.topdomain.data,\n 'prefix': form.prefix.data,\n 'suffix': form.suffix.data } )\n\n if self.domain:\n self.domain.value = domain\n else:\n c = LuoYunConfig('domain', domain)\n self.db2.add(c)\n\n self.db2.commit()\n saved = True\n\n self.render('system/domain_edit.html', form = form, saved = saved)\n\n\n\nclass NginxEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.nginx = self.db2.query(LuoYunConfig).filter_by(key='nginx').first()\n\n def get(self):\n\n form = NginxForm()\n if self.nginx:\n nginx = json.loads(self.nginx.value)\n else:\n nginx = {}\n\n form.confdir.data = nginx.get(\n 'conf_dir', settings.DEFAULT_NGINX_CONF_PATH )\n form.logdir.data = nginx.get(\n 'log_dir', settings.DEFAULT_NGINX_LOG_PATH )\n form.binpath.data = nginx.get(\n 'bin_path', settings.DEFAULT_NGINX_BIN_PATH )\n\n self.render('system/nginx_edit.html', form = form)\n\n\n def post(self):\n\n saved = None\n form = NginxForm( self.request.arguments )\n if form.validate():\n\n nginx = json.dumps( {\n 'conf_dir': form.confdir.data,\n 'log_dir': form.logdir.data,\n 'bin_path': form.binpath.data } )\n\n if self.nginx:\n self.nginx.value = nginx\n else:\n c = LuoYunConfig('nginx', nginx)\n 
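# first time the Nginx settings are stored; later edits update this same row\n                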
self.db2.add(c)\n\n self.db2.commit()\n saved = True\n\n self.render('system/nginx_edit.html', form = form, saved = saved)\n\n\n\nfrom markdown import Markdown\nYMK = Markdown(extensions=['fenced_code', 'tables'])\nclass RegistrationProtocolEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.protocol = self.db2.query(LuoYunConfig).filter_by(key='protocol').first()\n\n def get(self):\n\n form = RegistrationProtocolForm()\n\n # TODO: needed give a default protocol ?\n if self.protocol:\n protocol = json.loads(self.protocol.value)\n form.text.data = protocol.get('text')\n\n self.render('system/registration_protocol_edit.html', form = form)\n\n\n def post(self):\n\n saved = None\n form = RegistrationProtocolForm( self.request.arguments )\n if form.validate():\n\n protocol = json.dumps({\n 'text': form.text.data,\n 'html': YMK.convert(form.text.data) })\n\n if self.protocol:\n self.protocol.value = protocol\n else:\n c = LuoYunConfig('protocol', protocol)\n self.db2.add(c)\n\n self.db2.commit()\n saved = True\n\n self.render('system/registration_protocol_edit.html', form = form, saved = saved)\n\n\n\nclass WelcomeNewUserEdit(LyRequestHandler):\n\n @has_permission('admin')\n def prepare(self):\n\n self.welcome = self.db2.query(LuoYunConfig).filter_by(key='welcome_new_user').first()\n\n def get(self):\n\n form = WelcomeNewUserForm()\n\n # TODO: needed give a default welcome info ?\n if self.welcome:\n welcome = json.loads(self.welcome.value)\n form.text.data = welcome.get('text')\n\n self.render('system/welcome_new_user_edit.html', form = form)\n\n\n def post(self):\n\n saved = None\n form = WelcomeNewUserForm( self.request.arguments )\n if form.validate():\n\n welcome = json.dumps({\n 'text': form.text.data,\n 'html': YMK.convert(form.text.data) })\n\n if self.welcome:\n self.welcome.value = welcome\n else:\n c = LuoYunConfig('welcome_new_user', welcome)\n self.db2.add(c)\n\n self.db2.commit()\n saved = True\n\n self.render('system/welcome_new_user_edit.html', form = form, saved = saved)\n","sub_path":"lyweb/app/system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"492177428","text":"import pandas as pd\nfrom intermine.webservice import Service\nimport subprocess\nimport numpy as np\nimport csv\nimport Levenshtein\n\nroman_dict = {\n '01':'I',\n '02':'II',\n '03':'III',\n '04':'IV',\n '05':'V',\n '06':'VI',\n '07':'VII',\n '08':'VIII',\n '09':'IX',\n '10':'X',\n '11':'XI',\n '12':'XII',\n '13':'XIII',\n '14':'XIV',\n '15':'XV',\n '16':'XVI',\n 'mt':'mt'\n}\n\nCHROMO_DICT = {\n 'tpg|BK006937.2|': 'chr03',\n 'tpg|BK006938.2|': 'chr04',\n 'tpg|BK006948.2|': 'chr15',\n 'ref|NC_001224|': 'chrmt',\n 'tpg|BK006941.2|': 'chr07',\n 'tpg|BK006944.2|': 'chr11',\n 'tpg|BK006947.3|': 'chr14',\n 'tpg|BK006936.2|': 'chr02',\n 'tpg|BK006939.2|': 'chr05',\n 'tpg|BK006942.2|': 'chr09',\n 'tpg|BK006935.2|': 'chr01',\n 'tpg|BK006940.2|': 'chr06',\n 'tpg|BK006945.2|': 'chr12',\n 'tpg|BK006943.2|': 'chr10',\n 'tpg|BK006949.2|': 'chr16',\n 'tpg|BK006934.2|': 'chr08',\n 'tpg|BK006946.2|': 'chr13',\n}\n\ndef get_gene2(row):\n # returns \"in Gene\" if it is in a gene\n # returns \"near Gene\" if it isn't, and gene is the gene that the insertion is closest to the start of the gene\n if str(row['Gene_ORF']) != 'NA':\n return 'in ' + row['Gene_ORF']\n elif str(row['Gene_ORF.nearby']) != 'NA':\n nearby_list = row['Gene_ORF.nearby'].split('|')\n else:\n return ''\n 
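# the insertion is outside any gene here; fall back to the nearby genes and\n    # pick the one whose strand-aware start coordinate is closest to the insertion edge\n    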
# if we have a list of nearby genes:\n    if len(nearby_list) == 1:\n        return 'nearby ' + nearby_list[0]\n    else:\n        strands = [int(i) for i in row['orf_strand.nearby'].split('|')]\n        starts = [int(i) for i in row['start.nearby'].split('|')]\n        ends = [int(i) for i in row['end.nearby'].split('|')]\n        nearest = None\n        nearest_dist = np.inf\n        insertion_loc = int(row['insertion_edge'])\n        for i in range(len(nearby_list)):\n            if strands[i] == -1:\n                actual_start = ends[i]\n            else:\n                actual_start = starts[i]\n            if np.abs(actual_start - insertion_loc) < nearest_dist:\n                nearest_dist = np.abs(actual_start - insertion_loc)\n                nearest = nearby_list[i]\n        return 'nearby ' + nearest\n\ndef format_bowtie_row(raw_row):\n    tmp_dict = {'bowtie_code': raw_row[1], 'chromosome': CHROMO_DICT.setdefault(raw_row[2], 'NA'), \n                'mapq': raw_row[4], 'cigar_match': raw_row[5]}\n    # changes insertion location depending on the orientation of alignment\n    if raw_row[1] == '16':\n        tmp_dict['insertion_strand'] = '-'\n        tmp_dict['insertion_edge'] = int(raw_row[3]) + len(raw_row[0]) - 1\n    else:\n        tmp_dict['insertion_strand'] = '+'\n        tmp_dict['insertion_edge'] = int(raw_row[3])\n    return tmp_dict\n\ndef run_bowtie(seqs, bowtie_base_directory):\n    with open('tmp.fasta', 'w') as outfile:\n        for seq in seqs:\n            outfile.write('>' + seq + '\\n' + seq + '\\n')\n    subprocess.call(['bowtie2', '-x', bowtie_base_directory, '-U', 'tmp.fasta', '-S', 'tmp_bowtie_output.tsv', '-f', '--local'])\n    align_info = dict()\n    with open('tmp_bowtie_output.tsv', 'r') as infile:\n        reader = csv.reader(infile, delimiter='\\t')\n        reading = False\n        for row in reader:\n            if not reading:\n                if row[0] == '@PG':\n                    reading = True\n            else:\n                align_info[row[0]] = format_bowtie_row(row)\n    return align_info\n\ndef gene_orfer(row):\n    if str(row['Gene']) == 'None':\n        return row['ORF']\n    else:\n        return row['Gene']\n\ndef get_all_gene_annotations():\n    service = Service(\"https://yeastmine.yeastgenome.org:443/yeastmine/service\")\n    query = service.new_query(\"Gene\")\n    col_names = [\"briefDescription\", \"description\", \"functionSummary\",\n                 \"chromosome.primaryIdentifier\", \"secondaryIdentifier\", \"symbol\",\n                 \"phenotypeSummary\", \"locations.strand\", \"locations.end\", \"locations.start\"]\n    query.add_view(col_names)\n    seen_orfs = set()\n    col_dicts = {c: [] for c in col_names}\n    for row in query.rows():\n        # for some reason rows are repeated in the yeastmine output, so I deduplicate them here\n        if row['secondaryIdentifier'] not in seen_orfs:\n            for c in col_names:\n                col_dicts[c].append(row[c])\n            seen_orfs.add(row['secondaryIdentifier'])\n    name_shortener = {\n        'chromosome.primaryIdentifier': 'chromosome',\n        'secondaryIdentifier': 'ORF',\n        'symbol': 'Gene',\n        'locations.start': 'start',\n        'locations.end': 'end',\n        'locations.strand': 'orf_strand'\n    }\n    td = pd.DataFrame(col_dicts).rename(columns=name_shortener)\n    td['Gene_ORF'] = td.apply(lambda row: gene_orfer(row), axis=1)\n    return td\n\ndef get_genes_hit(chromo, location, ann_df, buf_range):\n    chromo_fix = chromo[:3] + roman_dict.setdefault(chromo[3:], 'NA')\n    return ann_df.loc[ann_df['chromosome'] == chromo_fix].loc[(ann_df['start'] - buf_range) < location].loc[(ann_df['end'] + buf_range) > location]\n\ndef align_and_annotate(edge_set, bowtie_path):\n    aligned = run_bowtie(edge_set, bowtie_path)\n    ann = get_all_gene_annotations()\n    alignment_col_names = ['bowtie_code', 'chromosome', 'insertion_edge', 'insertion_strand', 'mapq', 'cigar_match']\n    annotation_col_names = ['briefDescription', 'description', 'functionSummary', \n                            'Gene_ORF', 
'phenotypeSummary', 'orf_strand', 'end', 'start']\n dat_dicts = {col: [] for col in ['Edge'] + alignment_col_names + annotation_col_names + [i + '.nearby' for i in annotation_col_names]}\n for edge in edge_set:\n dat_dicts['Edge'].append(edge)\n tmp_align_dict = aligned[edge]\n annotations = get_genes_hit(tmp_align_dict['chromosome'], tmp_align_dict['insertion_edge'], ann, 0)\n ann_nearby = get_genes_hit(tmp_align_dict['chromosome'], tmp_align_dict['insertion_edge'], ann, 500)\n for col in alignment_col_names:\n dat_dicts[col].append(tmp_align_dict[col])\n for col in annotation_col_names:\n if len(annotations) > 0:\n dat_dicts[col].append('|'.join([str(i) for i in annotations[col]]))\n else:\n dat_dicts[col].append('NA' )\n for col in annotation_col_names:\n if len(ann_nearby) > 0:\n dat_dicts[col + '.nearby'].append('|'.join([str(i) for i in ann_nearby[col]]))\n else:\n dat_dicts[col + '.nearby'].append('NA' )\n return pd.DataFrame(dat_dicts)\n \ncontrol_edges = set(['CTAAGTGTGAAGGAGTTGTCTTCTTGCGCT', 'CTGATTTGTGCTGTCTTAGGACCCTCTGAA', 'GCTGCTTATGAGGATATGGATTTAGAGCTA'])\nall_edges = set(pd.read_csv('../../BT_Bioinformatic_Work/BT1_output/BT_BC_Assoc/BT_BC_Assoc_filtered_clusters.csv')['Edge'])\nall_edges.update(control_edges)\nann_edges = align_and_annotate(all_edges, '../accessory_files/bowtie_build_S288C/S288C')\ntncs = list(pd.read_csv('TnCS_Final_Rearray_Data.csv')['Edge.Bases'].str[:30])\nann_edges['Expected.From.TnCS'] = ann_edges['Edge'].isin(tncs)\ntp_d = pd.read_csv('../../BT_Bioinformatic_Work/TP_output/TP_BC_Assoc/TP_CS_BC_Assoc_filtered_clusters.csv')[['Edge', 'Total.Counts']].groupby('Edge', as_index=False).sum()\ntp = list(tp_d.loc[tp_d['Total.Counts'] > 50000]['Edge'])\nann_edges['In.TP'] = ann_edges['Edge'].str[:15].isin(tp)\nann_edges['TP.Control'] = ann_edges['Edge'].isin(control_edges)\ntn96_rearray = pd.read_csv('../accessory_files/Tn96_edges_chosen_final.csv')\ntp_chosen = list(tn96_rearray['Edge']) + [i for i in control_edges]\ntp_ref = list(tn96_rearray.loc[tn96_rearray['Is.Reference']]['Edge'])\nann_edges['Expected.In.TP'] = ann_edges['Edge'].isin(tp_chosen)\nann_edges['TP.Reference'] = ann_edges['Edge'].isin(tp_ref)\nann_edges['Gene.Use'] = ann_edges.apply(lambda row: get_gene2(row), axis=1)\nann_edges['Edge.ID'] = [i for i in range(len(ann_edges))]\nann_edges.to_csv('../../Mutation_Annotation/Edges_annotated.csv', index=False)\n","sub_path":"scripts/.ipynb_checkpoints/annotate-checkpoint.py","file_name":"annotate-checkpoint.py","file_ext":"py","file_size_in_byte":7877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"317440836","text":"#!/usr/bin/env python\n\nimport copy\nimport logging\nimport time\n\nfrom switcher.utils import timeconverter2seconds\n\n\nclass TimeInterval(object):\n \"\"\"\n class to manipulate time intervals\n \"\"\"\n def __init__(self, start_t, end_t, extend_sec=0):\n \"\"\"\n :param int start_t: starting time in seconds since epoch\n :param int end_t: starting time in seconds since epoch\n :param int extend_sec: number of seconds to advance the start_t\n \"\"\"\n self.log = logging.getLogger('timeinterval')\n self.log.debug('TimeInterval object with inputs %s, %s.' 
%(start_t, end_t))\n        self.original_start_t = start_t # we keep a record, for extended intervals\n        self.start_t = start_t - extend_sec\n        self.end_t = end_t\n        self.log.debug('TimeInterval object initialized.')\n    \n\n    def belongs(self, t_epoch=None):\n        \"\"\"\n        returns whether time t_epoch,\n        in seconds since epoch, is between the\n        start time and end time of this interval\n        :param int t_epoch: time in seconds since epoch.\n                            If no value is passed,\n                            current time will be calculated.\n        :return boolean:\n        \"\"\"\n        self.log.debug('Starting for time %s.' %t_epoch)\n        if not t_epoch:\n            t_epoch = int(time.time()) # now\n        out = t_epoch >= self.start_t and\\\n              t_epoch < self.end_t\n        self.log.debug('Leaving, returning %s.' %out)\n        return out\n\n\n    def extend(self, extend_sec):\n        \"\"\"\n        returns a new TimeInterval object where the start_t\n        has moved to take into account when the downtime starts having\n        real impact\n        :param int extend_sec: number of seconds to advance the start time\n        :return TimeInterval:\n        \"\"\"\n        self.log.debug('Starting with extend_sec %s.' %extend_sec)\n        out = TimeInterval(self.start_t, self.end_t, extend_sec)\n        self.log.debug('Leaving, returning %s.' %out)\n        return out\n\n    \n    def overlap(self, other):\n        \"\"\"\n        returns a new TimeInterval object with only the overlapping time intervals.\n        :param TimeInterval other: another TimeInterval object\n        :return TimeInterval or None:\n        \"\"\"\n        self.log.debug('Starting with other %s.' %other)\n        max_start_t = max(self.start_t, other.start_t)\n        min_end_t = min(self.end_t, other.end_t)\n        if max_start_t >= min_end_t:\n            self.log.info('Original intervals do not overlap.')\n            return None\n        else:\n            out = TimeInterval(max_start_t, min_end_t)\n            self.log.info('Leaving, returning %s.' %out)\n            return out\n\n\n    def expired(self):\n        \"\"\"\n        check if the end time of this TimeInterval\n        is already in the past\n        :return bool:\n        \"\"\"\n        self.log.debug('Starting.')\n        now = int(time.time())\n        out = now > self.end_t\n        self.log.debug('Leaving, returning %s.' 
%out)\n return out\n\n\n\n# ===================================================================\n\nclass TimeIntervalCollection(object):\n \"\"\"\n container for a list of TimeInterval objects.\n For example, to handle together all TimeIntervals for a given CE object.\n \"\"\"\n def __init__(self, t_interval_l):\n \"\"\"\n :param list t_interval_l: a list of TimeInterval objects\n \"\"\"\n self.t_interval_l = t_interval_l\n\n\n\n# ===================================================================\n# \n# utils to manage multiple TimeIntervals at once\n# \n# ===================================================================\n\ndef overlap2collections(t_intervalcollection_1, t_intervalcollection_2):\n \"\"\"\n given 2 objects TimeIntervalCollection, \n returns another TimeIntervalCollection with the \n overlappings\n \"\"\"\n new_t_interval_l = []\n for i in t_intervalcollection_1.t_interval_l:\n for j in t_intervalcollection_2.t_interval_l:\n new_t_interval = i.overlap(j)\n if new_t_interval:\n new_t_interval_l.append(new_t_interval)\n return TimeIntervalCollection(new_t_interval_l)\n\n\ndef overlapNcollections(*t_intervalcollection_l):\n \"\"\"\n given a list of TimeIntervalCollection objects, \n returns a new TimeIntervalCollection with the overall overlappings \n \"\"\"\n return reduce(overlap2collections, t_intervalcollection_l)\n \n\n\n\n\n\n\n\n\n\n\n\n###\n### sorting TimeInterval events by time may become helpful in the future\n### for performance reasons\n### but for now I am not using it\n###\n### # def __eq__(self, other):\n### # return self.start_t == other.start_t and\\\n### # self.end_t == other.end_t and\\\n### # self.type == other.type and\\\n### # self.endpoint == other.endpoint\n### \n### def __eq__(self, other):\n### return self.start_t == other.start_t and\\\n### self.end_t == other.end_t\n### \n### \n### def __ne__(self, other):\n### return not self.__eq__(other)\n### \n### \n### def __lt__(self, other):\n### return (self.start_t < other.start_t) or\\\n### (self.start_t == other.start_t and\\\n### self.end_t < other.end_t)\n### \n### \n### def __le__(self, other):\n### return self.__lt__(other) or self.__eq__(other)\n### \n### \n### def __gt__(self, other):\n### return not self.__le__(other)\n### \n### \n### def __ge__(self, other):\n### return self.__gt__(other) or self.__eq__(other)\n","sub_path":"attic/timeinterval.py","file_name":"timeinterval.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"6407423","text":"import os\nimport re\nimport copy\nimport datetime\nimport subprocess\nimport logging\nimport unicodedata\nfrom importlib.machinery import SourceFileLoader\n\nlogger = logging.getLogger(__name__)\n\n\ndef reduce_float(f):\n \"\"\"\n Reduce a float to only 1 decimal place\n\n args:\n - float\n returns:\n float\n \"\"\"\n\n return float(\"%0.1f\" % f)\n\n\ndef convert_celsius(val, reduce=True):\n \"\"\"\n convert celsius to fahrenheit\n\n args:\n - int or float\n - bool: Reduce/round floats to only 1 decimal place? (default=True)\n\n returns:\n a float\n \"\"\"\n\n f = float(val) * 1.8 + 32\n if reduce:\n return reduce_float(f)\n else:\n return f\n\n\ndef convert_fahrenheit(val, reduce=True):\n \"\"\"\n convert fahrenheit to celsius\n\n args:\n - int or float\n - bool: Reduce/round floats to only 1 decimal place? 
(default=True)\n\n returns:\n a float\n \"\"\"\n\n c = (float(val) - 32) / 1.8\n if reduce:\n return reduce_float(c)\n else:\n return c\n\n\ndef datetime_for_js(obj):\n \"\"\"\n Convert a datetime object to format that javascript can handle.\n\n args:\n - datetime object\n returns:\n str\n raises:\n TypeError if arg is not a DateTime\n \"\"\"\n\n if hasattr(obj, 'isoformat'):\n obj = obj.replace(tzinfo=datetime.timezone.utc)\n return obj.isoformat()\n else:\n raise TypeError(\"Expected a Datetime. Got {}\".format(type(obj)))\n\n\ndef rehydrate_plugin_instance(parent_class, class_name, options):\n \"\"\"\n rehydrate an instance of a plugin based on JSON data\n\n as I document this, I wonder if it might be better to pickle the objects and\n store them in the db that way? I will consider this...\n\n args:\n - the parent class the child instance inherits from. Usually either\n ActionPluginBase or BlePluginBase.\n - the class name of the child we are rehydrating\n - init options for the child instance\n returns:\n an instance of a class, or None if instance could not be created.\n raises:\n none\n \"\"\"\n for plugin in parent_class.plugins:\n if plugin.__name__ == class_name:\n obj = plugin(**options)\n return obj\n\n return None\n\n\ndef subprocess_cmd(cmd, shell=False):\n \"\"\"\n run a system command as a subprocess\n\n args:\n - list (commands, like ['ls','-la'])\n - bool, Run with shell.\n If set to true, the command list will be flattened to a string.\n returns:\n tuple (exit-code, stdout, stderr)\n raises:\n none\n \"\"\"\n\n # cast all values to str, because ints will cause errors\n if type(cmd) is list:\n cmd = [str(c) for c in cmd]\n\n # with shell=True, we flatten commands to a str\n if shell is True and type(cmd) is list:\n cmd = \" \".join(cmd)\n\n child = subprocess.Popen(cmd, shell=shell,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n output, errors = child.communicate()\n return (child.returncode, output, errors)\n\n\ndef load_plugins(path):\n \"\"\"\n Load any plugin files from the named path.\n\n Yes, arbitrarily loading source files found lying around in filesystem is\n a risky thing. 
But, our ecosystem is quite closed so, -meh-\n\n \"Strange women lying in ponds distributing swords is no basis for a\n system of government!\"\n\n args:\n - str (a folder path)\n returns:\n none\n raises:\n none\n \"\"\"\n if not path:\n raise ValueError(\"path required for plugin loader\")\n\n if not os.path.exists(path):\n raise RuntimeError(\"path {} not found\".format(path))\n\n files = [f for f in os.listdir(path)\n if f.endswith('.py') and f not in ['__init__.py']]\n\n for f in files:\n mod = f.split(\".\")[0]\n fname = os.path.join(path, f)\n try:\n result = SourceFileLoader(mod, fname).load_module()\n except Exception:\n logger.exception(\"failed to load '{}'\".format(fname))\n\n return\n\n\ndef blescan_devices():\n \"\"\"\n Scan for all BLE devices.\n\n args:\n none\n returns:\n list (like: [{'name': NAME, 'address': ADDRESS}, ])\n \"\"\"\n\n logger.debug(\"scanning BLE devices\")\n command = ['sudo', 'blescan', '-a']\n rval, output, errors = subprocess_cmd(command, False)\n if not rval:\n buf = clean_blescan_output(output)\n return parse_blescan_buffer(buf)\n else:\n logger.warning(\"blescan_devices(): {}\".format(errors))\n return None\n\n\ndef clean_blescan_output(buf):\n \"\"\"\n Clean escape chars and hex chars from the output of the blescan cmd.\n\n args:\n - str (a block of binary-like text)\n return:\n list\n \"\"\"\n\n results = []\n for line in buf.splitlines():\n ansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\n clean = line.decode(\"utf-8\")\n clean = ansi_escape.sub('', clean)\n results.append(clean)\n\n return results\n\n\ndef parse_blescan_buffer(data):\n \"\"\"\n parse the blescan stdout buffer and extract valid device names/addresses\n\n args:\n - list (*sanitized* lines from the output of the blescan command)\n returns:\n list (like [{'address': ADDRESS, 'name': NAME}, ])\n \"\"\"\n\n devices = []\n buf = {}\n for line in data:\n if re.search(r'^\\s*Device', line):\n if buf:\n devices.append(copy.deepcopy(buf))\n buf = {}\n\n match = re.search(r'(?P
<address>\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2}:\\w{2})', line)\n        if match:\n            buf['address'] = match.group('address')\n            continue\n\n        match = re.search(r\"^\\s*Complete Local Name: '(?P<name>.+)'\", line)\n        if match:\n            buf['name'] = match.group('name')\n\n    # one last check of the buffer\n    if buf:\n        devices.append(copy.deepcopy(buf))\n\n    return devices\n\n\ndef eval_condition(val, stmt):\n    \"\"\"\n    Evaluate a conditional str statement against a value.\n    like;\n        (22, \"value gt 80\")  # False\n\n    args:\n        - float (our reference value)\n        - str (a conditional statement, like \"value lt 100\")\n    returns:\n        bool\n    raises:\n        none\n    \"\"\"\n\n    atoms = re.split(r'\\s+', stmt)\n    if atoms[0] == 'value':\n        atoms = atoms[1:]\n\n    oper, threshold = atoms\n    threshold = float(threshold)\n\n    if oper == 'lt' and val < threshold:\n        return True\n    elif oper == 'le' and val <= threshold:\n        return True\n    elif oper == 'gt' and val > threshold:\n        return True\n    elif oper == 'ge' and val >= threshold:\n        return True\n    elif oper == 'eq' and val == threshold:\n        return True\n    elif oper == 'ne' and val != threshold:\n        return True\n\n    return False\n","sub_path":"potnanny_core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"515011227","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nZhangYu Editor\r\n\r\ncrawler3: ranking of Chinese universities\r\n\"\"\"\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom bs4 import element\r\n\r\ndef getHTMLText(url):\r\n    '''\r\n    Fetch the HTML text for the given url\r\n    '''\r\n    try:\r\n        r = requests.get(url, timeout=30)\r\n        r.raise_for_status()\r\n        r.encoding = r.apparent_encoding\r\n        return r.text \r\n    except:\r\n        return ''\r\n\r\ndef fillUinfoList(uinfo, htmls):\r\n    '''\r\n    Extract university information from htmls and put it into uinfo\r\n    '''\r\n    soup = BeautifulSoup(htmls, 'html.parser')\r\n    for tr in soup.find('tbody').children:\r\n        if isinstance(tr, element.Tag):\r\n            tds = tr('td')\r\n            uinfo.append([tds[0].string, tds[1].string, tds[3].string])\r\n\r\ndef printUinfoList(uinfo, num):\r\n    '''\r\n    Print the first num rows of uinfo\r\n    '''\r\n    tplt1 = \"{0:^10}\\t{1:{3}^8}\\t{2:^9}\"\r\n    tplt2 = \"{0:^10}\\t{1:{3}^10}\\t{2:^10}\"\r\n    print(tplt1.format(\"排名\",\"学校名称\",\"总分\",chr(12288)))\r\n    for i in range(num):\r\n        u = uinfo[i]\r\n        print(tplt2.format(u[0],u[1],u[2],chr(12288)))\r\n    \r\ndef main():\r\n    uinfo = []\r\n    url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html'\r\n    htmls = getHTMLText(url)\r\n    fillUinfoList(uinfo, htmls)\r\n    printUinfoList(uinfo, 20)\r\n\r\nmain()","sub_path":"crawler3.py","file_name":"crawler3.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"8170177","text":"import os\r\nclass config:\r\n    def __init__(self,mode='conv',nfilt=26,mfcc=13,nfft=512,rate=16000):\r\n        self.mode=mode\r\n        self.nfilt=nfilt\r\n        self.mfcc=mfcc\r\n        self.nfft=nfft\r\n        self.rate=rate\r\n        self.step=int(rate/10)\r\n        self.model_path=os.path.join('model',mode +'.model')\r\n        #self.p_path=os.path.join('pickle',mode +'.pkl')\r\n    ","sub_path":"cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"289242271","text":"from my_web.models import *\n\n\ndef CreateBaseInfo(data, cards):\n    \"\"\"\n    Called once at the start of a game to record the initial information of the round\n    :param data: json passed in from the web front end, mainly containing seat information\n    :param cards: the nine cards that have already been drawn\n    :return: none\n    \"\"\"\n    
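# one BaseInfo row per game: both players' hole cards, then flop, turn and river\n    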
+{"seq_id":"289242271","text":"from my_web.models import *\n\n\ndef CreateBaseInfo(data, cards):\n    \"\"\"\n    Called once at the start of a game to record the initial information for the round\n    :param data: JSON passed in from the web front end, mainly containing seat information\n    :param cards: the nine cards that have already been drawn\n    :return: none\n    \"\"\"\n    base_info = BaseInfo(\n        my_seat=data[\"seat\"],\n        my_card1=cards[0],\n        my_card2=cards[1],\n        opp_card1=cards[2],\n        opp_card2=cards[3],\n        flop_card1=cards[4],\n        flop_card2=cards[5],\n        flop_card3=cards[6],\n        turn_card=cards[7],\n        river_card=cards[8]\n    )\n\n    base_info.save()\n","sub_path":"djangoProject/DataManager/BaseInfo.py","file_name":"BaseInfo.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"339067260","text":"\nimport torch\nimport tqdm\nimport utils\nimport collections\n\ntorch.random.manual_seed(0)\n\n\nclass Trainer:\n\n    def __init__(self,\n                 model,\n                 dataloader_train,\n                 dataloader_test,\n                 batch_size,\n                 loss_function,\n                 optimizer):\n        self.dataloader_train = dataloader_train\n        self.dataloader_test = dataloader_test\n        self.batch_size = batch_size\n\n        self.model = model\n        self.loss_function = loss_function\n        self.optimizer = optimizer\n\n    def train(self, num_epochs):\n        tracked_train_loss = collections.OrderedDict()\n        tracked_test_loss = collections.OrderedDict()\n        global_step = 0\n        for epoch in range(num_epochs):\n            avg_loss = 0\n            for batch_it, (images, target) in enumerate(\n                    tqdm.tqdm(self.dataloader_train,\n                              desc=f\"Training epoch {epoch}\")):\n                # images has shape: [batch_size, 1, 28, 28]\n                # target has shape: [batch_size]\n                # Transfer batch to GPU VRAM if a GPU is available.\n                images, target = utils.to_cuda([images, target])\n                # Perform forward pass\n                logits = self.model(images)\n\n                # Compute loss\n                loss = self.loss_function(logits, target)\n\n                avg_loss += loss.cpu().detach().item()\n                # Perform backpropagation\n                loss.backward()\n\n                # Update our parameters with gradient descent\n                self.optimizer.step()\n\n                # Reset our model parameter gradients to 0\n                self.optimizer.zero_grad()\n\n                # Track the average loss for every 500th image\n                if batch_it % (500//self.batch_size) == 0 and batch_it != 0:\n                    avg_loss /= (500//self.batch_size)\n                    tracked_train_loss[global_step] = avg_loss\n                    avg_loss = 0\n                global_step += self.batch_size\n            # Compute loss and accuracy on the test set\n            test_loss, val_acc = utils.compute_loss_and_accuracy(\n                self.dataloader_test, self.model, self.loss_function\n            )\n            tracked_test_loss[global_step] = test_loss\n        return tracked_train_loss, tracked_test_loss\n\n    def save_model(self, savepath):\n        torch.save(self.model.state_dict(), savepath)\n\n    def load_model(self, model_path):\n        state_dict = torch.load(model_path)\n        self.model.load_state_dict(state_dict)","sub_path":"assignment1/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"316502085","text":"LEFT = 0\r\nTOP = 1\r\nRIGHT = 2\r\nBOTTOM = 3\r\nAREA = 4\r\nPAGE = 5\r\nTEXT = 6\r\nHEIGHT = 7\r\nWIDTH = 8\r\nID = 9\r\nMATCH_ID = 10\r\nMATCH_MSE = 11\r\nHEADER = [\"left\", \"top\", \"right\", \"bottom\", \"area\", \"page\", \"text\", \"height\", \"width\", \"ID\", \"matchID\",\"match_MSE\"]\r\n\r\nROW = 0\r\nCOLUMN = 1\r\n\r\nPAGE_WEIGHTING = 100","sub_path":"ocr_constants.py","file_name":"ocr_constants.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"164724744","text":"# Author: Matthew Armstrong\n# Date: 6/22/21\n# Description: Write a program that asks the user for five numbers and then prints out the average of those numbers.\nprint(\"Please enter five numbers.\")\nnum_1 = float(input())\nnum_2 = float(input())\nnum_3 = float(input())\nnum_4 = float(input())\nnum_5 = float(input())\nsum_result = num_1 + num_2 + num_3 + num_4 + num_5\navg_result = sum_result / 5\nprint(\"The average of those numbers is:\")\nprint(avg_result)\n","sub_path":"Project 2/average.py","file_name":"average.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"356996501","text":"import requests\r\nimport bs4\r\n\r\nurl = \"https://www.cointracker.io/price\"\r\nreq = requests.get(url)\r\nscrap = bs4.BeautifulSoup(req.text, 'html.parser')\r\n\r\n#getting list of individual sites to get price data\r\nindividual_sites = [tag['href'] for tag in scrap.find_all('a', {'class':\"d-flex no-underline\"})]\r\n\r\nfor i in range(len(individual_sites)):\r\n\t#appending the string for each \"i\" to \"https://www.cointracker.io\".\r\n\turl2 = \"https://www.cointracker.io\" + individual_sites[i]\r\n\treq2 = requests.get(url2)\r\n\tscrap2 = bs4.BeautifulSoup(req2.text, 'html.parser')\r\n\r\n\t#getting the cryptocurrency name from the string\r\n\tcrypto_name = individual_sites[i][7:]\r\n\r\n\t#getting the data where the price is present and converting it to string\r\n\tfindClass = scrap2.find('div', {'class':'my-auto h4'})\r\n\tfindClassStr = str(findClass)\r\n\t\r\n\t#finding index from which price of cryptocurrency starts\r\n\tidx = findClassStr.find(\"data-price-container-symbol=\")\r\n\tidx = findClassStr.find(\">\", idx, len(findClassStr))\r\n\tidx = idx+1\r\n\tprice_str = \"\"\r\n\r\n\t#looping till I get the complete price as a string\r\n\twhile(findClassStr[idx]!=\"<\"):\r\n\t\tprice_str = price_str + findClassStr[idx]\r\n\t\tidx = idx+1\r\n\tprint(\"cryptocurrency : \" + crypto_name + \" and 1 \" + crypto_name + \" = \" + price_str)\r\n\t\r\n\r\n","sub_path":"WebScrapingScripts/crypto_price_checker/crypto_price_checker.py","file_name":"crypto_price_checker.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"364667960","text":"import unittest\nfrom Paginator import Paginator\nfrom Response import Response\nimport requests\nclass TestPaginator(unittest.TestCase):\n    def test_Paginate(self):\n        response = Response()\n        paginator = Paginator(response)\n        \n        # Fetch all GitHub users\n        url = 'https://api.github.com/users'\n        limit = 2 # only fetch up to 2 pages\n        kwargs = {'headers': {'Time-Zone': 'Asia/Tokyo', 'Accept': 'application/vnd.github.v3+json', 'User-Agent': ''}}\n        res = paginator.Paginate(url, limit=limit)\n        self.assertTrue(1 < len(res))\n        self.assertEqual((30*limit), len(res)) # 30 items per page (per_page=30) should be the default value https://developer.github.com/v3/#pagination\n        \n","sub_path":"TestPaginator.py","file_name":"TestPaginator.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"239549159","text":"import http\nimport mimetypes\nimport re\nfrom html import escape\nfrom http.cookies import SimpleCookie\nfrom pathlib import Path\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import Tuple\nfrom urllib.parse import parse_qs\n\nfrom framework import settings\nfrom framework.consts import DIR_STATIC\nfrom framework.consts import METHODS_WITH_REQUEST_BODY\nfrom framework.consts import USER_COOKIE\nfrom framework.consts import USER_TTL\nfrom framework.db import find_user\nfrom framework.errors import NotFound\nfrom framework.types import RequestT\nfrom framework.types import StaticT\nfrom framework.types import UserT\n\nHeaderTupleT = Tuple[str, Any]\n\n\ndef http_first(value: HeaderTupleT) -> Tuple[int, HeaderTupleT]:\n    \"\"\"\n    Key function for `sorted`.\n    Orders a header tuple such that\n    a header which starts with HTTP_ is assigned a value 0,\n    meaning that the tuple will come first\n    when sorted by `sorted`.\n\n    :param value: header tuple (name, value)\n    :return: ordered header (int, (name, value))\n    \"\"\"\n\n    order = 0 if value[0].startswith(\"HTTP\") else 1\n    return order, value\n\n\ndef format_env_var(name: str, value: str) -> str:\n    \"\"\"\n    Formats environment variable value.\n    Formatter is chosen according to the kind of variable.\n\n    :param name: name of environment variable\n    :param value: value of environment variable\n    :return: string representation of value in appropriate format\n    \"\"\"\n\n    formatter = get_formatter(name)\n    new = str(value)\n    new = formatter(new)\n    new = escape(new)\n    new = re.sub(\"\\n\", \"<br>\", new)\n\n    return new\n\n\ndef get_formatter(env_var_name: str) -> Callable[[str], str]:\n    \"\"\"\n    Returns a formatter function for the given environment variable.\n\n    :param env_var_name: name of environment variable\n    :return: formatting function\n    \"\"\"\n\n    if env_var_name.endswith(\"PATH\"):\n        return lambda _value: \"\\n\".join(_value.split(\":\"))\n\n    if \"ACCEPT\" in env_var_name:\n        return lambda _v: \"\\n\".join(re.split(r\"[\\s,]+\", _v))\n\n    return lambda _v: _v\n\n\ndef read_static(file_name: str) -> StaticT:\n    \"\"\"\n    Reads the content of the static file from given path.\n    If file name is in relative form, reads data from static dir.\n    If file name is in absolute form, reads data from the file itself.\n\n    :param file_name: path to the file\n    :return: content and content type\n    \"\"\"\n\n    if file_name.startswith(\"/\"):  # TODO: enhance to support Windows\n        file_obj = Path(file_name).resolve()\n    else:\n        file_obj = (DIR_STATIC / file_name).resolve()\n\n    if not file_obj.exists():\n        raise NotFound\n\n    with file_obj.open(\"rb\") as fp:\n        content = fp.read()\n\n    content_type = mimetypes.guess_type(file_name)[0]\n\n    return StaticT(content=content, content_type=content_type)\n\n\ndef get_request_headers(environ: dict) -> dict:\n    \"\"\"\n    Returns request headers from WSGI environment.\n    Header names are adapted: the \"HTTP_\" part is cut off.\n\n    :param environ: WSGI environ\n    :return: dict with HTTP headers\n    \"\"\"\n\n    environ_headers = filter(lambda _kv: _kv[0].startswith(\"HTTP_\"), environ.items())\n    request_headers = {key[5:]: value for key, value in environ_headers}\n    return request_headers\n\n\ndef get_request_query(environ: dict) -> dict:\n    \"\"\"\n    Returns parsed query from WSGI environ.\n    Returns empty dict if no query provided in HTTP request.\n\n    :param environ: WSGI environ\n    :return: dict with query params\n    \"\"\"\n\n    qs = environ.get(\"QUERY_STRING\")\n    query = parse_qs(qs or \"\")\n    return query\n\n\ndef build_status(code: int) -> str:\n    \"\"\"\n    Builds a string with HTTP status code and reason for given code.\n\n    :param code: integer HTTP code\n    :return: string with code and reason\n    \"\"\"\n\n    status = http.HTTPStatus(code)\n\n    def _process_word(_word: str) -> str:\n        if _word == \"OK\":\n            return _word\n        return _word.capitalize()\n\n    reason = \" \".join(_process_word(word) for word in status.name.split(\"_\"))\n\n    text = f\"{code} {reason}\"\n    return text\n\n\ndef build_form_data(body: bytes) -> Dict[str, Any]:\n    \"\"\"\n    Builds a dict with form names&values against request body.\n    Returns empty dict if request body is empty.\n\n    :param body: HTTP request body\n    :return: dict with form data\n    \"\"\"\n\n    if not body:\n        return {}\n\n    qs = body.decode()\n    form_data = parse_qs(qs or \"\")\n    return form_data\n\n\ndef get_request_body(environ: dict) -> Optional[bytes]:\n    \"\"\"\n    Returns an HTTP request body against given WSGI environment.\n    Returns None if it is impossible to obtain a request body.\n\n    :param environ: WSGI environment\n    :return: bytes of HTTP request body or None\n    \"\"\"\n\n    method = get_request_method(environ)\n    if method not in METHODS_WITH_REQUEST_BODY:\n        return None\n\n    fp = environ.get(\"wsgi.input\")\n    if not fp:\n        return None\n\n    content_length = int(environ.get(\"CONTENT_LENGTH\") or 0)\n    if not content_length:\n        return None\n\n    content = fp.read(content_length)\n\n    return content\n\n\ndef get_request_method(environ: dict) -> str:\n    \"\"\"\n    Returns the method name of the HTTP request.\n\n    :param environ: WSGI environment\n    :return: HTTP method name\n    \"\"\"\n\n    method = environ[\"REQUEST_METHOD\"]\n    return method\n\n\ndef get_request_path(environ: dict) -> str:\n    \"\"\"\n    Returns a path part of the HTTP request.\n\n    :param environ: WSGI environment\n    :return: request path\n    \"\"\"\n\n    path = environ[\"PATH_INFO\"]\n    return path\n\n\ndef build_cookies(headers: Dict) -> SimpleCookie:\n    \"\"\"\n    Builds a cookies jar against given headers.\n\n    :param headers: dict with HTTP request headers\n    :return: SimpleCookie jar\n    \"\"\"\n\n    cookies = SimpleCookie(headers.get(\"COOKIE\", \"\"))\n    return cookies\n\n\ndef authenticate(request: RequestT) -> None:\n    \"\"\"\n    Sets a user up in the given request.\n\n    :param request: HTTP request\n    \"\"\"\n\n    request.user = UserT()\n\n    if USER_COOKIE not in request.cookies:\n        return\n\n    user_id = request.cookies[USER_COOKIE].value\n    user = find_user(user_id)\n    request.user = user\n\n\ndef _build_session_header_generic(user_id, max_age):\n    \"\"\"\n    Builds a generic header value for user session.\n\n    :param user_id: user ID\n    :param max_age: Max-Age value of cookie\n    :return: session header\n    \"\"\"\n\n    jar = SimpleCookie()\n\n    jar[USER_COOKIE] = user_id\n\n    cookie = jar[USER_COOKIE]\n    cookie[\"Domain\"] = settings.HOST\n    cookie[\"HttpOnly\"] = True\n    cookie[\"Max-Age\"] = max_age\n    cookie[\"Path\"] = \"/\"\n\n    header = jar.output(header=\"\").strip()\n    return header\n\n\ndef build_session_header(user_id: str) -> str:\n    \"\"\"\n    Builds a header value for user session.\n\n    :param user_id: user ID\n    :return: session header\n    \"\"\"\n\n    return _build_session_header_generic(user_id, USER_TTL.total_seconds())\n\n\ndef build_reset_session_header(user_id: str) -> str:\n    \"\"\"\n    Builds a reset header value for user session.\n\n    :param user_id: user ID\n    :return: session header\n    \"\"\"\n\n    return _build_session_header_generic(user_id, 0)\n\n\ndef host_is_local(host: str) -> bool:\n    \"\"\"\n    Tells whether the given host is local.\n\n    :param host: host name or address\n    :return: True if host is local otherwise False\n    \"\"\"\n\n    local_names = {\n        \"localhost\",\n        \"127.0.0.1\",\n    }\n\n    is_local = any(local_name in host for local_name in local_names)\n    return is_local\n\n\ndef build_absolute_url(resource: str, **kwargs: dict) -> str:\n    \"\"\"\n    Builds an absolute URL to given resource according to the active host.\n\n    :param resource: name of the resource (path)\n    :param kwargs: kwargs part of the URL\n    :return: an absolute URL\n    \"\"\"\n\n    port = settings.PORT\n    if port in (80, 443):\n        port = \"\"\n    else:\n        port = f\":{port}\"\n\n    host = settings.HOST\n\n    schema = \"http\" if host_is_local(host) else \"https\"\n\n    if resource.startswith(\"/\"):\n        resource = resource[1:]\n\n    url = f\"{schema}://{host}{port}/{resource}\"\n    if kwargs:\n        url = url.format(**kwargs)\n\n    return url\n","sub_path":"src/framework/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"255453198","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('Select', '0004_user_team_name'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='game_pick',\n            name='won',\n            field=models.CharField(default=b'P', max_length=1),\n            preserve_default=True,\n        ),\n        ]\n","sub_path":"Select/migrations/0005_game_pick_won.py","file_name":"0005_game_pick_won.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"379767784","text":"#!/usr/bin/env python\n#-*- coding=utf-8 -*-\n\"\"\"\n@author:Wllen\n@file:shopping_cart.py\n@time:2018/5/4 22:39\n\"\"\"\nproducts = [ ['Iphone8',6888],['MacPro',14800], ['小米6',2499],['Coffee',31],['Book',80],['Nike Shoes',799] ]\nshopping_cart = []\nexit_flag = False # loop exit flag\nwhile not exit_flag:\n    print(\"---------商品列表----------\")\n    for index,p in enumerate(products):\n        print(\"%s. %s %s\"%(index, p[0], p[1])) # formatted output\n    choice = input(\"请输入想买的商品编号:\")\n    if choice.isdigit(): # check whether the input is a number\n        choice = int(choice)\n        if choice < len(products) and choice >= 0:\n            shopping_cart.append(products[choice])\n            print(\"已将您选中的商品 %s 加入购物车中\"%(products[choice]))\n        else:\n            print(\"对不起,没有此商品!\")\n    elif choice == \"q\" or choice == \"Q\":\n        if len(shopping_cart) > 0:\n            print(\"-------您已经购买以下商品------\")\n            for index,p in enumerate(shopping_cart):\n                print(\"%s. %s %s\" % (index, p[0], p[1]))\n        exit_flag = True\n    else:\n        print(\"您的输入有误,请重新输入商品编号\")","sub_path":"learning/shopping_cart.py","file_name":"shopping_cart.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"30068772","text":"from Common import inputMatrix\n\ndef getBoundary(arr):\n    m = len(arr)\n    n = len(arr[0])\n    res = []\n    last = []\n    for i in range(m):\n        if i == 0:\n            res.extend(arr[i])\n        elif i == m - 1:\n            res.extend(arr[i][::-1])\n        else:\n            res.append(arr[i][-1])\n            if n > 1:\n                last.insert(0, arr[i][0])\n    res.extend(last)\n    return res\n\narr = inputMatrix()\nres = getBoundary(arr)\nfor el in res:\n    print(el, end=' ')\n","sub_path":"Matrix/Boundary of Matrix.py","file_name":"Boundary of Matrix.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"133605622","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Leapfrog Integrator for TensorFlow.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import tensor_array_ops\n\ndef _assert_increasing(t):\n  assert_increasing = control_flow_ops.Assert(\n      math_ops.reduce_all(t[1:] > t[:-1]), ['`t` must be monotonic increasing'])\n  return ops.control_dependencies([assert_increasing])\n\n\ndef _check_input_types(t, y0):\n  if not (y0.dtype.is_floating or y0.dtype.is_complex):\n    raise TypeError('`y0` must have a floating point or complex floating '\n                    'point dtype')\n  if not t.dtype.is_floating:\n    raise TypeError('`t` must have a floating point dtype')\n\n\ndef _leap_frog_step(step_size, k, leapfrog_state, name=None):\n  \"\"\"Take a single leapfrog step.\n  Args:\n    step_size: step size of each leap frog call\n    k: spring constant for potential\n    leapfrog_state: collection.namedtuple for current state of leapfrog\n    name: optional name for the operation.\n  Returns:\n    Tuple `(position1, momentum1, grad1, total_time1)` giving the estimated position, momentum,\n    gradient of the potential, and time after the leap.\n  \"\"\"\n  #position = leapfrog_state.position1\n  #momentum = leapfrog_state.momentum1\n  #grad = leapfrog_state.grad1\n  #total_time = leapfrog_state.total_time1\n\n  position, momentum, grad, total_time = leapfrog_state\n  with ops.name_scope(name, 'leap_frog_step',\n                      [step_size, position, momentum, grad, k, total_time]) as scope:\n\n    step_size = ops.convert_to_tensor(step_size, name='step_size')\n    momentum = ops.convert_to_tensor(leapfrog_state.momentum1, name = 'momentum')\n    position = ops.convert_to_tensor(leapfrog_state.position1, name = 'position')\n    grad = ops.convert_to_tensor(leapfrog_state.grad1, name = 'grad')\n    k = ops.convert_to_tensor(k, name = 'k')\n    total_time = ops.convert_to_tensor(total_time, name='total_time')\n\n    momentumi = momentum - np.float64(0.5)* step_size * grad\n    positioni = position + step_size * momentumi\n    potentiali, gradi = potential_and_grad(positioni, k)\n    momentumi = momentumi - np.float64(0.5) * step_size * gradi\n    total_timei = total_time + step_size\n\n    momentum1 = array_ops.identity(momentumi, name='{0}/momentum1'.format(scope))\n    position1 = array_ops.identity(positioni, name='{0}/position1'.format(scope))\n    potential1 = array_ops.identity(potentiali, name='{0}/potential1'.format(scope))\n    grad1 = array_ops.identity(gradi, name='{0}/grad1'.format(scope))\n    total_time1 = array_ops.identity(total_timei, name='{0}/total_time1'.format(scope))\n    return _Leap_Frog_State(position1, momentum1, grad1, total_time1)\n\nclass _Leap_Frog_State(\n    collections.namedtuple('_Leap_Frog_State',\n                           'position1, momentum1, grad1, total_time1')):\n  \"\"\"Saved state of the Leap Frog solver.\n  Attributes:\n    position1: the position at the end of the last time step.\n    momentum1: the momentum at the end of the last time step.\n    grad1: the gradient of the potential at the end of the last time step.\n    total_time1: total time the system has been integrated at the end of the last time step.\n  \"\"\"\n\ndef potential_and_grad(position, k):\n  #function that returns the potential and it's gradient at a given position\n  return 0.5 * k * tf.square(position), k * position\n\n\ndef _leapfrog(x0,\n              v0,\n              k,\n              t_obs,\n              step_size, name=None):\n  \"\"\"Model the positions and velocities at t_obs in a potential described in function potential_and_grad.\"\"\"\n\n  with ops.name_scope(name, 'leapfrog',\n                      [x0, v0, k, t_obs, step_size]) as scope:\n\n    def leapfrog_wrapper(step_size, time, k, leapfrog_state, l):\n      #input is call from while statement, must be same as counter_fn\n      leapfrog_state = _leap_frog_step(step_size, k, leapfrog_state)\n      return step_size, time, k, leapfrog_state, l + 1\n\n    def time_fn(step_size, time, k, leapfrog_state, l):  # pylint: disable=unused-argument\n      return leapfrog_state.total_time1 + step_size < time\n\n    def leap_to_tobs(x, v, leapfrog_state, num_times, step_size, k, i):\n      \"\"\"Integrate to next t_obs point.\"\"\"\n\n      with ops.name_scope('leapfrog_to_obs'):\n        #loop through stepsize to t_obs\n        step_size, time, k, leapfrog_state, count = control_flow_ops.while_loop(\n            time_fn, leapfrog_wrapper, (step_size, tf.gather(t_obs, i), k, leapfrog_state, 0.0),\n            name='leapfrog_loop')\n        dt_tiny = tf.gather(t_obs, i) - leapfrog_state.total_time1\n        xt, vt, _, _ = _leap_frog_step(dt_tiny, k, leapfrog_state)\n        x = x.write(i, xt)\n        v = v.write(i, vt)\n        #solution = solution.write(i, y)\n        return x, v, leapfrog_state, num_times, step_size, k, i + 1\n\n    def tobs_fn(x, v, leapfrog_state, num_times, step_size, k, i):\n      return i < num_times\n\n    num_times = array_ops.size(t_obs)\n    x = tensor_array_ops.TensorArray(\n        x0.dtype, size=num_times, name='x')\n    v = tensor_array_ops.TensorArray(\n        v0.dtype, size=num_times, name='v')\n\n    potential0, grad0 = potential_and_grad(x0, k)\n    total_time0 = np.float64(0.0)\n    leapfrog_state = _Leap_Frog_State(\n        x0, v0, grad0, total_time0)\n\n    #loop through t_obs\n    #print(x, v, leapfrog_state, num_times)\n    x, v, leapfrog_state, _, _, _, _ = control_flow_ops.while_loop(\n        tobs_fn, leap_to_tobs, (x, v, leapfrog_state, num_times, step_size, k, 0),\n        name='tobs_loop')\n\n    xreturn = x.stack(name='{0}/xvalues'.format(scope))\n    vreturn = v.stack(name='{0}/vvalues'.format(scope))\n    return (xreturn, vreturn)\n\n\ndef leapfrog(x0,\n             v0,\n             k,\n             t_obs,\n             step_size, name=None):\n  \"\"\"Integrate a system in a potential and observe at t_obs times.\n  Implements leapfrog integration\n  Args:\n    x0: The initial position of the system\n    v0: The initial velocity of the system\n    grad0: The initial gradient of the system\n    k: spring constant of the system\n    t_obs: array of times to observe the system\n    step_size: the step size for each leap\n  Returns:\n    x: the observed positions\n    v: the observed velocities\n  \"\"\"\n\n  with ops.name_scope(name, 'integrate', [x0, v0, k, t_obs, step_size]) as scope:\n\n    #x0 = ops.convert_to_tensor(x0, dtype=x0.dtype, name='x0')\n    #v0 = ops.convert_to_tensor(v0, dtype=v0.dtype, name='v0')\n    #k = ops.convert_to_tensor(k, dtype=k.dtype, name = 'k')\n    #t_obs = ops.convert_to_tensor(t_obs, dtype=t_obs.dtype, name='t_obs')\n\n    return _leapfrog(\n        x0,\n        v0,\n        k,\n        t_obs,\n        step_size)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"128384822","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom App import db\r\nfrom sqlalchemy.dialects.postgresql import BOOLEAN,INTEGER,VARCHAR,JSONB,ARRAY\r\nfrom sqlalchemy.sql import func,text\r\nfrom sqlalchemy import Column,Table\r\nfrom App.util.security import getNewID\r\n\r\n\r\n\r\n#Platform user class\r\nclass BUUSER(db.Model):\r\n    #user table name\r\n    __tablename__='buuser'\r\n    # flask-login related\r\n    #id, primary key, uses uuid\r\n    id=db.Column(VARCHAR,primary_key=True)\r\n    #whether the account is enabled\r\n    isenable=db.Column(BOOLEAN)\r\n    #whether the account is anonymous\r\n    isanonymous=db.Column(BOOLEAN)\r\n\r\n    # personal information\r\n    #username: can be used to log in\r\n    nickname=db.Column(VARCHAR)\r\n    #real name\r\n    realname=db.Column(VARCHAR)\r\n    # staff code: can be used to log in\r\n    staffcode=db.Column(VARCHAR)\r\n    # mobile number: can be used to log in\r\n    mobile=db.Column(VARCHAR)\r\n    #email: can be used to log in\r\n    email=db.Column(VARCHAR)\r\n    #password\r\n    pw=db.Column(VARCHAR)\r\n    # organization code\r\n    e0122=db.Column(VARCHAR)\r\n\r\n    # platform related\r\n    #roles\r\n    roles=db.Column(JSONB)\r\n    #organization\r\n    dept=db.Column(JSONB)\r\n    #functional group\r\n    cgroup=db.Column(JSONB)\r\n    \r\n    # unit\r\n    unit_name=db.Column(VARCHAR)\r\n    # department\r\n    dep_name=db.Column(VARCHAR)\r\n    # office\r\n    office_name=db.Column(VARCHAR)\r\n\r\n    # WeChat related\r\n    # globally unique ID: can be used to log in\r\n    openid=db.Column(VARCHAR)\r\n    # WeChat name\r\n    wxname=db.Column(VARCHAR)\r\n    #WeChat account\r\n    wxcode=db.Column(VARCHAR)\r\n    #avatar\r\n    ava=db.Column(VARCHAR)\r\n    #shipping address list\r\n    addrs=db.Column(JSONB)\r\n    #shopping cart list\r\n    lovers=db.Column(JSONB)\r\n    #location\r\n    tmpsite=db.Column(JSONB)\r\n    #whether deleted\r\n    isdelete=db.Column(BOOLEAN)\r\n    # whether a company user\r\n    iscompany=db.Column(BOOLEAN)\r\n    # company shopping cart\r\n    c_lovers=db.Column(JSONB)\r\n    #company information\r\n    c_info=db.Column(JSONB)\r\n    #whether the user applied for a company account\r\n    c_isapply=db.Column(BOOLEAN)\r\n\r\n\r\n\r\n    #properties required by the flask-login module\r\n    @property\r\n    def is_authenticated(self):\r\n        return self.isenable\r\n    @property\r\n    def is_active(self):\r\n        return self.isenable\r\n    @property\r\n    def is_anonymous(self):\r\n        return self.isanonymous    \r\n    def get_id(self):\r\n        return self.id    \r\n    #convert instance data to json format\r\n    #param: user instance\r\n    def tojson(data):\r\n        if type(data)==list:\r\n            jsondata=[]\r\n            for item in data:\r\n                jsondata.append({\r\n                    'id':item.id,\r\n                    'isenable':item.isenable,\r\n                    'isanonymous':item.isanonymous,\r\n                    'nickname':item.nickname,\r\n                    'realname':item.realname,\r\n                    'staffcode':item.staffcode,\r\n                    'mobile':item.mobile,\r\n                    'email':item.email,\r\n                    'e0122':item.e0122,\r\n                    'roles':item.roles,\r\n                    'dept':item.dept,\r\n                    'cgroup':item.cgroup,\r\n                    'wxname':item.wxname,\r\n                    'wxcode':item.wxcode,\r\n                    'ava':item.ava,\r\n                    'addrs':item.addrs,\r\n                    'lovers': item.lovers,\r\n                    'tmpsite':item.tmpsite,\r\n                    'isdelete':item.isdelete,\r\n                    'openid':item.openid,\r\n                    'iscompany':item.iscompany,\r\n                    'c_lovers':item.c_lovers,\r\n                    'c_info':item.c_info,\r\n                    'c_isapply':item.c_isapply,\r\n                    'unit_name':item.unit_name,\r\n                    'dep_name':item.dep_name,\r\n                    'office_name':item.office_name,\r\n\r\n                })\r\n            return jsondata\r\n        else:\r\n            return {\r\n                'id':data.id,\r\n                'isenable':data.isenable,\r\n                'isanonymous':data.isanonymous,\r\n                'nickname':data.nickname,\r\n                'realname':data.realname,\r\n                'staffcode':data.staffcode,\r\n                'mobile':data.mobile,\r\n                'email':data.email,\r\n                'e0122':data.e0122,\r\n                'roles':data.roles,\r\n                'dept':data.dept,\r\n                'cgroup':data.cgroup,\r\n                'wxname':data.wxname,\r\n                'wxcode':data.wxcode,\r\n                'ava':data.ava,\r\n                'addrs':data.addrs,\r\n                'lovers': data.lovers,\r\n                'tmpsite':data.tmpsite,\r\n                'isdelete':data.isdelete,\r\n                'openid':data.openid,\r\n                'iscompany':data.iscompany,\r\n                'c_lovers':data.c_lovers,\r\n                'c_info':data.c_info,\r\n                'c_isapply':data.c_isapply,\r\n                'unit_name':data.unit_name,\r\n                'dep_name':data.dep_name,\r\n                'office_name':data.office_name,\r\n            }\r\n\r\n\r\n\r\n# from flask import json \r\ndef listdata():\r\n    db.create_all()\r\n\r\n    \r\n\r\n\r\n","sub_path":"safe/App/model/buuser.py","file_name":"buuser.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"390632342","text":"# compute annual area burned in \nimport rasterio, os, glob\nfrom rasterio.features import rasterize\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\n\nfirehistory_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Fire/FireHistory/raw_downloaded/FireHistoryPerimeters_1940_2017.gdb'\nlayername = 'FireHistoryPerimeters'\noutput_filename = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Fire/FireHistory/Alaska_AnnualAreaBurned_km2_1940-2017.csv'\n\n# read the data and make sure it is in 3338 --> units: meters\ndf = gpd.read_file( firehistory_fn, layer=layername )\ndf = df.to_crs( epsg=3338 )\n\n# groupby year and get the total area burned for each / convert to km2 / round it / convert to int / output as dataframe\naab = df.geometry.groupby( df['FIREYEAR'] ).apply( lambda x: x.apply( lambda y: y.area).sum() ).div( 1000000 ).round( 0 ).astype( np.int32 ).to_frame( name='AnnualAreaBurned' )\naab.to_csv( output_filename, sep=',' ) # write file to csv\n\n# to read the csv file back in with pandas -- then you can plot and do all kinds of stuff with this DataFrame object\ndf = pd.read_csv( output_filename, index_col=0 )\n\n\n\n# # # # # CANADA DATA -- Still stuck in 2016...\n\nfirehistory_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/Fire/FireHistory/raw_downloaded/NFDB_poly_20171106.shp'\ncan_df = gpd.read_file( firehistory_fn )\ncan_df = can_df.to_crs( epsg=3338 )\naab_canada = can_df.geometry.groupby( can_df['YEAR'] ).apply( lambda x: x.apply( lambda y: y.area).sum() ).div( 1000000 ).round( 0 ).astype( np.int32 ).to_frame( name='AnnualAreaBurned' )\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # \n","sub_path":"fire_history/compute_aab_from_FireHistoryPerimeters_GDB.py","file_name":"compute_aab_from_FireHistoryPerimeters_GDB.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"168668455","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May  9 00:03:01 2019\n\n@author: bcheung\n\"\"\"\nimport uuid\nimport pandas as pd\nimport glob as glob\nimport PIL\nfrom PIL import ImageFont\nfrom PIL import Image\nfrom PIL import ImageDraw\n\nCHARS ='AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789.:,;\"(!?)+-*/='\n\nfont_list = glob.glob('./fonts/*.ttf')\n\ntrain_labels = {}\nfor f in font_list:\n    font_name = f.split('\\\\')[1].split('.')[0]\n    font = ImageFont.truetype(f,14)\n    for c in CHARS:\n        id_key = uuid.uuid4()\n        img=Image.new(\"L\", (64,64))\n        draw = ImageDraw.Draw(img)\n        draw.text((32, 32),c,(255),font=font)\n        draw = ImageDraw.Draw(img)\n        img.save(\"./train_labels/{}.jpg\".format(id_key), \"JPEG\")\n        label_description = {'font':font_name,\n                             'char':c}\n        train_labels[id_key] = label_description\n        \ntrain_labels_df = pd.DataFrame.from_dict(train_labels,orient='index').reset_index()\ntrain_labels_df.columns = ['id_key','font','target']\ntrain_labels_df.to_csv('train_labels.csv',index=False)","sub_path":"generate_train_data.py","file_name":"generate_train_data.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"435205442","text":"#!/usr/bin/env python\n\n\"\"\"Main entrypoint for DAQ. Handles command line parsing and other\nmisc setup tasks.\"\"\"\n\nimport logging\nimport os\nimport sys\n\nfrom ConfigParser import ConfigParser\nfrom StringIO import StringIO\n\nfrom mininet import log as minilog\n\nimport runner\n\nLOGGER = logging.getLogger('daq')\nALT_LOG = logging.getLogger('mininet')\n\nFLAG_MAP = {\n    's': 'single_shot',\n    'e': 'event_trigger',\n    'f': 'flap_ports',\n    'l': 'result_linger',\n    'd': 'debug_mode',\n    'c': 'use_console'\n}\n\ndef _stripped_alt_logger(self, level, msg, *args, **kwargs):\n    #pylint: disable=unused-argument\n    \"\"\"A logger for messages that strips whitespace\"\"\"\n    stripped = msg.strip()\n    if stripped:\n        #pylint: disable=protected-access\n        ALT_LOG._log(level, stripped, *args, **kwargs)\n\ndef _configure_logging(config):\n    log_def = 'debug' if config.get('debug_mode') else 'info'\n    daq_env = config.get('daq_loglevel', log_def)\n    logging.basicConfig(level=minilog.LEVELS.get(daq_env, minilog.LEVELS['info']))\n\n    mininet_env = config.get('mininet_loglevel')\n    minilog.setLogLevel(mininet_env if mininet_env else 'info')\n\n    #pylint: disable=protected-access\n    minilog.MininetLogger._log = _stripped_alt_logger\n\ndef _write_pid_file():\n    pid = os.getpid()\n    LOGGER.info('DAQ pid is %d', pid)\n    with open('inst/daq.pid', 'w') as pid_file:\n        pid_file.write(str(pid))\n\ndef _read_config_into(filename, config):\n    parser = ConfigParser()\n    with open(filename) as stream:\n        stream = StringIO(\"[top]\\n\" + stream.read())\n        parser.readfp(stream)\n    for item in parser.items('top'):\n        config[item[0]] = item[1]\n\ndef _parse_args(args):\n    config = {}\n    for arg in args[1:]:\n        if arg:\n            if arg[0] == '-':\n                if arg[1:] in FLAG_MAP:\n                    config[FLAG_MAP[arg[1:]]] = True\n                else:\n                    raise Exception('Unknown command line arg %s' % arg)\n            elif '=' in arg:\n                parts = arg.split('=', 1)\n                config[parts[0]] = parts[1]\n            else:\n                _read_config_into(arg, config)\n    return config\n\ndef _execute():\n    config = _parse_args(sys.argv)\n    _configure_logging(config)\n\n    _write_pid_file()\n\n    daq_runner = runner.DAQRunner(config)\n    daq_runner.initialize()\n    daq_runner.main_loop()\n    daq_runner.cleanup()\n\n    result = daq_runner.finalize()\n    LOGGER.info('DAQ runner returned %d', result)\n\n    return result\n\n\nif __name__ == '__main__':\n    assert os.getuid() == 0, 'Must run DAQ as root.'\n    sys.exit(_execute())\n","sub_path":"daq/daq.py","file_name":"daq.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"485838064","text":"\nimport json\nimport ast\nimport collections\n\ndef get_class_member_value(obj, levels, subset=False):\n    final_value = obj\n    for item in levels:\n        final_value = getattr(final_value, item)\n        if isinstance(final_value, list):\n            sub_levels = levels[levels.index(item)+1:]\n            if not isinstance(sub_levels, list):\n                sub_levels = [sub_levels]\n            final_value = [get_class_member_value(sub_value,sub_levels,subset=True) for sub_value in final_value]\n            break\n    \n    if not isinstance(final_value, list) and not subset:\n        final_value = [final_value]\n\n    return final_value\n\ndef check_for_match(value_selected, hit_values):\n    # check if the values for the hit match one or more of the values selected\n\n    match = False\n    if isinstance(value_selected, list):\n        for item in value_selected:\n            if item in hit_values:\n                match = True\n                break\n    elif value_selected in hit_values:\n        match = True\n\n    return match\n\ndef filter_hits(hits, filters, object_name):\n    # Filter the hits based on the json string provided\n\n    if not filters:\n        return hits\n    \n    all_filtered_sets = []\n    for content in filters[\"content\"]: \n        field=content[\"content\"][\"field\"]\n        value_selected=content[\"content\"][\"value\"]\n\n        # check if this filter is for this object\n        levels = field.split(\".\")\n        top_level = levels.pop(0)\n        if top_level != object_name:\n            if not top_level in dir(hits[0]):\n                all_filtered_sets.append(set(hits))\n            else:\n                filtered_set=set()\n                for item in hits:\n                    # find object that contains the search values\n                    hit_values = []\n                    for search_item in getattr(item, top_level).hits:\n                        hit_values+=get_class_member_value(search_item, levels)\n                    if check_for_match(value_selected, hit_values):\n                        filtered_set.add(item)\n                all_filtered_sets.append(list(filtered_set))\n        else:\n            filtered_set=set()\n            for item in hits:\n                hit_values=get_class_member_value(item,levels)\n                if check_for_match(value_selected, hit_values):\n                    filtered_set.add(item)\n            all_filtered_sets.append(list(filtered_set))\n\n    # reduce sets\n    final_set = set(all_filtered_sets.pop(0))\n    for next_set in all_filtered_sets:\n        final_set = final_set.intersection(next_set)\n\n    return list(final_set)\n\ndef sort_hits(hits, sort):\n    # Sort the hits based on the string provided\n\n    if not sort:\n        return hits\n    \n    sorted_hits = hits\n    for content in sort:\n        eval_content = ast.literal_eval(content) \n        field=eval_content[\"field\"]\n        levels=field.split(\".\")\n\n        # reverse sort order if set\n        order=eval_content[\"order\"]\n        reverse_sort_order = False\n        if order != \"asc\":\n            reverse_sort_order = True\n\n        # get the values for all of the hits\n        hit_value_pairs = []\n        for item in sorted_hits:\n            hit_value_pairs.append((item, get_class_member_value(item,levels)[0]))\n\n        hit_values = collections.OrderedDict(hit_value_pairs)\n\n        sorted_hits=sorted(hit_values, key=hit_values.get, reverse=reverse_sort_order)\n\n    return sorted_hits\n\n# The functions below were based on code from\n# https://github.com/nderkach/python-grahql-api\n\ndef _json_object_hook(d):\n    return collections.namedtuple('X', d.keys())(*d.values())\n\ndef json2obj(data):\n    return json.loads(data, object_hook=_json_object_hook)\n\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"476156539","text":"#Write a function that accepts two (matrices) 2 dimensional lists a and b of unknown lengths and returns their product. \n#Hint: Two matrices a and b can be multiplied together only if the number of columns of the first matrix(a) is the same \n#as the number of rows of the second matrix(b). Hint: You may import and use the numpy module but your return must be a \n#python list not a numpy array. The input for this function will be two 2 Dimensional lists.\n\n# I couldn't install numpy, so I use the answer in the exercise.\n\ndef mult_matrix (list_one_2d, list_two_2d):\n\timport numpy\n\tproduct = (numpy.mat(list_one_2d) * numpy.mat(list_two_2d))\n\tproduct_to_list = product.tolist()\n\treturn product_to_list\n\na = [[2, 3, 4],\n     [3, 4, 5]]\nb = [[4, -3, 12],\n     [1, 1, 5],\n     [1, 3, 2]]\n\nprint(mult_matrix(a,b))","sub_path":"Weeks/Week7/Multidimensional List/Excercise11.py","file_name":"Excercise11.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"6594419","text":"import json\n\nimport os\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.password_validation import validate_password\nfrom django.core.validators import RegexValidator\nfrom django.http import Http404\nfrom django.utils import timezone\nfrom rest_framework import serializers, status\nfrom rest_framework.compat import MinValueValidator, MaxValueValidator\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.validators import UniqueValidator\n\nfrom api.exceptions import GenericAPIException\nfrom api.models import *\nfrom api.utils.diffhelper import diffhelper\nfrom api.utils.imagehelper import convert_image\n\n\nUser = get_user_model()\n\n\nclass FollowSerializer(serializers.Serializer):\n    follow = serializers.CharField()\n\n    def validate(self, data):\n        user = self.context['user']\n\n        try:\n            follow = User.objects.get(username=data.get('follow', None))\n        except Exception:\n            raise ValidationError({\"follow\": \"User does not exist.\"})\n\n        instance = Follow.objects.filter(user=user, follow=follow)\n\n        if instance.exists():\n            raise ValidationError({\"follow\": \"You're already following this user.\"})\n\n        if user.id == follow.id:\n            raise ValidationError({\"follow\": \"You can't follow yourself.\"})\n\n        data['follow'] = follow\n\n        return data\n\n    def create(self, validated_data):\n        return Follow.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass UserSerializer(serializers.Serializer):\n    username = serializers.CharField(\n        required=True,\n        validators=[\n            UniqueValidator(\n                queryset=User.objects.all()\n            ),\n            RegexValidator(\n                regex='^[a-zA-Z0-9_]+$',\n                message=\"Only use letters, numbers and '_'\"\n            )\n        ]\n    )\n    password = serializers.CharField(write_only=True, required=True)\n    email = serializers.CharField(required=True, validators=[UniqueValidator(queryset=User.objects.all())])\n    country_code = serializers.CharField(required=True)\n    first_language = serializers.CharField(required=True)\n\n    def validate(self, data):\n        username = data.get('username', None)\n        email = data.get('email', None)\n        password = data.get('password', None)\n        country_code = data.get('country_code', None)\n        first_language = data.get('first_language', None)\n\n        if username:\n            username = username.lower()\n        else:\n            raise ValidationError({\"username\": [\"Invalid username.\"]})\n\n        with open(os.path.join(os.path.dirname(__file__), 'json/reserved-usernames.json')) as json_data:\n            if username in json.load(json_data) or User.objects.filter(username__iexact=username).exists():\n                raise ValidationError({\"username\": [\"Username already exists.\"]})\n\n        if email:\n            email = email.lower()\n        else:\n            raise ValidationError({\"email\": [\"Invalid email.\"]})\n\n        if User.objects.filter(email__iexact=email).exists():\n            raise ValidationError({\"email\": [\"This email is already in use.\"]})\n\n        try:\n            validate_password(password=password, user=User)\n        except Exception as e:\n            raise ValidationError({\"password\": list(e.messages)})\n\n        try:\n            Country.objects.get(code=country_code)\n        except Country.DoesNotExist:\n            raise ValidationError({\"country_code\": [\"Invalid country_code.\"]})\n\n        try:\n            Language.objects.get(code=first_language)\n        except Language.DoesNotExist:\n            raise ValidationError({\"first_language\": [\"Invalid first_language.\"]})\n\n        data['username'] = username\n        data['email'] = email\n\n        return data\n\n    def create(self, validated_data):\n        user = User.objects.create(\n            username=validated_data['username'],\n            email=validated_data['email']\n        )\n        user.set_password(validated_data['password'])\n        user.first_name = validated_data['username']\n        user.country_code = validated_data['country_code']\n        user.first_language = validated_data['first_language']\n        user.save()\n\n        return user\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass LanguageSerializer(serializers.Serializer):\n    code = serializers.CharField(validators=[UniqueValidator(queryset=Language.objects.all())])\n    name = serializers.CharField()\n\n    def create(self, validated_data):\n        return Language.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass LanguageDetailsSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    language = LanguageSerializer(required=False)\n    level = serializers.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(5)])\n    is_first = serializers.BooleanField(required=False)\n    is_primary = serializers.BooleanField()\n\n    def validate(self, data):\n        request_method = self.context['request_method']\n        is_primary = data['is_primary']\n\n        try:\n            language = Language.objects.get(code=self.context['code'])\n        except Language.DoesNotExist:\n            raise Http404\n\n        if not self.instance:\n            user_language = UserLanguage.objects.filter(user=self.context['user'], language=language)\n\n            if user_language.exists():\n                raise GenericAPIException(status.HTTP_409_CONFLICT, 'User language already exists.')\n        elif self.instance.is_first:\n            raise ValidationError({\"first_language\": [\"You can't modify first language.\"]})\n\n        if request_method == 'POST':\n            secondary_user_languages = UserLanguage.objects.filter(user=self.context['user'], is_primary=False)\n\n            if is_primary:\n                raise GenericAPIException(status.HTTP_400_BAD_REQUEST, 'Maximum primary language count is reached.')\n\n            if not is_primary and secondary_user_languages.count() >= 2:\n                raise GenericAPIException(status.HTTP_400_BAD_REQUEST, 'Maximum secondary language count is reached.')\n\n        if is_primary and data['level'] < 3:\n            raise ValidationError({\"level\": [\"Primary language can't have level below 3.\"]})\n\n        data['language'] = language\n\n        return data\n\n    def create(self, validated_data):\n        return UserLanguage.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.level = validated_data.get('level', instance.level)\n        instance.is_primary = validated_data.get('is_primary', instance.is_primary)\n        instance.save()\n        return instance\n\n\nclass CountrySerializer(serializers.Serializer):\n    code = serializers.CharField()\n    name = serializers.CharField()\n\n    def create(self, validated_data):\n        return Country.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass UserProfileSerializer(serializers.Serializer):\n    uid = serializers.IntegerField(source=\"user.id\", required=False)\n    username = serializers.StringRelatedField(source=\"user.username\")\n    first_name = serializers.StringRelatedField(source=\"user.first_name\")\n    last_name = serializers.StringRelatedField(source=\"user.last_name\")\n    country = CountrySerializer(read_only=True)\n    photo = serializers.ImageField(required=False)\n    about_me = serializers.CharField(allow_blank=True, required=False)\n    reputation = serializers.IntegerField(required=False)\n    is_following = serializers.BooleanField(required=False, default=False)\n    friends_count = serializers.IntegerField(required=False)\n    followers_count = serializers.IntegerField(required=False)\n    languages = LanguageDetailsSerializer(source=\"user.languages\", many=True, read_only=True)\n\n    def validate(self, data):\n        if 'photo' in data.keys():\n            photo = data['photo']\n            data['photo'] = convert_image(photo, True)\n\n        first_name = self.context['first_name']\n        last_name = self.context['last_name']\n\n        if first_name and len(first_name.strip()) == 0:\n            raise ValidationError({\"first_name\": [\"First name should not be blank.\"]})\n\n        if last_name and len(last_name.strip()) == 0:\n            raise ValidationError({\"last_name\": [\"Last name should not be blank.\"]})\n\n        self.instance.user.first_name = first_name.strip() if first_name else self.instance.user.first_name\n        self.instance.user.last_name = last_name.strip() if last_name else self.instance.user.last_name\n        self.instance.user.save()\n\n        return data\n\n    def create(self, validated_data):\n        return UserProfile.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.photo = validated_data.get('photo', instance.photo)\n        instance.about_me = validated_data.get('about_me', instance.about_me)\n        instance.save()\n        return instance\n\n\nclass FriendSerializer(serializers.Serializer):\n    uid = serializers.IntegerField(source=\"follow.id\", required=False)\n    username = serializers.StringRelatedField(source=\"follow.username\")\n    first_name = serializers.StringRelatedField(source=\"follow.first_name\")\n    last_name = serializers.StringRelatedField(source=\"follow.last_name\")\n    country = CountrySerializer(source=\"follow.userprofile.country\", read_only=True)\n    photo = serializers.ImageField(source=\"follow.userprofile.photo\", required=False)\n    about_me = serializers.CharField(source=\"follow.userprofile.about_me\", allow_blank=True, required=False)\n    reputation = serializers.IntegerField(source=\"follow.userprofile.reputation\", required=False)\n    is_following = serializers.BooleanField(required=False)\n    languages = LanguageDetailsSerializer(source=\"user.languages\", many=True, read_only=True)\n\n\nclass FollowerSerializer(serializers.Serializer):\n    uid = serializers.IntegerField(source=\"user.id\", required=False)\n    username = serializers.StringRelatedField(source=\"user.username\")\n    first_name = serializers.StringRelatedField(source=\"user.first_name\")\n    last_name = serializers.StringRelatedField(source=\"user.last_name\")\n    country = CountrySerializer(source=\"user.userprofile.country\", read_only=True)\n    photo = serializers.ImageField(source=\"user.userprofile.photo\", required=False)\n    about_me = serializers.CharField(source=\"user.userprofile.about_me\", allow_blank=True, required=False)\n    reputation = serializers.IntegerField(source=\"user.userprofile.reputation\", required=False)\n    is_following = serializers.BooleanField(required=False)\n    languages = LanguageDetailsSerializer(source=\"user.languages\", many=True, read_only=True)\n\n\nclass FileSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    type = serializers.CharField(required=False)\n    file = serializers.FileField()\n\n    def validate(self, data):\n        file = data['file']\n\n        data['file'] = convert_image(file)\n        data['type'] = 'image'\n\n        return data\n\n    def create(self, validated_data):\n        return File.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass CorrectionSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    user = UserProfileSerializer(source=\"user.userprofile\", read_only=True)\n    created_at = serializers.DateTimeField(required=False)\n    is_voted = serializers.BooleanField(read_only=True, required=False)\n    points = serializers.IntegerField(read_only=True, required=False)\n    vote = serializers.IntegerField(read_only=True, required=False)\n    sentence_id = serializers.UUIDField()\n    sentence = serializers.StringRelatedField(source=\"sentence.body\")\n    correction = serializers.CharField()\n    correction_html = serializers.CharField(required=False)\n    comment_count = serializers.IntegerField(source=\"comments.count\", required=False)\n\n    def validate(self, data):\n        correction = data.get('correction', None)\n        sentence_id = data.get('sentence_id', None)\n        sentence = Sentence.objects.get(id=sentence_id)\n        diff = diffhelper()\n        diffs = diff.diff_main(sentence.body, correction)\n        data['correction_html'] = diff.diff_prettyHtml(diffs)\n        data['comment'] = self.context.get('comment', None)\n        data['user'] = self.context.get('user', None)\n\n        return data\n\n    def create(self, validated_data):\n        return Correction.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass CorrectionsSerializer(serializers.Serializer):\n    corrections = CorrectionSerializer(many=True)\n\n    def create(self, validated_data):\n        corrections = [Correction(**item) for item in validated_data['corrections']]\n        correction_list = Correction.objects.bulk_create(corrections)\n        serializer = CorrectionSerializer(correction_list, many=True)\n        return {\n            'corrections': serializer.data\n        }\n\n    def update(self, instance, validated_data):\n        return instance\n\n\nclass CorrectionCommentSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    parent = serializers.UUIDField(source=\"parent.id\", required=False)\n    user = UserProfileSerializer(source=\"user.userprofile\", read_only=True)\n    created_at = serializers.DateTimeField(required=False)\n    modified_at = serializers.DateTimeField(required=False)\n    is_reacted = serializers.BooleanField(read_only=True, required=False)\n    like_count = serializers.IntegerField(read_only=True, required=False)\n    correction = serializers.UUIDField(source=\"correction.id\")\n    body = serializers.CharField()\n\n    def create(self, validated_data):\n        correction_data = validated_data.pop('correction', None)\n        parent_data = validated_data.pop('parent', None)\n\n        if correction_data:\n            try:\n                correction = Correction.objects.get(id=correction_data['id'])\n                validated_data['correction'] = correction\n            except Correction.DoesNotExist:\n                raise Http404\n\n        if parent_data:\n            try:\n                parent = Correction.objects.get(id=parent_data['id'])\n                validated_data['parent'] = parent\n            except Correction.DoesNotExist:\n                raise Http404\n\n        return CorrectionComment.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass SentenceSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    body = serializers.CharField()\n    correction_count = serializers.IntegerField(read_only=True, required=False)\n\n    def create(self, validated_data):\n        return Sentence.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass CommentSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    parent = serializers.UUIDField(source=\"parent.id\", required=False)\n    user = UserProfileSerializer(source=\"user.userprofile\", read_only=True)\n    created_at = serializers.DateTimeField(required=False)\n    modified_at = serializers.DateTimeField(required=False)\n    is_reacted = serializers.BooleanField(read_only=True, required=False)\n    like_count = serializers.IntegerField(read_only=True, required=False)\n    reply_count = serializers.IntegerField(read_only=True, required=False)\n    post = serializers.UUIDField(source=\"post.id\", required=False)\n    body = serializers.CharField()\n    corrections = CorrectionSerializer(many=True, read_only=True)\n\n    def validate(self, data):\n        post_data = data.pop('post', None)\n        parent_data = data.pop('parent', None)\n\n        if parent_data:\n            try:\n                parent = Comment.objects.get(id=parent_data['id'])\n                if parent.post:\n                    data['parent'] = parent\n                    data['post'] = parent.post\n            except Comment.DoesNotExist:\n                raise Http404\n\n        if not parent_data and post_data:\n            try:\n                post = Post.objects.get(id=post_data['id'])\n                data['post'] = post\n            except Post.DoesNotExist:\n                raise Http404\n\n        return data\n\n    def create(self, validated_data):\n        return Comment.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass PostReactionSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    reaction = serializers.IntegerField()\n    user = UserProfileSerializer(source=\"user.userprofile\", read_only=True)\n    post = serializers.UUIDField(source=\"post.id\")\n    created_at = serializers.DateTimeField(required=False)\n\n    def validate(self, data):\n        user = self.context['user']\n\n        try:\n            PostReaction.objects.get(user=user,\n                                     post=data['post']['id'])\n            raise GenericAPIException(status.HTTP_409_CONFLICT,\n                                      \"You have already reacted this post.\")\n        except PostReaction.DoesNotExist:\n            pass\n\n        data['user'] = user\n\n        return data\n\n    def create(self, validated_data):\n        post_data = validated_data.pop('post', None)\n\n        if post_data:\n            try:\n                post = Post.objects.get(id=post_data['id'])\n                validated_data['post'] = post\n            except Post.DoesNotExist:\n                raise Http404\n\n        return PostReaction.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass CommentReactionSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    reaction = serializers.IntegerField()\n    comment = serializers.UUIDField(source=\"comment.id\")\n\n    def validate(self, data):\n        try:\n            CommentReaction.objects.get(user=self.context['user'],\n                                        comment=data['comment']['id'])\n            raise GenericAPIException(status.HTTP_409_CONFLICT,\n                                      \"You have already reacted this comment.\")\n        except CommentReaction.DoesNotExist:\n            pass\n\n        return data\n\n    def create(self, validated_data):\n        comment_data = validated_data.pop('comment', None)\n\n        if comment_data:\n            try:\n                comment = Comment.objects.get(id=comment_data['id'])\n                validated_data['comment'] = comment\n            except Comment.DoesNotExist:\n                raise Http404\n\n        return CommentReaction.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass CorrectionVoteSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    vote = serializers.IntegerField(validators=[MinValueValidator(-1), MaxValueValidator(1)])\n    correction = serializers.UUIDField(source=\"correction.id\")\n    username = serializers.StringRelatedField(source=\"correction.user.username\")\n    sentence = serializers.StringRelatedField(source=\"correction.sentence.body\")\n\n    def validate(self, data):\n        try:\n            CorrectionVote.objects.get(user=self.context['user'],\n                                       correction=data['correction']['id'])\n            raise GenericAPIException(status.HTTP_409_CONFLICT,\n                                      \"You have already voted this correction.\")\n        except CorrectionVote.DoesNotExist:\n            pass\n\n        return data\n\n    def create(self, validated_data):\n        correction_data = validated_data.pop('correction', None)\n\n        if correction_data:\n            try:\n                correction = Correction.objects.get(id=correction_data['id'])\n                validated_data['correction'] = correction\n            except Correction.DoesNotExist:\n                raise Http404\n\n        return CorrectionVote.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass PostSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    base64_id = serializers.SlugField(required=False)\n    user = UserProfileSerializer(source=\"user.userprofile\", read_only=True)\n    created_at = serializers.DateTimeField(required=False)\n    modified_at = serializers.DateTimeField(required=False)\n    body = serializers.CharField()\n    slug = serializers.SlugField(required=False)\n    is_correctable = serializers.BooleanField(required=False)\n    is_reacted = serializers.BooleanField(read_only=True, required=False)\n    like_count = serializers.IntegerField(read_only=True, required=False)\n    sentences = SentenceSerializer(many=True, read_only=True, required=False)\n    correction_count = serializers.IntegerField(read_only=True, required=False)\n    comment_count = serializers.IntegerField(read_only=True, required=False)\n    file_ids = serializers.CharField(required=False)\n    files = FileSerializer(many=True, read_only=True, required=False)\n\n    def validate(self, data):\n        body = data.get('body', None)\n\n        if slugify(body) == '':\n            raise ValidationError({\"body\": \"Invalid post.\"})\n\n        return data\n\n    def create(self, validated_data):\n        file_ids = validated_data.pop('file_ids', None)\n\n        post = Post.objects.create(**validated_data)\n\n        if file_ids:\n            file_ids = file_ids.split(',')\n            images = File.objects.filter(id__in=file_ids,\n                                         user=self.context['user'],\n                                         post=None,\n                                         is_deleted=False)\n\n            for image in images:\n                image.post = post\n                image.save()\n        return post\n\n    def update(self, instance, validated_data):\n        instance.body = validated_data.get('body', instance.text)\n        instance.save()\n        return instance\n\n\nclass PostListSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    base64_id = serializers.SlugField(required=False)\n    user = UserProfileSerializer(source=\"user.userprofile\", read_only=True)\n    name = serializers.CharField(required=False)\n    slug = serializers.SlugField(required=False)\n    description = serializers.CharField(required=False, allow_blank=True)\n    created_at = serializers.DateTimeField(required=False)\n    modified_at = serializers.DateTimeField(required=False)\n    order = serializers.IntegerField(required=False)\n    item_count = serializers.IntegerField(required=False)\n    has_item = serializers.BooleanField(read_only=True, required=False)\n\n    def validate(self, data):\n        name = data.get('name', None)\n\n        if not name and not self.instance:\n            raise ValidationError({\"name\": \"This field is required.\"})\n\n        return data\n\n    def create(self, validated_data):\n        return PostList.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.name = validated_data.get('name', instance.name)\n        instance.description = validated_data.get('description', instance.description)\n        instance.modified_at = timezone.now()\n        instance.save()\n        return instance\n\n\nclass PostListItemSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    list = PostListSerializer(read_only=True)\n    post = PostSerializer(read_only=True)\n    created_at = serializers.DateTimeField(required=False)\n    modified_at = serializers.DateTimeField(required=False)\n    order = serializers.IntegerField(required=False)\n\n    def validate(self, data):\n        list = self.context.get('list', None)\n\n        try:\n            post = Post.objects.get(id=self.context['post_id'],\n                                    is_deleted=False)\n        except Post.DoesNotExist:\n            raise ValidationError({\"post\": \"Invalid post.\"})\n\n        try:\n            PostListItem.objects.get(post=self.context['post_id'],\n                                     list=list)\n            raise ValidationError({\"post\": \"You've already added this post.\"})\n        except PostListItem.DoesNotExist:\n            pass\n\n        data['list'] = list\n        data['post'] = post\n\n        return data\n\n    def create(self, validated_data):\n        return PostListItem.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass NotificationSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    user = UserProfileSerializer(source=\"user.userprofile\", read_only=True)\n    json = serializers.JSONField()\n    read = serializers.BooleanField()\n    created_at = serializers.DateTimeField(required=False)\n\n    def create(self, validated_data):\n        return Notification.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n\n\nclass PlanSerializer(serializers.Serializer):\n    id = serializers.UUIDField(required=False)\n    name = serializers.CharField()\n    monthly = serializers.FloatField()\n    yearly = serializers.FloatField()\n\n    def create(self, validated_data):\n        return Plan.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        instance.save()\n        return instance\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":25812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"432198544","text":"import logging\n\n\ndef init():\n\n    add_logger('db_logger', 'db.log')\n    add_logger('request_logger', 'request.log')\n\n\ndef add_logger(logger_name: str, log_name: str):\n    formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n    logger = logging.getLogger(logger_name)\n    handle = logging.FileHandler('log/%s' % (log_name))\n    handle.setFormatter(formatter)\n    logger.addHandler(handle)","sub_path":"log/log_init.py","file_name":"log_init.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
distrbuteSpider.city_by_zhilian import CITY_LIST\nfrom distrbuteSpider.items import DistrbutespiderItem\n\n\nclass ZhilianSpider(RedisSpider):\n    name = 'zhilian'\n    allowed_domains = ['zhaopin.com']\n    is_first = True\n    temp = 'https://fe-api.zhaopin.com/c/i/sou?start={}&pageSize=100&cityId={}&kw=python&kt=3'\n    redis_key = 'zhilian:start_urls'\n\n    @staticmethod\n    def extract_url(data):\n        result = data['data']['results']\n        if result:\n            for item in result:\n                yield item.get('positionURL')\n\n    def parse(self, response):\n        if ZhilianSpider.is_first:\n            ZhilianSpider.is_first = False\n            for city in CITY_LIST:\n                for i in range(11):\n                    start_url = ZhilianSpider.temp.format(i * 100, city.get('code'))\n                    print('Download is: ', start_url)\n                    yield Request(url=start_url, callback=self.parse, dont_filter=True)\n        else:\n            print('parse is:', response.url)\n            data = json.loads(response.body.decode('utf-8'))\n            result = self.extract_url(data)\n            for url in result:\n                yield Request(url=url, callback=self.parse_detail, dont_filter=True)\n\n    def parse_detail(self, response):\n        item = DistrbutespiderItem()\n        html_str = response.text\n        item['info_source'] = '2'\n        # group names follow the tuple unpacking below\n        regex = re.compile(\n            r'(?P<company_name>.*?)诚聘(?P<job_name>.*?)\\d+人,工作地点位于(?P<city>.*?),薪资待遇(?P<money>.*?),学历要求(?P<education>.*?)或以上,工作经验(?P<work_years>.*?)等更多招聘')\n        item['company_name'], item['job_name'], item['city'], money, item['education'], item['work_years'] = \\\n            regex.findall(html_str)[0]\n        if '面议' not in money:\n            item['min_salary'] = money.split('-')[0]\n            item['max_salary'] = money.split('-')[1][:-3]\n        else:\n            item['min_salary'] = money\n            item['max_salary'] = money\n        item['address'] = response.xpath('//p[@class=\"add-txt\"]/text()').extract_first()\n        item['welfare'] = re.findall(\"var JobWelfareTab\\s=\\s['\\\"](.*?)['\\\"];\", html_str)[0]\n        job_detail_result = response.xpath('//div[@class=\"pos-ul\"]//text()').extract()\n        item['job_detail'] = ''.join([i for i in job_detail_result if not i.isspace() and i != '展开'])\n        yield item\n","sub_path":"distrbuteSpider/spiders/zhilian.py","file_name":"zhilian.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"651799424","text":"#Santos did the hand and field limits and Kevin did the rest\nimport pygame\nimport Card_Class\n\n\nclass player():\n\n    def __init__(self):\n        self.Name = \"Player\"\n        self.Health = 2000\n        self.Deck = []\n        self.Hand = []\n        self.Field = []\n        self.Graveyard = []  # discard pile used by discard()\n\n#when the player's health reaches zero, the program prints Game Over and restarts\n    def player_death(self,Health):\n        if self.Health == 0:\n            print (\"Game Over\")\n            #end game - redirect user to different window\n\n#actions\n#1st stage of turn. Function draws cards from deck by appending the first object from Deck list to Hand list\n    def draw(self):\n        while len(self.Hand) >= 0 and len(self.Hand) < 6:\n            self.Hand.append(self.Deck.pop(0))\n#2nd stage of turn. 
Function puts cards into play by appending cards from Hand list to in_play list\n\n    def discard(self, i):\n        #move card i from the hand to the graveyard\n        self.Graveyard.append(self.Hand.pop(i))\n\n    def attack_card(self,p1Card,p2Card):\n        if p1Card.attack:\n            if p1Card.attack_power > p2Card.attack_power and p2Card.attack:\n                p2Card.in_play = False\n                return p1Card.attack_power - p2Card.attack_power\n            if p1Card.attack_power > p2Card.defense and not p2Card.attack:\n                p2Card.in_play = False\n                return 0\n#cases where the attacking card loses or ties\n            if p1Card.attack_power < p2Card.attack_power and p2Card.attack:\n                p1Card.in_play = False\n                return p1Card.attack_power - p2Card.attack_power\n            if p1Card.attack_power == p2Card.attack_power and p2Card.attack:\n                p1Card.in_play = False\n                p2Card.in_play = False\n                return 0\n            if p1Card.attack_power < p2Card.defense and not p2Card.attack:\n                p1Card.in_play = False\n                return 0\n#return attack difference for player card\n\n    def set_battle_state(self,current_battle_state):\n        if current_battle_state:\n            self.attack = True\n        else:\n            self.attack = False\n\n    def Field_Limit(self):\n        if len(self.Field) >= 0 and len(self.Field) < 3:\n            return True\n        return False\n\n    def Hand_Limit(self):\n        if len(self.Hand) >= 0 and len(self.Hand) < 6:\n            return True\n        return False\n","sub_path":"player_class.py","file_name":"player_class.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"224587363","text":"from appium.webdriver.common.touch_action import TouchAction\nfrom selenium.common.exceptions import NoSuchElementException\nimport environment\nimport unittest\nfrom appium import webdriver\nfrom behave import Given, When, Then\nfrom time import sleep\nimport traceback\n\n#call emulator\n@Given('The \"{title}\"')\ndef title2(self, title):\n    sleep(1)\n\n@Given('User navigate to Calculator application')\ndef start(self):\n    environment.driver.reset()\n    sleep(10)#wait for app to load\n\n@Then('User must see title with \"{Title}\"')\ndef title(self, Title):\n    try:\n        environment.driver.find_element_by_xpath(\"//android.view.View[@text='{}']\".format(Title))\n        #environment.screenshot_pass()\n    except NoSuchElementException:\n        #environment.screenshot_fail()\n        assert False, \"Don't see message\"\n\n@When('User press the first number with \"{number1}\"')\ndef fill_usernames(self, number1):\n    try:\n        #temp = tuple(number1)\n        for i in number1:\n            if i == '0':\n                user = environment.driver.find_element_by_xpath(\"//*[@text='O']\")\n                user.click()\n            else:\n                user = environment.driver.find_element_by_xpath(\"//*[@text='{}']\".format(i))\n                user.click()\n        #sleep(1)\n        environment.screenshot_pass()\n    except NoSuchElementException as e: #check locator exist or not\n        #environment.screenshot_pass()\n        assert False, \"Could not find number1\"\n\n@When('User press \"{symbol}\" symbol')\ndef fill_usernames(self, symbol):\n    try:\n        user = environment.driver.find_element_by_xpath(\"//*[@text='{}']\".format(symbol))\n        user.click()\n        sleep(1)\n        # environment.screenshot_pass()\n    except NoSuchElementException as e: #check locator exist or not\n        #environment.screenshot_pass()\n        assert False, \"Could not find symbol\"\n\n@When('User press the second number with \"{number2}\"')\ndef fill_usernames(self, number2):\n    try:\n        #temp = tuple(number2)\n        for y in number2:\n            if y == '0':\n                user = environment.driver.find_element_by_xpath(\"//*[@text='O']\")\n                user.click()\n            else:\n                user = environment.driver.find_element_by_xpath(\"//*[@text='{}']\".format(y))\n                user.click()\n        #sleep(1)\n        environment.screenshot_pass()\n    except 
NoSuchElementException as e: #check locator exist or not\n        #environment.screenshot_pass()\n        assert False, \"Could not find number2\"\n\n@When('User press \"{result}\" button')\ndef fill_usernames(self, result):\n    try:\n        user = environment.driver.find_element_by_xpath(\"//*[@text='{}']\".format(result))\n        user.click()\n        #sleep(1)\n        environment.screenshot_pass()\n    except NoSuchElementException as e: #check locator exist or not\n        #environment.screenshot_pass()\n        assert False, \"Could not find result button\"\n\n@Then('User must see the result with \"{message}\"')\ndef title(self, message):\n    try:\n        environment.driver.find_element_by_xpath(\"//android.view.View[@text='{}']\".format(message))\n        #environment.screenshot_pass()\n    except NoSuchElementException:\n        #environment.screenshot_fail()\n        assert False, \"Don't see final result\"","sub_path":"automationTest/steps/mainPage.py","file_name":"mainPage.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"218144303","text":"\n\n#class header\nclass _SILLY():\n\tdef __init__(self,): \n\t\tself.name = \"SILLY\"\n\t\tself.definitions = [u'showing little thought or judgment: ', u'embarrassed; afraid that people will laugh at you: ', u'not important, serious, or practical: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_silly.py","file_name":"_silly.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"391136281","text":"#!/usr/bin/env python3\nimport argparse\nimport logging\nfrom email.utils import mktime_tz, parsedate_tz, parseaddr\nfrom mailbox import Maildir, MaildirMessage\nimport re\nimport sys\n\nfrom orator import Model\nfrom orator.exceptions.orm import ModelNotFound\n\nimport config\nfrom models.folder import Folder\nfrom models.address import Address\nfrom models.message import Message\n\ndef start_logger(conn):\n    conn.enable_query_log()\n    logger = logging.getLogger('orator.connection.queries')\n    logger.setLevel(logging.DEBUG)\n    formatter = logging.Formatter('%(query)s')\n    handler = logging.StreamHandler()\n    handler.setFormatter(formatter)\n    logger.addHandler(handler)\n\ndef log_tick(char='.', flush=True):\n    print(char, end='')\n    if flush:\n        sys.stdout.flush()\n\ndef log_line(string):\n    print()\n    print(string)\n\ndef flush_database(conn):\n    conn.statement('TRUNCATE TABLE `addresses_messages`')\n    conn.statement('TRUNCATE TABLE `messages`')\n    conn.statement('TRUNCATE TABLE `addresses`')\n    conn.statement('TRUNCATE TABLE `folders`')\n\ndef parse_date(date):\n    if date is None:\n        return None\n    else:\n        return mktime_tz(parsedate_tz(date))\n\ndef insert_addresses(mail):\n    tos = [('to', addr) for addr in mail.get_all('to', [])]\n    ccs = [('cc', addr) for addr in mail.get_all('cc', [])]\n    froms = [('from', addr) for addr in mail.get_all('from', [])]\n    for type_, addr in tos + ccs + froms:\n        _, addr = parseaddr(str(addr))\n        \n        if addr == '':\n            continue\n\n        addrs = Address.where('email', addr)\n        if addrs.count() == 0:\n            yield (type_, Address.create(email=addr, domain=''))\n            log_tick('a')\n        else:\n            yield (type_, addrs.first())\n\ndef main():\n    pars = argparse.ArgumentParser('Import Maildir messages to a 
database.')\n    \n    pars.add_argument(\n        'location', metavar='LOCATION', type=str,\n        help='Location of the Maildir')\n    pars.add_argument(\n        '-v', dest='verbose', action='store_true',\n        help='Verbose output')\n    pars.add_argument(\n        '-xf', '--exclude-folders', dest='exclude_folders', type=str,\n        help='Exclude folders matching a regular expression')\n\n    args = pars.parse_args()\n\n    if args.verbose:\n        start_logger(config.db)\n\n    Model.set_connection_resolver(config.db)\n\n    flush_database(config.db)\n\n    md = Maildir(args.location)\n    for folder in md.list_folders():\n        if args.exclude_folders:\n            if re.match(args.exclude_folders, folder):\n                log_line('Skipping %s' % folder)\n                continue\n\n        # Insert folder\n        Folder.create(name=folder)\n        log_line(folder)\n\n        for key, mail in md.get_folder(folder).iteritems():\n            # Insert message\n            msg = Message.create(\n                filename=key,\n                folder=folder,\n                subject=str(mail.get('Subject')).encode('utf8')[:1022],\n                date=parse_date(mail.get('Date')),\n                content_type=mail.get_content_type()\n            )\n\n            # Insert addresses\n            addrs = list(insert_addresses(mail))\n            for type_, addr in addrs:\n                msg.addresses().save(addr, {'type': type_})\n\n            log_tick('.')\n\n    print()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"506174044","text":"# wpsync server api module\n\nimport requests\nimport json\nimport threading\n\n\nclass ServerApi(object):\n\n    def __init__(self, host, port):\n        self.addr = ''.join(['http://', host, ':', port])\n\n    def set_token(self, token):\n        self.token = token\n\n    def introduce(self):\n        url = ''.join([self.addr, '/api/introduce'])\n        result = requests.post(url).json()\n        return result\n\n    def standby(self):\n        headers = {'content-type': 'application/json'}\n        url = ''.join([self.addr, '/api/standby'])\n        data = {'from': self.token}\n        result = requests.post(\n            url,\n            data=json.dumps(data),\n            headers=headers).json()\n        return result\n\n    def message(self, receivers, data):\n        headers = {'content-type': 'application/json'}\n        url = ''.join([self.addr, '/api/message'])\n        data = {'from': self.token,\n                'to': receivers,\n                'data': data}\n        result = requests.post(\n            url,\n            data=json.dumps(data),\n            headers=headers).json()\n        return result\n\n    def broadcast(self, data):\n        headers = {'content-type': 'application/json'}\n        url = ''.join([self.addr, '/api/broadcast'])\n        data = {'from': self.token,\n                'data': data}\n        result = requests.post(\n            url,\n            data=json.dumps(data),\n            headers=headers).json()\n        return result\n\nif __name__ == \"__main__\":\n    import time\n    import threading\n    wpsync = ServerApi('127.0.0.1', '8080')\n    introduced = wpsync.introduce()\n    if introduced['authorized']:\n        wpsync.set_token(introduced['token'])\n        print('receive id ', introduced['id'])\n\n    def listen():\n        while True:\n            print(wpsync.standby())\n\n    listener = threading.Thread(target=listen)\n    listener.start()\n\n    time.sleep(1)\n    sendResult = wpsync.message(['1'], 'foo')\n    print(sendResult)\n    broadcastResult = wpsync.broadcast('bar')\n    print(broadcastResult)\n","sub_path":"server_api.py","file_name":"server_api.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"194642495","text":"\n# Imports\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, 
Activation, Flatten, Reshape\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import regularizers\nfrom keras.losses import mean_squared_error\nimport glob\nimport matplotlib.patches as patches\nimport json\nimport numpy as np\nfrom matplotlib.path import Path\nimport dicom\nimport cv2\n\nfrom utils import *\n\ndef create_model(activation, input_shape=(64, 64)):\n model = Sequential()\n model.add(Conv2D(100, (11,11), activation=activation, padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))\n model.add(AveragePooling2D((6,6)))\n model.add(Reshape([-1, 8100]))\n model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))\n model.add(Reshape([-1, 32, 32]))\n return model\n\ndef create_model_maxpooling(activation, input_shape=(64, 64)):\n model = Sequential()\n model.add(Conv2D(100, (11,11), activation=activation, padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))\n model.add(MaxPooling2D((6,6)))\n model.add(Reshape([-1, 8100]))\n model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))\n model.add(Reshape([-1, 32, 32]))\n return model\n\ndef create_model_larger(activation, input_shape=(64, 64)):\n model = Sequential()\n model.add(Conv2D(200, (11,11), activation=activation, padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))\n model.add(AveragePooling2D((6,6)))\n model.add(Reshape([-1, 16200]))\n model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))\n model.add(Reshape([-1, 32, 32]))\n return model\n\ndef create_model_deeper(activation, input_shape=(64, 64)):\n model = Sequential()\n model.add(Conv2D(64, (11,11), activation=activation, padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))\n model.add(AveragePooling2D((2,2)))\n model.add(Conv2D(128, (10, 10), activation=activation, padding='valid', strides=(1, 1)))\n model.add(AveragePooling2D((2,2)))\n model.add(Reshape([-1, 128*9*9]))\n model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))\n model.add(Reshape([-1, 32, 32]))\n return model\n\ndef create_model_full(activation, input_shape=(64, 64)):\n model = Sequential()\n model.add(Conv2D(64, (11,11), activation=activation, padding='valid', strides=(1, 1), input_shape=(input_shape[0], input_shape[1], 1)))\n model.add(MaxPooling2D((2,2)))\n model.add(Conv2D(128, (10, 10), activation=activation, padding='valid', strides=(1, 1)))\n model.add(MaxPooling2D((2,2)))\n model.add(Reshape([-1, 128*9*9]))\n model.add(Dense(1024, activation='sigmoid', kernel_regularizer=regularizers.l2(0.0001)))\n model.add(Reshape([-1, 32, 32]))\n return model\n\ndef training(m, X, Y, verbose, batch_size=16, epochs=20, data_augm=False):\n if data_augm:\n datagen = ImageDataGenerator(\n featurewise_center=False, \n samplewise_center=False, \n featurewise_std_normalization=False, \n samplewise_std_normalization=False, \n zca_whitening=False, \n rotation_range=50, \n width_shift_range=0.1, \n height_shift_range=0.1, \n horizontal_flip=True, \n vertical_flip=False) \n datagen.fit(X)\n history = m.fit_generator(datagen.flow(X, Y,\n batch_size=batch_size),\n steps_per_epoch=X.shape[0] // batch_size,\n epochs=epochs,\n verbose=verbose) \n else:\n history = m.fit(X, Y, batch_size=batch_size, epochs=epochs, verbose=verbose)\n return history, m\n\ndef run(model='simple', X_to_pred=None, 
history=False, verbose=0, activation=None, epochs=20, data_augm=False):\n    X, X_fullsize, Y, contour_mask = create_dataset()\n    if model == 'simple':\n        m = create_model(activation=activation)\n    elif model == 'larger':\n        m = create_model_larger(activation=activation)\n    elif model == 'deeper':\n        m = create_model_deeper(activation=activation)\n    elif model == 'maxpooling':\n        m = create_model_maxpooling(activation=activation)\n    elif model == 'full':\n        m = create_model_full(activation=activation)\n\n    m.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n    if verbose > 0:\n        print('Size for each layer :\\nLayer, Input Size, Output Size')\n        for p in m.layers:\n            print(p.name.title(), p.input_shape, p.output_shape)\n    h, m = training(m, X, Y, verbose=verbose, batch_size=16, epochs=epochs, data_augm=data_augm)\n\n    if X_to_pred is None:\n        X_to_pred = X\n    y_pred = m.predict(X_to_pred, batch_size=16)\n    \n    if history:\n        return X, X_fullsize, Y, contour_mask, y_pred, h, m\n    else:\n        return X, X_fullsize, Y, contour_mask, y_pred, m\n\ndef inference(model):\n    X_test, X_fullsize_test, Y_test, contour_mask_test = create_dataset(n_set='test')\n    y_pred = model.predict(X_test, batch_size=16)\n    return X_test, X_fullsize_test, Y_test, contour_mask_test, y_pred\n","sub_path":"01 Introdução/Webnar Projeto Especial 01/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"28514018","text":"import numpy as np\r\nimport cv2\r\nimport time\r\nimport sys\r\nnp.set_printoptions(threshold = np.inf)\r\n##################### NOTE: I did not check boundary overflow ###################\r\n\r\nzero_set = ()\r\ndatatype = 'int'\r\n# narrow band width\r\nband_width = 5\r\nband_range = ()\r\ndifferential_range = ()\r\nimg_shape = ()\r\ndist_mask = np.array([])\r\ncross_mask = (np.array([-1, 0, 1, 0], dtype='int'), np.array([0, 1, 0, -1], dtype='int'))\r\n# lookup_mask_list = []\r\nthresh_hold = 0.05\r\n\r\n\r\nclass Initial_img:\r\n\tdef __init__(self):\r\n\t\tself.window_name = \"\"\r\n\t\tself.up_left = [0, 0]\r\n\t\tself.bot_right = [0, 0]\r\n\t\t# self.center = ()\r\n\t\t# self.border = ()\r\n\t\tself.in_img = np.array([])\r\n\r\ndef onMouse(event, x, y, flags, param):\r\n\tdst = param.in_img.copy()\r\n\r\n\tif event == cv2.EVENT_LBUTTONDOWN:\r\n\r\n\t\tcur_point = (x, y)\r\n\t\tparam.up_left = list(cur_point)\r\n\t\tcv2.circle(dst, cur_point, 2, (0, 255, 0), cv2.FILLED)\r\n\t\tcv2.imshow(param.window_name, dst)\r\n\telif (event == cv2.EVENT_MOUSEMOVE) and (flags & cv2.EVENT_FLAG_LBUTTON):\r\n\t\tcur_point = (x, y)\r\n\t\tcv2.rectangle(dst, tuple(param.up_left), cur_point, (0, 255, 0), 2)\r\n\t\tcv2.imshow(param.window_name, dst)\r\n\telif event == cv2.EVENT_LBUTTONUP:\r\n\t\tparam.up_left[0], param.bot_right[0] = np.sort([param.up_left[0], x])\r\n\t\tparam.up_left[1], param.bot_right[1] = np.sort([param.up_left[1], y])\r\n\t# if event == cv2.EVENT_LBUTTONDOWN:\r\n\t# \tparam.center = (x, y)\r\n\t# if event == cv2.EVENT_LBUTTONUP:\r\n\t# \tparam.border = (x, y)\r\n\t# \tdist = np.round(np.linalg.norm(np.array(param.center) - np.array(param.border)))\r\n\t# \tshape = (int(dist) * 2 + 1, int(dist) * 2 + 1)\r\n\t# \ttemp = np.zeros(shape)\r\n\t# \tindex = np.where(temp == 0)\r\n\t# \ttemp = np.round(np.sqrt((index[0] - dist)**2 + (index[1] - dist)**2)).reshape(shape)\r\n\t# \tindex = np.where(temp == dist)\r\n\t# \tarray1 = index[0]\r\n\t# \tarray2 = index[1]\r\n\t# \tarray1 += 
param.center[1] - int(dist)\r\n\t# \tarray2 += param.center[0] - int(dist)\r\n\t# \tglobal zero_set\r\n\t# \tzero_set = (array1, array2)\r\n\t# \tdst[zero_set] = [255, 0, 0]\r\n\t# \tcv2.imshow(param.window_name, dst)\r\n\t# if event == cv2.EVENT_RBUTTONDOWN:\r\n\t# \tdst = param.in_img.copy()\r\n\t# \tcv2.imshow(param.window_name, dst)\r\n\r\n\r\n'''\r\nInitial distance small mask\r\n'''\r\ndef DistMaskMatrix(band_width):\r\n\tmask = np.zeros([band_width * 2 + 1, band_width * 2 + 1], dtype = datatype)\r\n\tfor i in range(band_width * 2 + 1):\r\n\t\tfor j in range(band_width * 2 + 1):\r\n\t\t\tmask[i][j] = np.round(np.sqrt(np.power(i - band_width, 2) + pow(j - band_width, 2)))\r\n\tmask[np.where(mask > band_width)] = band_width + 1\r\n\treturn mask\r\n\r\n# def LookupMaskList(band_width):\r\n# \tmask_list = []\r\n# \tfor l in range(1, band_width + 1):\r\n# \t\tmask = np.zeros((l * 2 + 1, l * 2 + 1), dtype = 'uint8')\r\n# \t\tfor i in range(band_width * 2 + 1):\r\n# \t\t\tfor j in range(band_width * 2 + 1):\r\n# \t\t\t\tmask[i][j] = np.round(np.sqrt(np.power(i - band_width, 2) + pow(j - band_width, 2)))\r\n# \t\tmask\r\n# \t\tmask_list.append(mask)\r\n# \treturn mask_list\r\n\r\n'''\r\nInitial narrow band\r\nfirst overload\r\n@param rect_ul means the up left of rectangular\r\n@param rect_br means the bottom right of rectangular\r\n@pamra psi is unmasked\r\n'''\r\ndef InitNarrowBand(band_width, rect_ul, rect_br):\r\n\tpsi = (band_width + 1) * np.ones(img_shape)\r\n\t# height and width of rectangle\r\n\theight = rect_br[1] - rect_ul[1]\r\n\twidth = rect_br[0] - rect_ul[0]\r\n\t# set interface to 0\r\n\tpsi[rect_ul[1], rect_ul[0] + 1 : rect_br[0]] = 0\r\n\tpsi[rect_br[1], rect_ul[0] + 1 : rect_br[0]] = 0\r\n\tpsi[(rect_ul[1] + 1) : (rect_br[1]), rect_ul[0]] = 0\r\n\tpsi[(rect_ul[1] + 1) : (rect_br[1]), rect_br[0]] = 0\r\n\tglobal zero_set\r\n\tzero_set = np.where(psi == 0)\r\n\t# print(rect_ul, rect_br)\r\n\r\n\tfor w in range(band_width, 0, -1):\r\n\t\t# for i, j in zip(zero_set[0], zero_set[1]):\r\n\t\tindex = np.where(dist_mask == w)\r\n\t\tfor i, j in zip(index[0], index[1]):\r\n\t\t\tpsi[(zero_set[0] - band_width + i, zero_set[1] - band_width + j)] = w\r\n\t\t\t# temp = psi[i - band_width : i + band_width + 1, j - band_width : j + band_width + 1]\r\n\t\t\t# index = np.where(temp >= dist_mask)\r\n\t\t\t# temp[index] = dist_mask[index]\r\n\t\t\t# print(psi[i - band_width : i + band_width + 1, j - band_width : j + band_width + 1])\r\n\tpsi[zero_set] = 0\r\n\tglobal band_range\r\n\tband_range = np.where(psi <= band_width)\r\n\tglobal differential_range\r\n\tdifferential_range = np.where(psi < band_width)\r\n\treturn psi\r\n\r\n'''\r\nInitial narrow band\r\nSecond overload\r\n'''\r\ndef InitNarrowBand1(band_width, zero_set):\r\n\tpsi = (band_width + 1) * np.ones(img_shape)\r\n\r\n\tfor w in range(band_width, 0, -1):\r\n\t\t# for i, j in zip(zero_set[0], zero_set[1]):\r\n\t\tindex = np.where(dist_mask == w)\r\n\t\tfor i, j in zip(index[0], index[1]):\r\n\t\t\tpsi[(zero_set[0] - band_width + i, zero_set[1] - band_width + j)] = w\r\n\t\t# \ttemp = psi[i - band_width : i + band_width + 1, j - band_width : j + band_width + 1]\r\n\t\t# \tindex = np.where(temp >= dist_mask)\r\n\t\t# \ttemp[index] = dist_mask[index]\r\n\t\t\t# print(psi[i - band_width : i + band_width + 1, j - band_width : j + band_width + 1])\r\n\tpsi[zero_set] = 0\r\n\tglobal band_range\r\n\tband_range = np.where(psi <= band_width)\r\n\tglobal differential_range\r\n\tdifferential_range = np.where((psi < band_width) & (psi > 
-band_width))\r\n\treturn psi\r\n\r\n\r\ndef InitSDF(unmasked_psi):\r\n\tsdf_mask = np.ones(unmasked_psi.shape, dtype = datatype)\r\n\tband_set = np.where(unmasked_psi <= band_width)\r\n\t# index of unmasked psi whose distance to front is band width\r\n\tmax_dist_index = np.where(unmasked_psi == band_width)\r\n\trow_min = np.min(max_dist_index[0])\r\n\trow_max = np.max(max_dist_index[0])\r\n\r\n\tfor i in range(row_min, row_max + 1):\r\n\t\tlast_sign = 1\r\n\t\tsign_change = False\r\n\t\tinside_band = True\r\n\t\tinside_front = False\r\n\t\t# next grid will be outside the narrow band\r\n\t\tflag_out = False\r\n\r\n\t\tup = False\r\n\t\tdown = False\r\n\t\tsub_col_min = np.min(max_dist_index[1][np.where(max_dist_index[0] == i)])\r\n\t\tsub_col_max = np.max(max_dist_index[1][np.where(max_dist_index[0] == i)])\r\n\t\tfor j in range(sub_col_min, sub_col_max + 1):\r\n\t\t\t# if boundary point\r\n\t\t\tif unmasked_psi[i][j] == 0:\r\n\t\t\t\tsdf_mask[i][j] = 0\r\n\t\t\t\t# if not inside_front:\r\n\t\t\t\t# \tinside_front = True\r\n\t\t\t\tif np.count_nonzero(unmasked_psi[i - 1, j - 1: j + 2]) != 3:\r\n\t\t\t\t\tup = True\r\n\t\t\t\tif np.count_nonzero(unmasked_psi[i + 1, j - 1: j + 2]) != 3:\r\n\t\t\t\t\tdown = True\r\n\t\t\t\t# \tif unmasked_psi[i][j + 1] == 0:\r\n\t\t\t\t# \t\tcontinue\r\n\t\t\t\t# \telse:\r\n\t\t\t\t# \t\tinside_front = False\r\n\t\t\t\t# \t\tsign_change = not sign_change\r\n\t\t\t\t# else:\r\n\t\t\t\t# \tif unmasked_psi[i][j + 1] == 0:\r\n\t\t\t\t# \t\tcontinue\r\n\t\t\t\t# \telse:\r\n\t\t\t\t# \t\tinside_front = False\r\n\t\t\t\tif up and down:\r\n\t\t\t\t\tsign_change = not sign_change\r\n\t\t\t\t\tup = False\r\n\t\t\t\t\tdown = False\r\n\t\t\t# not boundary point\r\n\t\t\t# elif unmasked_psi[i][j] == band_width:\r\n\t\t\t# \tif not inside_band:\r\n\t\t\t# \t\tinside_band = True\r\n\t\t\t# \t# since values outside the narrow band are maxsize\r\n\t\t\t# \tif inside_band and unmasked_psi[i][j + 1] > band_width:\r\n\t\t\t# \t\tinside_band = False\r\n\t\t\t#\r\n\t\t\t#\r\n\t\t\t# \tif not sign_change:\r\n\t\t\t# \t\tsdf_mask[i][j] = last_sign\r\n\t\t\t# \telse:\r\n\t\t\t# \t\tlast_sign = -last_sign\r\n\t\t\t# \t\tsdf_mask[i][j] = last_sign\r\n\t\t\telse:\r\n\t\t\t\t# if inside_band:\r\n\t\t\t\tif not sign_change:\r\n\t\t\t\t\tsdf_mask[i][j] = last_sign\r\n\t\t\t\telse:\r\n\t\t\t\t\tlast_sign = -last_sign\r\n\t\t\t\t\tsdf_mask[i][j] = last_sign\r\n\t\t\t\t\tsign_change = False\r\n\t\t\t\t# else:\r\n\t\t\t\t# \tcontinue\r\n\treturn sdf_mask\r\n\r\n'''\r\nZero level set initialization\r\n\r\n'''\r\ndef InitLevelSet(unmasked_psi, sdf_mask):\r\n\treturn unmasked_psi * sdf_mask\r\n\r\n\r\ndef ExtensionPhi(psi, phi, phi_gradient):\r\n\text_phi = np.zeros(img_shape)\r\n\text_phi_gradient = [np.zeros(img_shape), np.zeros(img_shape)]\r\n\tfor i, j in zip(band_range[0], band_range[1]):\r\n\t\t# if it is boundary\r\n\t\tif psi[i][j] == 0:\r\n\t\t\text_phi[i][j] = phi[i][j]\r\n\t\t# if it is not boundary\r\n\t\telse:\r\n\t\t\tdistance = np.fabs(psi[i][j])\r\n\t\t\t# find the close front\r\n\t\t\tindex = np.where(dist_mask == distance)\r\n\t\t\tshift_i = int(i - band_width)\r\n\t\t\tshift_j = int(j - band_width)\r\n\t\t\t#index[0] -= band_width\r\n\t\t\t# np.where(psi[index] == 0)\r\n\t\t\tfor ii, jj in zip(index[0], index[1]):\r\n\t\t\t\tif psi[shift_i + ii][shift_j + jj] == 0:\r\n\t\t\t\t\text_phi[i][j] = phi[shift_i + ii][shift_j + jj]\r\n\t\t\t\t\text_phi_gradient[0][i][j] = phi_gradient[0][shift_i + ii][shift_j + jj]\r\n\t\t\t\t\text_phi_gradient[1][i][j] = phi_gradient[1][shift_i + 
ii][shift_j + jj]\r\n\t\t\t\t\tbreak;\r\n\treturn ext_phi, ext_phi_gradient\r\n\r\n\r\n\r\ndef Evolve(old_psi, phi, phi_gradient):\r\n\tpsi = old_psi.copy()\r\n\tpsi_next = old_psi.copy()\r\n\t# psi_next = (band_width + 1) * np.ones(img_shape)\r\n\tbegin = time.time()\r\n\text_phi, ext_phi_gradient = ExtensionPhi(old_psi, phi, phi_gradient)\r\n\tend = time.time()\r\n\tprint (\"extension cost\", end - begin)\r\n\thit_border = False\r\n\t# delta that satisfies CFL condition\r\n\tdelta_t = 1 / (np.fabs(phi_gradient[1]).max() + np.fabs(phi_gradient[0]).max()) - 0.9\r\n\titeration = 0\r\n\twhile not hit_border:\r\n\t\tbegin = time.time()\r\n\t\tfor i, j in zip(differential_range[0], differential_range[1]):\r\n\t#!!!!!!!!! problem to be fiex: what if the next evolution exceeds the narrow band boundary\r\n\t\t\t# geomatric heat equation item\r\n\t\t\tghe_item = 0\r\n\t\t\tpsi_dx = (psi[i][j + 1] - psi[i][j - 1]) / 2\r\n\t\t\tpsi_dy = (psi[i + 1][j] - psi[i - 1][j]) / 2\r\n\t\t\tif psi_dx != 0 and psi_dy != 0:\r\n\t\t\t\tpsi_dxy = (psi[i + 1][j + 1] - psi[i - 1][j + 1] - psi[i + 1][j - 1] + psi[i - 1][j - 1]) / 4\r\n\t\t\t\tpsi_dxx = psi[i][j + 1] + psi[i][j - 1] - 2 * psi[i][j]\r\n\t\t\t\tpsi_dyy = psi[i + 1][j] + psi[i - 1][j] - 2 * psi[i][j]\r\n\t\t\t\tghe_item = ext_phi[i][j] * (psi_dx * psi_dx * psi_dyy - 2 * psi_dx * psi_dy * psi_dxy + psi_dy * psi_dy * psi_dxx) / (psi_dx * psi_dx + psi_dy * psi_dy)\r\n\r\n\t\t\t# transport pde item\r\n\t\t\ttpde_item = 0\r\n\t\t\text_phi_dx = ext_phi_gradient[1][i][j]\r\n\t\t\text_phi_dy = ext_phi_gradient[0][i][j]\r\n\t\t\t# determin the sign of phi_dx\r\n\t\t\tif (ext_phi_dx > 0):\r\n\t\t\t\tpsi_dx = psi[i][j + 1] - psi[i][j]\r\n\t\t\t\ttpde_item += ext_phi_dx * psi_dx\r\n\t\t\telif ext_phi_dx < 0:\r\n\t\t\t\tpsi_dx = psi[i][j] - psi[i][j - 1]\r\n\t\t\t\ttpde_item += ext_phi_dx * psi_dx\r\n\t\t\t# determin sign of phi_dy\r\n\t\t\tif ext_phi_dy > 0:\r\n\t\t\t\tpsi_dy = psi[i + 1][j] - psi[i][j]\r\n\t\t\t\ttpde_item += ext_phi_dy * psi_dy\r\n\t\t\telif ext_phi_dy < 0:\r\n\t\t\t\tpsi_dy = psi[i][j] - psi[i - 1][j]\r\n\t\t\t\ttpde_item += ext_phi_dy * psi_dy\r\n\r\n\t\t\tdelta = (ghe_item + tpde_item)\r\n\t\t\tpsi_next[i][j] = (psi[i][j] + delta_t * delta)\r\n\t\t\tif (abs(old_psi[i][j]) == band_width - 1 and psi_next[i][j] * old_psi[i][j] <= 0):\r\n\t\t\t\thit_border = True\r\n\t\t# test_show = np.zeros((100, 100), dtype = 'uint8')\r\n\t\t# for i in range(band_width + 1):\r\n\t\t# \ttest_show[np.where((np.fabs(psi_next) >= i - 0.5) & (np.fabs(psi_next) < i + 0.5))] = 255 - i * 40\r\n\t\t# cv2.namedWindow(\"test\", 0)\r\n\t\t# cv2.imshow(\"test\", test_show)\r\n\t\t# cv2.waitKey(0)\r\n\t\tif hit_border:\r\n\t\t\tbreak\r\n\t\tpsi_next, psi = psi, psi_next\r\n\t\t# iteration += 1\r\n\t\tend = time.time()\r\n\t\tprint(\"Iteration cost\", end - begin)\r\n\r\n\r\n\tprobable_front_index = np.where((psi_next >= 0) & (psi_next < band_width))\r\n\t# for writing convinience\r\n\ta = probable_front_index\r\n\tindex_of_a = np.where((psi_next[(a[0] + cross_mask[0][0], a[1] + cross_mask[1][0])] < 0) | (psi_next[(a[0] + cross_mask[0][1], a[1] + cross_mask[1][1])] < 0)\\\r\n\t\t\t | (psi_next[(a[0] + cross_mask[0][2], a[1] + cross_mask[1][2])] < 0) | (psi_next[(a[0] + cross_mask[0][3], a[1] + cross_mask[1][3])] < 0))\r\n\tglobal zero_set\r\n\tzero_set = (a[0][index_of_a], a[1][index_of_a])\r\n\t# psi_next = np.round(psi_next)\r\n\t# for i, j in zip(differential_range[0], differential_range[1]):\r\n\t# \tif psi_next[i][j] == 0:\r\n\t# \t\tzero_set_i.append(i)\r\n\t# 
\t\tzero_set_j.append(j)\r\n\r\n\t# global zero_set\r\n\t# zero_set = (np.array(zero_set_i), np.array(zero_set_j))\r\n\treturn psi_next\r\n\r\n\r\n\r\n\r\n\r\n\r\n########### initialize interface ###########\r\ninitial_img = Initial_img()\r\ninitial_img.in_img = cv2.imread(\"test.png\")\r\ngray_img = cv2.cvtColor(initial_img.in_img, cv2.COLOR_BGR2GRAY)\r\ninitial_img.window_name = \"Input Image\"\r\n# cv2.namedWindow(initial_img.window_name, 0)\r\n# cv2.imshow(initial_img.window_name, initial_img.in_img)\r\n# cv2.setMouseCallback(initial_img.window_name, onMouse, initial_img)\r\n# while True:\r\n# \tif cv2.waitKey(0) == 13:\r\n# \t\tbreak\r\n# cv2.destroyAllWindows()\r\nimg_shape = gray_img.shape\r\n# print(initial_img.up_left, initial_img.bot_right)\r\n\r\ndist_mask = DistMaskMatrix(band_width)\r\n# lookup_mask_list = LookupMaskList(band_width)\r\n\r\n# initial_img.up_left = [391, 33]\r\n# initial_img.bot_right = [897, 887]\r\ninitial_img.up_left = [14, 15]\r\ninitial_img.bot_right = [83, 81]\r\n# \\psi\r\n\r\nbegin = time.time()\r\nunmasked_psi = InitNarrowBand(band_width, initial_img.up_left, initial_img.bot_right)\r\nend = time.time()\r\nprint(\"cost\", end - begin)\r\n# unmasked_psi = InitNarrowBand1(band_width, zero_set)\r\niteration = 0\r\n\r\n############# get phi ##############\r\nimg_gradient = np.gradient(gray_img)\r\nphi = 1 / (1 + img_gradient[0] ** 2 + img_gradient[1] ** 2)\r\nphi_gradient = np.gradient(phi)\r\n# # phi_dx = the second ite\r\n# print(np.max(phi_gradient[1]), np.max(phi_gradient[0]))\r\n\r\nwhile (iteration < 500):\r\n\t# interface_start = initial_img.up_left\r\n\t# store signs info\r\n\tsdf_mask = InitSDF(unmasked_psi)\r\n\r\n\tmasked_psi = InitLevelSet(unmasked_psi, sdf_mask)\r\n\r\n\r\n\t############# evolution #############\r\n\t# psi_next = Evolve(masked_psi, phi, phi_gradient)\r\n\r\n\tEvolve(masked_psi, phi, phi_gradient)\r\n\tunmasked_psi = InitNarrowBand1(band_width, zero_set)\r\n\r\n\tshape = gray_img.shape\r\n\ttest_show = gray_img.copy()\r\n\t# # a = np.where(masked_psi != 0)\r\n\t# a = np.where((masked_psi > 0) & (masked_psi <=band_width) )\r\n\t# test_show[a] = 200\r\n\t# test_show[np.where(masked_psi == 0)] = 0\r\n\t# test_show[np.where(masked_psi > band_width)] = 0\r\n\t# test_show[np.where((masked_psi >= -band_width) & (masked_psi < 0))] = 100\r\n\r\n\t# for i in range(band_width + 1):\r\n\ttest_show[np.where(np.fabs(masked_psi) == 0)] = 125\r\n\tcv2.namedWindow(\"test\", 0)\r\n\tcv2.imshow(\"test\", test_show)\r\n\tcv2.waitKey(1)\r\n\r\n\t# for i in range(band_width + 1):\r\n\ttest_show[np.where(np.fabs(unmasked_psi) == 0)] = 125\r\n\t# test_show[np.where(psi_next == 0)] = 0\r\n\t# test_show[np.where((psi_next > 0) & (psi_next <= band_width))] = 200\r\n\t# test_show[np.where((psi_next < 0) & (psi_next >= -band_width))] = 100\r\n\t# test_show[np.where(sdf_mask < 0)] = 125\r\n\tcv2.namedWindow(\"test\", 0)\r\n\tcv2.imshow(\"test\", test_show)\r\n\tcv2.waitKey(1)\r\n\r\n\titeration += 1\r\n\tprint(iteration)\r\n\r\n","sub_path":"levelsets.py","file_name":"levelsets.py","file_ext":"py","file_size_in_byte":14124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"209569156","text":"import numpy as np\nimport math\nimport scipy\nfrom scipy import optimize\nimport matplotlib\nfrom matplotlib import gridspec\nfrom matplotlib import rc\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n########## Load data ####################################\n\nmean_rep = 
pd.read_csv('data/Subtracted_spectrum.csv')\n\n\n# Define the optimal parameters for dE1 = 1.8 eV #\n# [ amp, E_bg, b ] #\n\namp = 2717\npars = [amp, 1.59, 1.257]\npars_high = [amp, 1.59, (1.26 + 0.3)]\npars_low = [amp, 1.59, (1.26 - 0.74)]\nnbins = 493\n\n\nrescolors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n########################################################################\n\ndef bandgap(x, amp, BG,b):\n return amp * (x - BG)**(b)\n\n########################################################################\n\n\nnrows, ncols = 1,1\ngs = matplotlib.gridspec.GridSpec(nrows,ncols)\nplt.figure(figsize=(ncols*9,nrows*7))\n\ncm_subsection = np.linspace(0,1,24) \ncolors = [cm.viridis(x) for x in cm_subsection]\n\nhfont = rc('font',**{'family':'sans-serif','sans-serif':['Sans Serif']})\namp = 2700\npars = [amp, 1.59, 1.257]\npars_high = [amp, 1.59, (1.26 + 0.3)]\npars_low = [amp, 1.59, (1.26 - 0.74)]\n \nfor i in range(1):\n ax = plt.subplot(gs[i])\n ax.set_xlim([1.3,4.5])\n ax.set_ylim([-1e1,4.5e3])\n ax.set_ylabel('Intensity (a.u.)', fontsize=26)\n ax.set_xlabel('Energy loss (eV)', fontsize=26)\n \n ax.tick_params(which='major',direction='in',length=10, labelsize=16)\n ax.tick_params(which='minor',length=10)\n ax.set_xticks([1.5, 2, 2.5, 3, 3.5 , 4, 4.5])\n \n p1=ax.plot(np.linspace(-.3, 12, nbins), mean_rep['spectrum14'], 'k--',ls=\"dashdot\",lw=2)\n\n \n p2=ax.plot(np.linspace(-.3, 12, nbins), mean_rep['match14_median'], color=rescolors[1],ls=\"dashed\",lw=2)\n ax.fill_between(np.linspace(-.3, 12, nbins), mean_rep['match14_low'], mean_rep['match14_high'], \\\n color=rescolors[1], alpha=.2)\n p2b=ax.fill(np.NaN,np.NaN,color=rescolors[1],alpha=0.2)\n \n p3 = ax.plot(np.linspace(-.3, 12, nbins), mean_rep['dif14_median'], 'k-',color=rescolors[0],lw=2)\n ax.fill_between(np.linspace(-.3, 12, nbins), mean_rep['dif14_low'], mean_rep['dif14_high'], color=rescolors[0], alpha=.2)\n p3b=ax.fill(np.NaN,np.NaN,color=rescolors[0],alpha=0.2)\n ax.set_yticks([])\n\n ax.legend([(p1[0]),(p3[0],p3b[0]),(p2[0],p2b[0])],['sp14 (orig)','sp14 (subtr)',\"ZLP\"],loc='upper right', fontsize = 19)\n \n axins = ax.inset_axes([0.55, 0.10, 0.43, 0.40])\n \n axins.get_xaxis().set_visible(True)\n axins.get_yaxis().set_visible(True)\n axins.spines['right'].set_visible(True)\n axins.spines['top'].set_visible(True)\n axins.set_xticks([1, 2, 3, 4])\n axins.set_xlim([1,3.02])\n axins.set_ylim([-1e3, 5e3])\n \n p1=axins.plot(np.linspace(-.3, 12, nbins), mean_rep['dif14_median'], 'k-', alpha=.8, label='sp14 (subtr)',color=rescolors[0])\n axins.fill_between(np.linspace(-.3, 12, nbins), mean_rep['dif14_low'], \\\n mean_rep['dif14_high'], color=rescolors[0],alpha=0.2,lw=2)\n p1b=axins.fill(np.NaN,np.NaN,color=rescolors[0],alpha=0.2)\n \n \n x = np.linspace(1.5, 2.7, 100)\n p2=axins.plot(x, bandgap(x, *pars), label='Model fit',color=rescolors[2],lw=2,ls=\"dashed\")\n axins.fill_between(x, bandgap(x, *pars_low), \\\n bandgap(x, *pars_high), color=rescolors[2], alpha=.2)\n p2b=axins.fill(np.NaN,np.NaN,color=rescolors[2],alpha=0.2)\n axins.tick_params(which='both',direction='in', labelsize=12,right=True)\n axins.tick_params(which='major',length=10)\n axins.tick_params(which='minor',length=10)\n axins.set_yticks([])\n axins.legend([(p1[0],p1b[0]),(p2[0],p2b[0])],['sp14 (subtr)','Model fit'],loc='upper left', fontsize=15)\n \n\n \n \n\nplt.tight_layout()\nplt.savefig('SubtractedEELS_plot_sp14.pdf')\nprint(\"Saved fig = 
SubtractedEELS_plot_sp14.pdf\")\n","sub_path":"paper_WS2/Subtractionplots.py","file_name":"Subtractionplots.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"390265818","text":"class Solution(object):\n def findNumbers(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n digits = []\n count = 0\n for i in nums:\n while (i > 0):\n i = i//10\n count += 1\n digits.append(count)\n count = 0\n \n return len(list(filter(lambda a: a%2 == 0, digits)))\n","sub_path":"Python/1295_Find_Numbers_with_Even_Number_of_Digits.py","file_name":"1295_Find_Numbers_with_Even_Number_of_Digits.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"282044678","text":"# Copyright 2014 IBM Corp.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\n\nfrom tempest import auth\nfrom tempest.common import http\nfrom tempest import config\nfrom tempest import exceptions\nfrom tempest.openstack.common.fixture import mockpatch\nfrom tempest.tests import base\nfrom tempest.tests import fake_config\nfrom tempest.tests import fake_http\nfrom tempest.tests import fake_identity\n\n\nclass BaseAuthTestsSetUp(base.TestCase):\n _auth_provider_class = None\n credentials = {\n 'username': 'fake_user',\n 'password': 'fake_pwd',\n 'tenant_name': 'fake_tenant'\n }\n\n def _auth(self, credentials, **params):\n \"\"\"\n returns auth method according to keystone\n \"\"\"\n return self._auth_provider_class(credentials, **params)\n\n def setUp(self):\n super(BaseAuthTestsSetUp, self).setUp()\n self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakeConfig)\n self.fake_http = fake_http.fake_httplib2(return_type=200)\n self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)\n self.auth_provider = self._auth(self.credentials)\n\n\nclass TestBaseAuthProvider(BaseAuthTestsSetUp):\n \"\"\"\n This tests auth.AuthProvider class which is base for the other so we\n obviously don't test not implemented method or the ones which strongly\n depends on them.\n \"\"\"\n _auth_provider_class = auth.AuthProvider\n\n def test_check_credentials_is_dict(self):\n self.assertTrue(self.auth_provider.check_credentials({}))\n\n def test_check_credentials_bad_type(self):\n self.assertFalse(self.auth_provider.check_credentials([]))\n\n def test_instantiate_with_bad_credentials_type(self):\n \"\"\"\n Assure that credentials with bad type fail with TypeError\n \"\"\"\n self.assertRaises(TypeError, self._auth, [])\n\n def test_auth_data_property(self):\n self.assertRaises(NotImplementedError, getattr, self.auth_provider,\n 'auth_data')\n\n def test_auth_data_property_when_cache_exists(self):\n self.auth_provider.cache = 'foo'\n self.useFixture(mockpatch.PatchObject(self.auth_provider,\n 'is_expired',\n return_value=False))\n self.assertEqual('foo', getattr(self.auth_provider, 'auth_data'))\n\n def 
test_delete_auth_data_property_through_deleter(self):\n self.auth_provider.cache = 'foo'\n del self.auth_provider.auth_data\n self.assertIsNone(self.auth_provider.cache)\n\n def test_delete_auth_data_property_through_clear_auth(self):\n self.auth_provider.cache = 'foo'\n self.auth_provider.clear_auth()\n self.assertIsNone(self.auth_provider.cache)\n\n def test_set_and_reset_alt_auth_data(self):\n self.auth_provider.set_alt_auth_data('foo', 'bar')\n self.assertEqual(self.auth_provider.alt_part, 'foo')\n self.assertEqual(self.auth_provider.alt_auth_data, 'bar')\n\n self.auth_provider.reset_alt_auth_data()\n self.assertIsNone(self.auth_provider.alt_part)\n self.assertIsNone(self.auth_provider.alt_auth_data)\n\n\nclass TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):\n _auth_provider_class = auth.KeystoneV2AuthProvider\n\n def setUp(self):\n super(TestKeystoneV2AuthProvider, self).setUp()\n self.stubs.Set(http.ClosingHttp, 'request',\n fake_identity._fake_v2_response)\n self.target_url = 'test_api'\n\n def _get_fake_alt_identity(self):\n return fake_identity.ALT_IDENTITY_V2_RESPONSE['access']\n\n def _get_result_url_from_fake_identity(self):\n return fake_identity.COMPUTE_ENDPOINTS_V2['endpoints'][1]['publicURL']\n\n def _get_token_from_fake_identity(self):\n return fake_identity.TOKEN\n\n def _test_request_helper(self):\n filters = {\n 'service': 'compute',\n 'endpoint_type': 'publicURL',\n 'region': 'fakeRegion'\n }\n\n url, headers, body = self.auth_provider.auth_request('GET',\n self.target_url,\n filters=filters)\n\n result_url = self._get_result_url_from_fake_identity()\n self.assertEqual(url, result_url + '/' + self.target_url)\n self.assertEqual(self._get_token_from_fake_identity(),\n headers['X-Auth-Token'])\n self.assertEqual(body, None)\n\n def test_request(self):\n self._test_request_helper()\n\n def test_request_with_alt_auth(self):\n self.auth_provider.set_alt_auth_data(\n 'body',\n (fake_identity.ALT_TOKEN, self._get_fake_alt_identity()))\n self._test_request_helper()\n # Assert alt auth data is clear after it\n self.assertIsNone(self.auth_provider.alt_part)\n self.assertIsNone(self.auth_provider.alt_auth_data)\n\n def test_request_with_bad_service(self):\n filters = {\n 'service': 'BAD_SERVICE',\n 'endpoint_type': 'publicURL',\n 'region': 'fakeRegion'\n }\n self.assertRaises(exceptions.EndpointNotFound,\n self.auth_provider.auth_request, 'GET',\n 'http://fakeurl.com/fake_api', filters=filters)\n\n def test_request_without_service(self):\n filters = {\n 'service': None,\n 'endpoint_type': 'publicURL',\n 'region': 'fakeRegion'\n }\n self.assertRaises(exceptions.EndpointNotFound,\n self.auth_provider.auth_request, 'GET',\n 'http://fakeurl.com/fake_api', filters=filters)\n\n def test_check_credentials_missing_attribute(self):\n for attr in ['username', 'password']:\n cred = copy.copy(self.credentials)\n del cred[attr]\n self.assertFalse(self.auth_provider.check_credentials(cred))\n\n def test_check_credentials_not_scoped_missing_tenant_name(self):\n cred = copy.copy(self.credentials)\n del cred['tenant_name']\n self.assertTrue(self.auth_provider.check_credentials(cred,\n scoped=False))\n\n def test_check_credentials_missing_tenant_name(self):\n cred = copy.copy(self.credentials)\n del cred['tenant_name']\n self.assertFalse(self.auth_provider.check_credentials(cred))\n\n\nclass TestKeystoneV3AuthProvider(TestKeystoneV2AuthProvider):\n _auth_provider_class = auth.KeystoneV3AuthProvider\n credentials = {\n 'username': 'fake_user',\n 'password': 'fake_pwd',\n 'tenant_name': 
'fake_tenant',\n 'domain_name': 'fake_domain_name',\n }\n\n def setUp(self):\n super(TestKeystoneV3AuthProvider, self).setUp()\n self.stubs.Set(http.ClosingHttp, 'request',\n fake_identity._fake_v3_response)\n\n def _get_fake_alt_identity(self):\n return fake_identity.ALT_IDENTITY_V3['token']\n\n def _get_result_url_from_fake_identity(self):\n return fake_identity.COMPUTE_ENDPOINTS_V3['endpoints'][1]['url']\n\n def test_check_credentials_missing_tenant_name(self):\n cred = copy.copy(self.credentials)\n del cred['domain_name']\n self.assertFalse(self.auth_provider.check_credentials(cred))\n","sub_path":"tempest/tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"350815274","text":"# -*- coding: utf-8 -*-\n# /usr/bin/python2\n'''\nBy kyubyong park. kbpark.linguist@gmail.com.\nhttps://www.github.com/kyubyong/dc_tts\n'''\n#from hyperparams import Hyperparams as hp\nimport numpy as np\nimport librosa\nimport librosa.display\nimport copy\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom scipy.signal import get_window\nimport os\nimport yaml\nfrom .attrdict import Config\nimport soundfile as sf\n# r9r9 preprocessing\nimport lws\nfrom typing import Dict, List, Optional, Tuple, Union, Callable\n\n\ndef plot_spectrogram(mag, save=''):\n librosa.display.specshow(mag, x_axis='off')\n plt.title('spectrogram')\n if save != '':\n plt.savefig(save, format='jpg')\n else:\n plt.show()\n\n\ndef _butter_highpass(cutoff, fs:int, order:Optional[int]=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)\n return b, a\n\ndef _pySTFT(x:np.ndarray, n_fft:Optional[int]=1024, hop_length:Optional[int]=256):\n\n x = np.pad(x, int(n_fft//2), mode='reflect')\n\n noverlap = n_fft - hop_length\n shape = x.shape[:-1]+((x.shape[-1]-noverlap)//hop_length, n_fft)\n strides = x.strides[:-1]+(hop_length*x.strides[-1], x.strides[-1])\n result = np.lib.stride_tricks.as_strided(x, shape=shape,\n strides=strides)\n\n fft_window = get_window('hann', n_fft, fftbins=True)\n result = np.fft.rfft(fft_window * result, n=n_fft).T\n\n return np.abs(result)\n\n\n\nclass Dsp():\n def __init__(self, config: Optional[Union[str, Config]] = 'config/dsp.yaml'):\n self.load_config(config)\n self._build_mel_basis()\n self._build_processor()\n self.b, self.a = _butter_highpass(30, self.hparams.sample_rate, order=5)\n\n def load_config(self, config: Union[str, Config]):\n if isinstance(config, str):\n self.hparams = Config.yaml_load(config)\n elif isinstance(config, Config):\n self.hparams = config\n\n def load_wav(self, path: 'PathLike[any]', trim=True):\n try:\n x, sr = sf.read(path)\n signed_int16_max = 2**15\n if x.dtype == np.int16:\n x = x.astype(np.float32) / signed_int16_max\n if sr != self.hparams.sample_rate:\n x = librosa.resample(x, sr, self.hparams.sample_rate)\n if trim:\n x, _ = librosa.effects.trim(x, top_db=15)\n x = np.clip(x, -1.0, 1.0)\n y = signal.filtfilt(self.b, self.a, x)\n return y\n except:\n print(f'Error: {path} is an invalid wavefile.')\n return -1\n\n def save_wav(self, wav: np.ndarray, path: 'PathLike[any]'):\n wav = wav * 32767 / max(0.01, np.max(np.abs(wav)))\n sf.write(path, wav.astype(np.int16), self.hparams.sample_rate)\n\n def spectrogram(self, y):\n D = self.processor.stft(self._preemphasis(y)).T\n S = self._amp_to_db(np.abs(D)) - self.hparams.ref_level_db\n return 
self._normalize(S).astype(np.float32)\n\n    def inv_spectrogram(self, spectrogram):\n        '''Converts spectrogram to waveform using librosa'''\n        S = self._db_to_amp(self._denormalize(spectrogram) + self.hparams.ref_level_db) # Convert back to linear\n        D = self.processor.run_lws(S.astype(np.float64).T ** self.hparams.power)\n        y = self.processor.istft(D).astype(np.float32)\n        return self._inv_preemphasis(y)\n\n    def melspectrogram(self, y: np.ndarray):\n        y = self._preemphasis(y)\n        D = _pySTFT(y, n_fft=self.hparams.fft_size, hop_length=self.hparams.hop_size)\n        S = self._amp_to_db(self._linear_to_mel(D)) - self.hparams.ref_level_db\n        if not self.hparams.allow_clipping_in_normalization:\n            assert S.max() <= 0 and S.min() - self.hparams.min_level_db >= 0\n        return self._normalize(S).astype(np.float32)\n\n\n    def inv_melspectrogram(self, melspectrogram):\n        '''Converts spectrogram to waveform using librosa'''\n        S = self._db_to_amp(self._denormalize(melspectrogram) + self.hparams.ref_level_db) # Convert back to linear\n        S = self._mel_to_linear(S)\n        y = self._griffin_lim(S)\n        return self._inv_preemphasis(y)\n\n    def melspectrogram2wav(self, melspectrogram, save=None):\n        y = self.inv_melspectrogram(melspectrogram)\n        if save is not None:\n            self.save_wav(y, save)\n        return y\n\n    def _build_mel_basis(self):\n        if self.hparams.fmax is not None:\n            assert self.hparams.fmax <= self.hparams.sample_rate // 2\n        self._mel_basis = librosa.filters.mel(self.hparams.sample_rate, self.hparams.fft_size,\n                                              fmin=self.hparams.fmin, fmax=self.hparams.fmax,\n                                              n_mels=self.hparams.num_mels)\n\n    def _linear_to_mel(self, spectrogram):\n        return np.dot(self._mel_basis, spectrogram)\n\n    def _mel_to_linear(self, melspectrogram):\n        def _mel_to_linear_matrix():\n            m_t = np.transpose(self._mel_basis)\n            p = np.matmul(self._mel_basis, m_t)\n            d = [1.0 / x if np.abs(x) > 1.0e-8 else x for x in np.sum(p, axis=0)]\n            return np.matmul(m_t, np.diag(d))\n        m = _mel_to_linear_matrix()\n        mag = np.dot(m, melspectrogram)\n        return mag\n\n    def _preemphasis(self, x):\n        from nnmnkwii.preprocessing import preemphasis\n        return preemphasis(x, self.hparams.preemphasis)\n\n    def _inv_preemphasis(self, x):\n        from nnmnkwii.preprocessing import inv_preemphasis\n        return inv_preemphasis(x, self.hparams.preemphasis)\n\n    def _amp_to_db(self, x):\n        min_level = np.exp(self.hparams.min_level_db / 20 * np.log(10))\n        return 20 * np.log10(np.maximum(min_level, x))\n\n    def _db_to_amp(self, x):\n        return np.power(10.0, x * 0.05)\n\n    def _normalize(self, S):\n        return np.clip((S - self.hparams.min_level_db) / -self.hparams.min_level_db, 0, 1)\n\n    def _denormalize(self, S):\n        return (np.clip(S, 0, 1) * -self.hparams.min_level_db) + self.hparams.min_level_db\n\n    def _build_processor(self):\n        self.processor = lws.lws(self.hparams.fft_size, self.hparams.hop_size, mode=\"speech\")\n\n\n    def _griffin_lim(self, melspectrogram):\n        '''Applies Griffin-Lim's algorithm.\n        '''\n        X_best = copy.deepcopy(melspectrogram)\n        for i in range(100):\n            X_t = librosa.istft(X_best, self.hparams.hop_size)\n            est = librosa.stft(X_t, self.hparams.fft_size, hop_length=self.hparams.hop_size)\n            phase = est / np.maximum(1e-8, np.abs(est))\n            X_best = melspectrogram * phase\n        X_t = librosa.istft(X_best, self.hparams.hop_size)\n        y = np.real(X_t)\n\n        return y\n\n\n\n","sub_path":"dsp.py","file_name":"dsp.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
{"seq_id":"128185811","text":"\nimport pygame\n\nfrom lib.Boards.Board import 
Board\nfrom lib.Actors.Player import Player\nfrom lib.Actors.Fruit import Fruit\n\n\nclass Scoreboard(Board):\n\t\n\tdef __init__(self, settings, screen, width, height, game):\n\t\t\n\t\tsuper().__init__(settings, screen, width, height, game)\n\t\t\n\t\tself.font_bold = pygame.font.SysFont(None, self.calculate_font_height(), True)\n\t\tself.font = pygame.font.SysFont(None, self.calculate_font_height())\n\t\t\n\t\tself.level = 0\n\t\t\n\t\tself.score = 0\n\t\tself.high_score = 0\n\t\t\n\t\tself.player_dummy = None\n\t\tself.initialize_dummy_player()\n\t\t\n\t\tself.lives_remaining = None\n\t\tself.set_lives_remaining(self.settings.default_extra_lives)\n\t\t\n\t\tself.fruit_dummy = None\n\t\tself.fruit_count = 3\n\t\tself.initialize_dummy_fruit()\n\t\n\tdef adjust_level(self, n):\n\t\t\n\t\tself.set_level(self.level + n)\n\t\n\tdef set_level(self, n):\n\t\n\t\tself.level = n\n\t\t\n\t\tself.dirty()\n\t\n\tdef get_level(self):\n\t\t\n\t\treturn self.level\n\t\n\tdef adjust_fruit_count(self, n):\n\t\t\n\t\tself.set_fruit_count(self.fruit_count + n)\n\t\n\tdef set_fruit_count(self, n):\n\t\t\n\t\tif n < 0:\n\t\t\tn = 0\n\t\t\n\t\tself.fruit_count = n\n\t\n\tdef calculate_font_height(self):\n\t\t\n\t\twidth, height = self.get_effective_dimensions()\n\t\t\n\t\tfont_h = height\n\t\tfont_h -= (self.settings.scoreboard_padding * 2)\n\t\tfont_h /= 2.0\n\t\t\n\t\tfont_h = int(font_h)\n\t\t\n\t\treturn font_h\n\t\n\tdef initialize_dummy_player(self):\n\t\t\n\t\tself.player_dummy = Player(\n\t\t\tself.settings,\n\t\t\tself.settings.scoreboard_player_size,\n\t\t\tself.game,\n\t\t\tNone, self.image,\n\t\t\tself\n\t\t)\n\t\t\n\t\tself.player_dummy.set_can_move(False)\n\t\tself.player_dummy.disable_fruit_eating()\n\t\t\n\t\tanimation = self.player_dummy.get_animation()\n\t\tanimation.set_fps(self.settings.scoreboard_player_fps)\n\t\n\tdef initialize_dummy_fruit(self):\n\t\t\n\t\tself.fruit_dummy = Fruit(\n\t\t\tself.settings, None,\n\t\t\t\"Fruit\", self.settings.scoreboard_player_size,\n\t\t\tself.game, None, self.image\n\t\t)\n\t\t\n\t\tself.fruit_dummy.set_can_move(False)\n\t\t\n\t\tanimation = self.fruit_dummy.get_animation()\n\t\tanimation.set_fps(self.settings.scoreboard_fruit_fps)\n\t\n\tdef adjust_lives_remaining(self, n):\n\t\t\n\t\treturn self.set_lives_remaining(self.lives_remaining + n)\n\t\t\n\tdef set_lives_remaining(self, n):\n\t\t\n\t\tif n < 0:\n\t\t\tn = 0\n\t\t\n\t\tself.lives_remaining = n\n\t\t\n\t\tself.dirty()\n\t\t\n\t\treturn self.lives_remaining\n\t\n\tdef get_lives_remaining(self):\n\t\t\n\t\treturn self.lives_remaining\n\t\n\tdef adjust_score(self, p):\n\t\n\t\tself.set_score(self.score + p)\n\t\n\tdef set_score(self, p):\n\t\t\n\t\tself.score = p\n\t\t\n\t\tself.check_high_score()\n\t\t\n\t\tself.dirty()\n\t\n\tdef get_score(self):\n\t\t\n\t\treturn self.score\n\t\n\tdef check_high_score(self):\n\t\n\t\tif self.score > self.high_score:\n\t\t\n\t\t\tself.high_score = self.score\n\t\t\n\t\tself.dirty()\n\t\n\tdef get_high_score(self):\n\t\t\n\t\treturn self.high_score\n\t\n\tdef update(self, elapsed_ms):\n\t\n\t\tsuper().update(elapsed_ms)\n\t\t\n\t\tself.player_dummy.update(elapsed_ms)\n\t\tself.fruit_dummy.update(elapsed_ms)\n\t\n\tdef draw(self, force=False):\n\t\t\n\t\tif self.image and (not self.is_dirty()) and (not force):\n\t\t\treturn\n\t\t\n\t\timage = self.create_image_surface()\n\t\t\n\t\t# Fill with 
background\n\t\timage.fill(self.settings.scoreboard_background_color)\n\t\t\n\t\t#\n\t\tself.draw_score(image)\n\t\tself.draw_lives(image)\n\t\tself.draw_level(image)\n\t\t\n\t\tself.image = image\n\t\n\tdef draw_score(self, image):\n\t\t\n\t\tif not image:\n\t\t\timage = self.image\n\t\t\n\t\timage_rect = image.get_rect()\n\t\t\n\t\t# Draw the word \"Score\", lol\n\t\tscore_word_image = self.font_bold.render(\n\t\t\t\"Score: \", True,\n\t\t\tself.settings.scoreboard_font_color,\n\t\t\tself.settings.scoreboard_background_color\n\t\t)\n\t\tscore_word_image_rect = score_word_image.get_rect()\n\t\t\n\t\t# Draw the actual score\n\t\tscore_image = self.font.render(\n\t\t\tstr(round(self.get_score())), True,\n\t\t\tself.settings.scoreboard_font_color,\n\t\t\tself.settings.scoreboard_background_color\n\t\t)\n\t\tscore_image_rect = score_image.get_rect()\n\t\t\n\t\t# Blit the word score\n\t\ty = int((image_rect.height - score_word_image_rect.height) / 2)\n\t\tx = self.settings.scoreboard_padding\n\t\timage.blit(\n\t\t\tscore_word_image,\n\t\t\t(x, y)\n\t\t)\n\t\tx += score_word_image_rect.width\n\t\t\n\t\t# Blit the actual score\n\t\ty = int((image_rect.height - score_image_rect.height) / 2)\n\t\tx += self.settings.scoreboard_padding\n\t\timage.blit(\n\t\t\tscore_image,\n\t\t\t(x, y)\n\t\t)\n\t\n\tdef draw_lives(self, image):\n\t\t\n\t\tif not image:\n\t\t\timage = self.image\n\t\t\n\t\timage_rect = image.get_rect()\n\t\t\n\t\t# Draw the word \"Lives\"\n\t\timage_word = self.font_bold.render(\n\t\t\t\"Lives: \", True,\n\t\t\tself.settings.scoreboard_font_color,\n\t\t\tself.settings.scoreboard_background_color\n\t\t)\n\t\timage_word_rect = image_word.get_rect()\n\t\t\n\t\t# Blit the word \"Lives\"\n\t\tx = int((image_rect.width / 2) - image_word_rect.width)\n\t\ty = int((image_rect.height - image_word_rect.height) / 2)\n\t\timage.blit(\n\t\t\timage_word,\n\t\t\t(x, y)\n\t\t)\n\t\tx += image_word_rect.width\n\t\t\n\t\t# Draw player icons for lives remaining\n\t\tfor i in range(self.lives_remaining):\n\t\t\t\n\t\t\tanimation = self.player_dummy.get_animation()\n\t\t\tif not animation:\n\t\t\t\traise Exception(\"Animation isn't ever supposed to be None here\")\n\t\t\tanimation_image = animation.get_image()\n\t\t\tif not animation_image:\n\t\t\t\traise Exception(\"Animation image isn't ever supposed to be None here\")\n\t\t\tanimation_image_rect = animation_image.get_rect()\n\t\t\t\n\t\t\ty = int((image_rect.height / 2))\n\t\t\tx += self.settings.scoreboard_padding * 2\n\t\t\tself.player_dummy.set_screen_location(x, y)\n\t\t\tself.player_dummy.blitme(image)\n\t\t\tx += animation_image_rect.width\n\t\n\tdef draw_level(self, image):\n\t\t\n\t\tif not image:\n\t\t\timage = self.image\n\t\t\n\t\timage_rect = image.get_rect()\n\t\t\n\t\t# Draw the word \"Level\"\n\t\timage_level_word = self.font_bold.render(\n\t\t\t\"Level: \", True,\n\t\t\tself.settings.scoreboard_font_color,\n\t\t\tself.settings.scoreboard_background_color\n\t\t)\n\t\timage_level_word_rect = image_level_word.get_rect()\n\t\t\n\t\t# Draw the actual level number\n\t\timage_level = self.font.render(\n\t\t\tstr(self.level), True,\n\t\t\tself.settings.scoreboard_font_color,\n\t\t\tself.settings.scoreboard_background_color\n\t\t)\n\t\timage_level_rect = image_level.get_rect()\n\t\t\n\t\t# Grab the fruit animation image\n\t\tfruit_animation = self.fruit_dummy.get_animation()\n\t\tif not fruit_animation:\n\t\t\traise Exception(\"Animation isn't ever supposed to be None here\")\n\t\tfruit_animation_image = fruit_animation.get_image()\n\t\tif not 
fruit_animation_image:\n\t\t\traise Exception(\"Animation image isn't ever supposed to be None here\")\n\t\tfruit_animation_image_rect = fruit_animation_image.get_rect()\n\t\t\n\t\t# Start X and Y positions\n\t\tx = image_rect.width\n\t\tx -= self.settings.scoreboard_padding\n\t\tx -= image_level_word_rect.width\n\t\tx -= image_level_rect.width\n\t\tx -= self.settings.scoreboard_padding\n\t\tx -= (self.fruit_count * fruit_animation_image_rect.width)\n\t\tx -= (self.fruit_count * self.settings.scoreboard_padding)\n\t\ty_left = int(\n\t\t\timage_rect.height - max(\n\t\t\t\timage_level_rect.height,\n\t\t\t\timage_level_word_rect.height,\n\t\t\t\tfruit_animation_image_rect.height\n\t\t\t)\n\t\t)\n\t\ty = int(y_left / 2)\n\t\t\n\t\t# Blit the word \"Level\"\n\t\timage.blit(image_level_word, (x, y))\n\t\tx += image_level_word_rect.width\n\t\t\n\t\t# Blit the level number\n\t\tx += self.settings.scoreboard_padding\n\t\timage.blit(image_level, (x, y))\n\t\tx += image_level_rect.width\n\t\t\n\t\t# Blit the Fruit icons for fruit remaining\n\t\tfor i in range(self.fruit_count):\n\t\t\t\n\t\t\tx += self.settings.scoreboard_padding\n\t\t\tself.fruit_dummy.set_screen_location(\n\t\t\t\tint(x + (fruit_animation_image_rect.width / 2)),\n\t\t\t\tint(y + (fruit_animation_image_rect.height / 2))\n\t\t\t)\n\t\t\tself.fruit_dummy.blitme(image)\n\t\t\tx += fruit_animation_image_rect.width\n","sub_path":"app/lib/Boards/Scoreboard.py","file_name":"Scoreboard.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"511015658","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport kfp.dsl as dsl\nimport kfp.gcp as gcp\n\nimport datetime\n\n\n@dsl.pipeline(\n name='Workflow 2',\n description='Demonstrate TFT-based feature processing, TFMA, TFJob, BQ ingestion, and CMLE OP'\n)\ndef workflow2(\n input_handle_eval: dsl.PipelineParam=dsl.PipelineParam(name='input-handle-eval', value='bigquery-public-data.chicago_taxi_trips.taxi_trips'),\n input_handle_train: dsl.PipelineParam=dsl.PipelineParam(name='input-handle-train', value='bigquery-public-data.chicago_taxi_trips.taxi_trips'),\n outfile_prefix_eval: dsl.PipelineParam=dsl.PipelineParam(name='outfile-prefix-eval', value='eval_transformed'),\n outfile_prefix_train: dsl.PipelineParam=dsl.PipelineParam(name='outfile-prefix-train', value='train_transformed'),\n train_steps: dsl.PipelineParam=dsl.PipelineParam(name='train-steps', value=10000),\n project: dsl.PipelineParam=dsl.PipelineParam(name='project', value='YOUR_PROJECT_HERE'),\n working_dir: dsl.PipelineParam=dsl.PipelineParam(name='working-dir', value='YOUR_GCS_DIR_HERE'),\n tft_setup_file: dsl.PipelineParam=dsl.PipelineParam(name='tft-setup-file', value='/ml/transform/setup.py'),\n tfma_setup_file: dsl.PipelineParam=dsl.PipelineParam(name='tfma-setup-file', value='/ml/analysis/setup.py'),\n workers: dsl.PipelineParam=dsl.PipelineParam(name='workers', value=2),\n pss: 
dsl.PipelineParam=dsl.PipelineParam(name='pss', value=1),\n max_rows: dsl.PipelineParam=dsl.PipelineParam(name='max-rows', value=10000),\n ts1_1: dsl.PipelineParam=dsl.PipelineParam(name='ts1-1', value='2016-02-01 00:00:00'),\n ts2_1: dsl.PipelineParam=dsl.PipelineParam(name='ts2-1', value='2016-03-01 00:00:00'),\n ts1_2: dsl.PipelineParam=dsl.PipelineParam(name='ts1-2', value='2013-01-01 00:00:00'),\n ts2_2: dsl.PipelineParam=dsl.PipelineParam(name='ts2-2', value='2016-03-01 00:00:00'),\n preprocessing_module: dsl.PipelineParam=dsl.PipelineParam(name='preprocessing-module1', value='gs://aju-dev-demos-codelabs/KF/taxi-preproc/preprocessing.py'),\n preprocess_mode: dsl.PipelineParam=dsl.PipelineParam(name='preprocess-mode', value='local'),\n tfma_mode: dsl.PipelineParam=dsl.PipelineParam(name='tfma-mode', value='local')):\n\n\n tfteval = dsl.ContainerOp(\n name = 'tft-eval',\n image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',\n arguments = [ \"--input_handle\", input_handle_eval, \"--outfile_prefix\", outfile_prefix_eval,\n \"--working_dir\", '%s/%s/tft-eval' % (working_dir, '{{workflow.name}}'),\n \"--project\", project,\n \"--mode\", preprocess_mode,\n \"--setup_file\", tft_setup_file,\n \"--max_rows\", max_rows,\n \"--ts1\", ts1_1,\n \"--ts2\", ts2_1,\n \"--stage\", \"eval\",\n \"--preprocessing_module\", preprocessing_module]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n tfttrain = dsl.ContainerOp(\n name = 'tft-train',\n image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',\n arguments = [ \"--input_handle\", input_handle_train, \"--outfile_prefix\", outfile_prefix_train,\n \"--working_dir\", '%s/%s/tft-train' % (working_dir, '{{workflow.name}}'),\n \"--project\", project,\n \"--mode\", preprocess_mode,\n \"--setup_file\", tft_setup_file,\n \"--max_rows\", max_rows,\n \"--ts1\", ts1_1,\n \"--ts2\", ts2_1,\n \"--stage\", \"train\",\n \"--preprocessing_module\", preprocessing_module]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n tfteval2 = dsl.ContainerOp(\n name = 'tft-eval2',\n image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',\n arguments = [ \"--input_handle\", input_handle_eval, \"--outfile_prefix\", outfile_prefix_eval,\n \"--working_dir\", '%s/%s/tft-eval2' % (working_dir, '{{workflow.name}}'),\n \"--project\", project,\n \"--mode\", preprocess_mode,\n \"--setup_file\", tft_setup_file,\n \"--max_rows\", max_rows,\n \"--ts1\", ts1_2,\n \"--ts2\", ts2_2,\n \"--stage\", \"eval\",\n \"--preprocessing_module\", preprocessing_module]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n tfttrain2 = dsl.ContainerOp(\n name = 'tft-train2',\n image = 'gcr.io/google-samples/ml-pipeline-dataflow-tftbq-taxi',\n arguments = [ \"--input_handle\", input_handle_train, \"--outfile_prefix\", outfile_prefix_train,\n \"--working_dir\", '%s/%s/tft-train2' % (working_dir, '{{workflow.name}}'),\n \"--project\", project,\n \"--mode\", preprocess_mode,\n \"--setup_file\", tft_setup_file,\n \"--max_rows\", max_rows,\n \"--ts1\", ts1_2,\n \"--ts2\", ts2_2,\n \"--stage\", \"train\",\n \"--preprocessing_module\", preprocessing_module]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n\n train = dsl.ContainerOp(\n name = 'train',\n image = 'gcr.io/google-samples/ml-pipeline-kubeflow-tf-taxi',\n arguments = [ \"--tf-transform-dir\", '%s/%s/tft-train' % (working_dir, '{{workflow.name}}'),\n \"--output-dir\", '%s/%s/tf' % (working_dir, '{{workflow.name}}'),\n \"--working-dir\", '%s/%s/tf/serving_model_dir' % (working_dir, '{{workflow.name}}'),\n \"--job-dir\", '%s/%s/tf' % 
(working_dir, '{{workflow.name}}'),\n \"--train-files-dir\", '%s/%s/tft-train' % (working_dir, '{{workflow.name}}'),\n \"--eval-files-dir\", '%s/%s/tft-eval' % (working_dir, '{{workflow.name}}'),\n \"--train-files-prefix\", outfile_prefix_train,\n \"--eval-files-prefix\", outfile_prefix_eval,\n \"--train-steps\", train_steps,\n \"--workers\", workers,\n \"--pss\", pss]\n )\n train.after(tfteval)\n train.after(tfttrain)\n\n train2 = dsl.ContainerOp(\n name = 'train2',\n image = 'gcr.io/google-samples/ml-pipeline-kubeflow-tf-taxi',\n arguments = [ \"--tf-transform-dir\", '%s/%s/tft-train2' % (working_dir, '{{workflow.name}}'),\n \"--output-dir\", '%s/%s/tf2' % (working_dir, '{{workflow.name}}'),\n \"--working-dir\", '%s/%s/tf2/serving_model_dir' % (working_dir, '{{workflow.name}}'),\n \"--job-dir\", '%s/%s/tf2' % (working_dir, '{{workflow.name}}'),\n \"--train-files-dir\", '%s/%s/tft-train2' % (working_dir, '{{workflow.name}}'),\n \"--eval-files-dir\", '%s/%s/tft-eval2' % (working_dir, '{{workflow.name}}'),\n \"--train-files-prefix\", outfile_prefix_train,\n \"--eval-files-prefix\", outfile_prefix_eval,\n \"--train-steps\", train_steps,\n \"--workers\", workers,\n \"--pss\", pss]\n )\n train2.after(tfteval2)\n train2.after(tfttrain2)\n\n analyze = dsl.ContainerOp(\n name = 'analyze',\n image = 'gcr.io/google-samples/ml-pipeline-dataflow-tfma-taxi',\n arguments = [\"--input_csv\", '%s/%s/tft-eval/eval.csv-00000-of-00001' % (working_dir, '{{workflow.name}}'),\n \"--tfma_run_dir\", '%s/%s/tfma/output' % (working_dir, '{{workflow.name}}'),\n \"--eval_model_dir\", '%s/%s/tf/eval_model_dir' % (working_dir, '{{workflow.name}}'),\n \"--mode\", tfma_mode,\n \"--setup_file\", tfma_setup_file,\n \"--project\", project]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n analyze2 = dsl.ContainerOp(\n name = 'analyze2',\n image = 'gcr.io/google-samples/ml-pipeline-dataflow-tfma-taxi',\n arguments = [\"--input_csv\", '%s/%s/tft-eval/eval.csv-00000-of-00001' % (working_dir, '{{workflow.name}}'),\n \"--tfma_run_dir\", '%s/%s/tfma2/output' % (working_dir, '{{workflow.name}}'),\n \"--eval_model_dir\", '%s/%s/tf2/eval_model_dir' % (working_dir, '{{workflow.name}}'),\n \"--mode\", tfma_mode,\n \"--setup_file\", tfma_setup_file,\n \"--project\", project]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n cmleop = dsl.ContainerOp(\n name = 'cmleop',\n image = 'gcr.io/google-samples/ml-pipeline-cmle-op',\n arguments = [\"--gcs-path\", '%s/%s/tf/serving_model_dir/export/chicago-taxi' % (working_dir, '{{workflow.name}}'),\n \"--version-name\", '{{workflow.name}}',\n \"--project\", project]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n cmleop2 = dsl.ContainerOp(\n name = 'cmleop2',\n image = 'gcr.io/google-samples/ml-pipeline-cmle-op',\n arguments = [\"--gcs-path\", '%s/%s/tf2/serving_model_dir/export/chicago-taxi' % (working_dir, '{{workflow.name}}'),\n \"--version-name\", '{{workflow.name}}_2',\n \"--project\", project]\n ).apply(gcp.use_gcp_secret('user-gcp-sa'))\n\n\n analyze.after(train)\n analyze.after(tfteval)\n analyze2.after(tfteval)\n analyze2.after(train2)\n cmleop.after(train)\n cmleop2.after(train2)\n\nif __name__ == '__main__':\n import kfp.compiler as compiler\n compiler.Compiler().compile(workflow2, __file__ + '.tar.gz')\n","sub_path":"ml/kubeflow-pipelines/samples/kubeflow-tf/older/workflow2.py","file_name":"workflow2.py","file_ext":"py","file_size_in_byte":9228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"393478179","text":"\n\nfrom 
xai.brain.wordbase.nouns._avatar import _AVATAR\n\n#calss header\nclass _AVATARS(_AVATAR, ):\n\tdef __init__(self,): \n\t\t_AVATAR.__init__(self)\n\t\tself.name = \"AVATARS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"avatar\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_avatars.py","file_name":"_avatars.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"332203695","text":"import tkinter\nimport tkinter.colorchooser\n\nroot = tkinter.Tk()\nroot.geometry(\"300x300\")\n\ndef color():\n result = tkinter.colorchooser.askcolor(color=\"yellow\")\n print(result)\n root[\"bg\"] = result[1]\nbtn1 = tkinter.Button(root, text=\"选择颜色\", command=color)\nbtn1.pack()\n\nroot.mainloop()","sub_path":"004-GUI编程/Tkinter/xidilian/21、tkinter_colorchooser.py","file_name":"21、tkinter_colorchooser.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"609273143","text":"# -*- coding: utf-8 -*-\nfrom kafka import KafkaConsumer, KafkaProducer\nfrom json import dumps\nfrom helper.settings import logger, http_response_400, http_response_200\nfrom typing import Optional\n\n# =====================================================================\n# ------------------------- Kafka Connector ---------------------------\n# =====================================================================\n\n\nclass KafkaConnector:\n def __init__(self, topic_consumer, topic_producer, group_id,\n bootstrap_servers=None, enable_auto_commit=False,\n auto_offset_reset=\"earliest\", max_poll_records=3,\n max_poll_interval_ms=600000,\n session_timeout_ms=100000,\n heartbeat_interval_ms=20000):\n\n self.topic_consumer: str = topic_consumer\n self.topic_producer: str = topic_producer\n self.group_id: str = group_id\n self.bootstrap_servers = bootstrap_servers\n self.enable_auto_commit: bool = enable_auto_commit\n self.auto_offset_reset: str = auto_offset_reset\n self.max_poll_records: int = max_poll_records\n self.max_poll_interval_ms: int = max_poll_interval_ms\n self.session_timeout_ms: int = session_timeout_ms\n self.heartbeat_interval_ms: int = heartbeat_interval_ms\n self.connection: bool = False\n self.consumer: Optional[KafkaConsumer] = None\n self.producer: Optional[KafkaProducer] = None\n\n def verify_kafka_connection(self):\n response: dict = {\"status\": 400,\n \"message\": http_response_400}\n try:\n self.connection = False\n self.init_kafka_consumer()\n if self.consumer is not None:\n topics = self.consumer.topics()\n if topics:\n self.connection = True\n response[\"status\"] = 200\n response[\"message\"] = http_response_200\n else:\n logger.error(\"No topics available, the connection to Kafka will be closed\")\n except Exception as e:\n logger.error(e)\n return response\n\n def init_kafka_consumer(self):\n self.connection: bool = False\n try:\n self.consumer: KafkaConsumer = KafkaConsumer(self.topic_consumer,\n group_id=self.group_id,\n bootstrap_servers=self.bootstrap_servers,\n auto_offset_reset=self.auto_offset_reset,\n enable_auto_commit=self.enable_auto_commit,\n max_poll_records=self.max_poll_records,\n max_poll_interval_ms=self.max_poll_interval_ms,\n session_timeout_ms=self.session_timeout_ms,\n heartbeat_interval_ms=self.heartbeat_interval_ms,\n api_version=(2,))\n self.connection: bool = True\n except ConnectionError as ce:\n logger.error(ce)\n except Exception as e:\n logger.error(e)\n\n def 
init_kafka_producer(self):\n self.connection: bool = False\n try:\n self.producer: KafkaProducer = KafkaProducer(bootstrap_servers=self.bootstrap_servers,\n value_serializer=lambda x: dumps(x).encode('utf-8'),\n api_version=(2,))\n self.connection: bool = True\n except ConnectionError as ce:\n logger.error(ce)\n except Exception as e:\n logger.error(e)\n\n def put_data_into_topic(self, data: dict):\n try:\n if self.producer is not None:\n self.producer.send(topic=self.topic_producer, value=data)\n self.producer.flush()\n except ConnectionError as ce:\n logger.error(ce)\n self.connection: bool = False\n except Exception as e:\n logger.error(e)\n","sub_path":"code/Source-credibility/connectors/kafka_connector.py","file_name":"kafka_connector.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"71656974","text":"#!/usr/bin/env python3\n# POC add rows\nimport tkinter as tk\nclass App:\n def new_row(self):\n new_entry = tk.Entry(root)\n self.num_rows += 1\n new_entry.grid(column=0, row=self.num_rows)\n def __init__(self): #the code that executes when an class is initiated\n self.num_rows = 1\n createRow_button = tk.Button(root, text='New Row', command=self.new_row) #self refers to the current instance of the class, used to access variables/functions within the class\n createRow_button.grid()\nroot = tk.Tk() #creates the Tk window thing\napp = App() #initiates an instance of the App class called app\nroot.mainloop()","sub_path":"wips/add_button.py","file_name":"add_button.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"46066440","text":"# -*- coding: utf-8 -*-\n\n# Epoch: 0, Cost: 1.116698\n# Epoch: 1000, Cost: 0.068308\n# Epoch: 2000, Cost: 0.041813\n# Epoch: 3000, Cost: 0.032390\n# Epoch: 4000, Cost: 0.027661\n# Epoch: 5000, Cost: 0.024789\n# Epoch: 6000, Cost: 0.022837\n# Epoch: 7000, Cost: 0.021408\n# Epoch: 8000, Cost: 0.020304\n# Epoch: 9000, Cost: 0.019416\n# Epoch: 10000, Cost: 0.018671\n\nimport numpy as np\nfrom toydata import load_spiral_dataset, plot_decision_boundary\n\nfrom simplenn.model import Model\nfrom simplenn.utils import GroundTruth\n\nnp.random.seed(1024)\n\nX, y = load_spiral_dataset(3)\ngt = GroundTruth(y)\nm, n_x = X.shape\nn_y = 3\nn_h = 20\ndecay = 0\nlearning_rate = 1\nnum_epochs = 10_000\n\n# Use Linear + CE\n# model = Model(\n# layers=[\n# (\"LINEAR\", n_x, n_h),\n# (\"RELU\",),\n# (\"LINEAR\", n_h, n_y),\n# ],\n# loss=(\"CE\",),\n# solver=(\"SGD\", learning_rate, decay))\n\n# Or use LOG_SOFTMAX + NLL\nmodel = Model(\n layers=[\n (\"LINEAR\", n_x, n_h),\n (\"RELU\",),\n (\"LINEAR\", n_h, n_y),\n (\"LOG_SOFTMAX\", n_h, n_y),\n ],\n loss=(\"NLL\",),\n solver=(\"SGD\", learning_rate, decay))\n\n# for epoch in range(1, num_epochs + 1):\n# try:\n# model.TrainStep(X, gt)\n# if epoch % 100 == 0:\n# print(model.cost)\n# except KeyboardInterrupt:\n# print(\"Keyboard Interrupted\")\n# break\n\n# for epoch in range(1, num_epochs // 100 + 1):\n# try:\n# model.Train100Steps(X, gt)\n# print(model.cost)\n# except KeyboardInterrupt:\n# print(\"Keyboard Interrupted\")\n# break\n\nmodel.Train(X, gt, num_epochs, 1000)\n\nplot_decision_boundary(lambda x: model.Predict(x), X, y)\n","sub_path":"example/Test-Flower-3-CE.py","file_name":"Test-Flower-3-CE.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"291694168","text":"# https://chrisalbon.com/machine_learning/trees_and_forests/random_forest_classifier_example/\n\n# Load the library with the iris dataset\nfrom sklearn.datasets import load_iris\n# Load scikit's random forest classifier library\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n# Create an object called iris with the iris data\niris = load_iris()\n\n# Create a dataframe with the four feature variables\ndf = pd.DataFrame(iris.data, columns=iris.feature_names)\n\n# View the top 5 rows\ndf.head()\n\ndf['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)\n\n# View the top 5 rows\ndf.head()\n\n\n# train test split\ntrain,test = train_test_split(df)\n\n\n# Show the number of observations for the test and training dataframes\nprint('Number of observations in the training data:', len(train))\nprint('Number of observations in the test data:',len(test))\n\n# Create a list of the feature column's names\nfeatures = df.columns[:4]\n\n\n# train['species'] contains the actual species names. Before we can use it,\n# we need to convert each species name into a digit. So, in this case there\n# are three species, which have been coded as 0, 1, or 2.\ny = pd.factorize(train['species'])[0]\n\n# View target\ny\n\n\n# Create a random forest Classifier. By convention, clf means 'Classifier'\n# Jobs = jobs to run in parallel for both fit and predict. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors.\n# n_estimators = trees, default = 100 for version >.22, 10 otherwise\nclf = RandomForestClassifier(n_jobs=2,n_estimators=10)\n\n# Train the Classifier to take the training features and learn how they relate\n# to the training y (the species)\nclf.fit(train[features], y)\n\n\n# Apply the Classifier we trained to the test data (which, remember, it has never seen before)\nclf.predict(test[features])\n\n\npreds = iris.target_names[clf.predict(test[features])]\n\n\npd.crosstab(test['species'], preds, rownames=['Actual Species'], colnames=['Predicted Species'])\n\n# View a list of the features and their importance scores\nlist(zip(train[features], clf.feature_importances_))\n\n\n\n\n\n","sub_path":"random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"520802732","text":"#!/usr/bin/env python3\r\nimport numpy as np\r\nfrom copy import copy\r\nfrom sequence import Sequence\r\nfrom gates import Gate, VirtualZGate, CompositeGate, IdentityGate, CustomGate\r\nfrom pulse import PulseShape, Pulse, PulseType\r\n\r\nclass CustomSequence(Sequence):\r\n def generate_sequence(self, config):\r\n \"\"\"Generate sequence by adding gates/pulses to waveforms\"\"\"\r\n\r\n frequency = config.get('Parameter #1')\r\n amplitude = config.get('Parameter #2')\r\n width = config.get('Parameter #3')\r\n plateau = config.get('Parameter #4')\r\n drag_coeff= config.get('Parameter #5') #(drag coefficient)\r\n shapeID=config.get('Parameter #6') # (0) gaussian, (1) cosine\r\n pulse_train=config.get('Parameter #7') #yes (1) or no (0) for multiple alternate pulses for optimizing drag and Pi pulse\r\n N_pulses=config.get('Parameter #8') #ignored if pulse_train == 0\r\n self.add_gate_to_all(Gate.Xp)\r\n\r\n pulse12 = Pulse()\r\n pulse12n = Pulse()\r\n pulse12.width = width\r\n pulse12n.width = width\r\n pulse12.plateau = plateau\r\n 
pulse12n.plateau = plateau\r\n pulse12.amplitude = amplitude\r\n pulse12n.amplitude = amplitude\r\n pulse12.frequency = frequency\r\n pulse12n.frequency = frequency\r\n if shapeID==0:\r\n pulse12.shape=PulseShape.GAUSSIAN\r\n pulse12n.shape=PulseShape.GAUSSIAN\r\n if shapeID==1:\r\n pulse12.shape=PulseShape.COSINE\r\n pulse12n.shape=PulseShape.COSINE\r\n pulse12.use_drag = True\r\n pulse12n.use_drag = True\r\n pulse12.drag_coefficient = drag_coeff\r\n pulse12n.drag_coefficient = drag_coeff\r\n pulse12.pulse_type=PulseType.XY\r\n pulse12n.pulse_type=PulseType.XY\r\n pulse12.phase = 0\r\n pulse12n.phase = -np.pi\r\n gateP = CustomGate(pulse12)\r\n gateN = CustomGate(pulse12n)\r\n if pulse_train:\r\n for i in range(int(N_pulses)):\r\n if ((i % 2) == 0):\r\n self.add_gate_to_all(gateP)\r\n else:\r\n self.add_gate_to_all(gateN)\r\n else:\r\n self.add_gate_to_all(gateP)\r\n\r\n # self.add_gate_to_all(Gate.Xp)\r\n","sub_path":"Custom Sequence/Anharmonicity2.py","file_name":"Anharmonicity2.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"566536087","text":"class TripleP:\n def __init__(self, stopwords_list: list = None):\n \"\"\"\n in class baraye pish pardazeshe motone farsi sakhte shude ast k bar payeye hazm kar mikonad.\n :param stopwords_list: shuma mitavanid liste stopword haye khod ra be in class bedahid\n ya az defult khgode an estefade konid.\n \"\"\"\n if stopwords_list is not None:\n self._STOPWORDSLIST = stopwords_list\n from hazm import Normalizer as hazm_normilizer\n self.normalizer = hazm_normilizer(\n remove_extra_spaces=True,\n persian_style=True,\n persian_numbers=True,\n remove_diacritics=True,\n affix_spacing=True,\n token_based=True,\n punctuation_spacing=True\n )\n\n def normal_string(self, string: str) -> str:\n \"\"\"\n dar in ghesmat yek string farsi ra migirm va normal mikonim baraye etelae az chegonegiye normal kardan\n documention hazm ra motalee befarmaeid.\n :param string:\n :return: string noral shude\n \"\"\"\n _str = ''\n for c in string:\n if self._is_symbol(c):\n _str += f' {c} '\n continue\n _str += c\n string = _str\n string = self.normalizer.normalize(string)\n return string\n\n def tokens(self, string) -> list:\n \"\"\"\n dar in ghesmat yek string farsi k shamele chand jole ast daryaft mishavad va dar nahayat tamame in string bar\n asase kalamate darone an tokenize mishavad va darone ye liste pythoni gharar migirad\n ghabele tavajoh mibashad:\n :param string: yek reshteye farsi\n :return:yek list pythoni k -> [jomleye1:list, jomleye2:list, ...] 
va\n har jomle niz yek listpythoni k -> [kalame1:str, kalame2:str, ...].\n \"\"\"\n\n from hazm import SentenceTokenizer, WordTokenizer\n sent_tokenizer = SentenceTokenizer().tokenize\n word_tokenizer = WordTokenizer().tokenize\n string = self.normal_string(string)\n sentences_list = sent_tokenizer(string)\n _sents = []\n for sent in sentences_list:\n words = word_tokenizer(sent)\n _sents.append(words)\n sentences_list = _sents\n return sentences_list\n\n def without_stop_words(self, string: str, stopwords_list: list = None) -> str:\n \"\"\"\n ba estefade az in tabe mitavanid mati k darid ra normal va bedone stopword konid.\n :param string:\n :param stopwords_list:\n :return:\n \"\"\"\n from .extractor import Stopwords\n if stopwords_list is not None:\n stpws = Stopwords(stopwords_list)\n else:\n stpws = Stopwords(self._STOPWORDSLIST)\n string = self.normal_string(string)\n is_sword = stpws.is_stopword\n string_split = string.split(' ')\n _str = ''\n for _wrd in string_split:\n string_split.remove(_wrd)\n if not is_sword(_wrd):\n _str = _wrd\n break\n for _wrd in string_split:\n if not is_sword(_wrd):\n _str += f' {_wrd}'\n string = _str\n return string\n\n @staticmethod\n def _is_symbol(character: str) -> bool:\n PERSISAN_SYMBOL = ['!', '\"', '#', '(', ')', '*', ',', '-', '.', '/', ':', '[', ']', '«', '»', '،', '؛', '؟',\n '+', '=', '_', '-', '&', '^', '%', '$', '#', '@', '!', '~', '\"', \"'\", ':', ';', '>', '<',\n '.', ',', '/', '\\\\', '|', '}', '{', '-', 'ـ', ]\n if character in PERSISAN_SYMBOL:\n return True\n return False\n","sub_path":"plp/persian_pre_processing.py","file_name":"persian_pre_processing.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"431893887","text":"#! 
/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom tkinter import *\n\ndef onClickOk():\n\ttry:\n\t\tvalue=int(inputText.get())\n\t\tvalue*=5\n\t\tetiqueta.config(text=value)\n\texcept ValueError as e:\n\t\tetiqueta.config(\"Introduce un numero\")\n\napp=Tk()\napp.title(\"Mi segunda app Grafica\")\n\nvp=Frame(app)\nvp.grid(column=0,row=0,padx=50,pady=10)\nvp.columnconfigure(0,weight=1)\nvp.rowconfigure(0,weight=1)\n\netiqueta=Label(vp,text=\"Valor\")\netiqueta.grid(column=2,row=2,sticky=(W,E))\n\nbutton=Button(vp,text=\"OK!\", command=onClickOk)\nbutton.grid(column=1,row=1)\n\nvalor=\"\"\ninputText=Entry(vp,width=10,textvariable=valor)\ninputText.grid(column=2,row=1)\n\napp.mainloop()\n","sub_path":"basic_tkinter/002_formulario_simple.py","file_name":"002_formulario_simple.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"102873598","text":"# Uses python3\n\n\ndef evalt(a, b, op):\n if op == '+':\n return a + b\n elif op == '-':\n return a - b\n elif op == '*':\n return a * b\n else:\n assert False\n\n\ntable = {} # lookup table\n\n\ndef minmax(ds, i, j, ops):\n if (i, j) in table:\n return table[(i, j)]\n\n mn = float('inf') if ops[i:j] else int(ds[2 * i])\n mx = float('-inf') if ops[i:j] else int(ds[2 * i])\n\n for k in range(i, j):\n op = ops[k]\n mn1, mx1 = minmax(ds, i, k, ops)\n mn2, mx2 = minmax(ds, k + 1, j, ops)\n\n a = evalt(mx1, mx2, ops[k])\n b = evalt(mx1, mn2, ops[k])\n c = evalt(mn1, mx2, ops[k])\n d = evalt(mn1, mn2, ops[k])\n\n mn = min(mn, a, b, c, d)\n mx = max(mx, a, b, c, d)\n\n table[(i, j)] = mn, mx\n return mn, mx\n\n\ndef get_maximum_value(ds):\n #write your code here\n n = len(ds) // 2 + 1\n ops = [ds[i] for i in range(1, len(ds), 2)]\n mns = [[0 for _ in range(n)] for _ in range(n)]\n mxs = [[0 for _ in range(n)] for _ in range(n)]\n\n for i in range(n):\n mns[i][i] = int(ds[2 * i])\n mxs[i][i] = int(ds[2 * i])\n\n for s in range(1, n):\n for i in range(n - s):\n j = i + s\n mns[i][j], mxs[i][j] = minmax(ds, i, j, ops)\n\n return mxs[0][-1]\n\n\nif __name__ == \"__main__\":\n print(get_maximum_value(input()))\n\n","sub_path":"week6_dynamic_programming2/3_maximum_value_of_an_arithmetic_expression/placing_parentheses.py","file_name":"placing_parentheses.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"137134679","text":"import torch.nn as nn\nimport torch\nfrom torchviz import make_dot\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(RNN, self).__init__()\n\n self.hidden_size = hidden_size\n\n self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n self.i2o = nn.Linear(input_size + hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input, hidden):\n combined = torch.cat((input, hidden), 1)\n hidden = self.i2h(combined)\n output = self.i2o(combined)\n output = self.softmax(output)\n return output, hidden\n\n def initHidden(self):\n return torch.zeros(1, self.hidden_size)\n\nn_hidden = 128\nn_letters = 57\nn_categories = 18\nrnn = RNN(n_letters, n_hidden, n_categories)\nprint(n_letters,n_hidden,n_categories)\n\nx = torch.randn(1,57)\nh = torch.randn(1,128)\n\ng = 
make_dot(rnn(x,h),params=dict(rnn.named_parameters()))\ng.render('m',view=False)\n","sub_path":"pytorch/visulization_network/vis_rnn.py","file_name":"vis_rnn.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"103245903","text":"import logging\n\nfrom telegram.ext import Updater\nfrom telegram.ext import CallbackQueryHandler, CommandHandler\nfrom telegram import Bot, InlineKeyboardMarkup\n\nfrom application import keyboards\n\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\nMAIN_MESSAGE = 'Меню: '\n\n\ndef start(bot, update):\n update.message.reply_text(MAIN_MESSAGE,\n reply_markup=main_menu_keyboard())\n\n\ndef main_menu(bot, update):\n query = update.callback_query\n logging.info(update)\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=MAIN_MESSAGE,\n reply_markup=main_menu_keyboard())\n\n\ndef expense(bot, update):\n query = update.callback_query\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=MAIN_MESSAGE,\n reply_markup=expense_groups_keyboard())\n\n\ndef income(bot, update):\n query = update.callback_query\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=MAIN_MESSAGE,\n reply_markup=()) # keyboard\n\n\ndef main_menu_keyboard():\n return InlineKeyboardMarkup(keyboards.main_keyboard)\n\n\ndef expense_groups_keyboard():\n return InlineKeyboardMarkup(keyboards.group_keyboard)\n\n\ndef g_1(bot, update):\n query = update.callback_query\n bot.send_message(chat_id=query.message.chat_id, text='None')\n\n\nbot = Bot(token)\nupdater = Updater(token)\ndispatcher = updater.dispatcher\n\ndispatcher.add_handler(CommandHandler('start', start))\ndispatcher.add_handler(CallbackQueryHandler(main_menu, pattern='main'))\ndispatcher.add_handler(CallbackQueryHandler(expense, pattern='expense'))\ndispatcher.add_handler(CallbackQueryHandler(income, pattern='income'))\ndispatcher.add_handler(CallbackQueryHandler(g_1, pattern='g_1'))\n\nupdater.start_polling()","sub_path":"application/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"42134813","text":"import time\nimport psutil\nimport socket\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\ndoc = {\n 'cpu': {\n 'core': 0,\n 'threads': 0,\n 'usage_rate': []\n },\n 'memory': {\n 'total': 0,\n 'free': 0,\n 'usage_rate': 0\n },\n 'disk': [],\n 'network':{\n 'receive': 0,\n 'send': 0,\n 'usage': 0\n }\n}\n\ndef get_host_ip():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n finally:\n s.close()\n\n return ip\n\ndef cpu():\n cpu_core = psutil.cpu_count(logical=False);\n cpu_threads = psutil.cpu_count(logical=True);\n cpu_usage_rate = psutil.cpu_percent(interval=1, percpu=True)\n\n \n doc['cpu']['core'] = cpu_core;\n doc['cpu']['threads'] = cpu_threads;\n doc['cpu']['usage_rate'] = cpu_usage_rate;\n\ndef memory():\n memory_free = round(psutil.virtual_memory().free/(1024.0*1024.0*1024.0), 2);\n memory_total = round(psutil.virtual_memory().total/(1024.0*1024.0*1024.0), 2);\n memory_usage_rate = round((memory_total-memory_free)/memory_total, 
2)*100;\n\n doc['memory']['total'] = memory_total;\n doc['memory']['free'] = memory_free;\n doc['memory']['usage_rate'] = memory_usage_rate;\n\ndef disk():\n disk_info = psutil.disk_partitions();\n for i in disk_info:\n if(i.fstype == 'NTFS'):\n disk_usage = psutil.disk_usage(i.device);\n disk_total = round(disk_usage.total/(1024.0*1024.0*1024.0), 2);\n disk_used = round(disk_usage.used/(1024.0*1024.0*1024.0), 2);\n disk_free = round(disk_usage.free/(1024.0*1024.0*1024.0), 2);\n disk_usage_rate = round(disk_used/disk_total, 2)*100;\n\n diskInfo = {};\n diskInfo['name'] = i.device;\n diskInfo['total'] = disk_total;\n diskInfo['used'] = disk_used;\n diskInfo['free'] = disk_free;\n diskInfo['usage_rate'] = disk_usage_rate;\n doc['disk'].append(diskInfo);\n\ndef network():\n net = psutil.net_io_counters();\n bytes_receive = net.bytes_recv;\n bytes_sent = net.bytes_sent;\n\n bytes_all = bytes_receive + bytes_sent;\n time.sleep(1);\n bytes_usage = bytes_receive + bytes_sent - bytes_all;\n\n doc['network']['receive'] = bytes_receive;\n doc['network']['send'] = bytes_sent;\n doc['network']['usage'] = bytes_usage;\n\ndef convert_to_gbit(value):\n return value/1024./1024.*8\n\ndef send_stat(value):\n print (\"%0.3fMpbs\" % convert_to_gbit(value))\n\ndef showData():\n showCPUInfo(doc['cpu']['core'], doc['cpu']['threads'])\n for i in range(len(doc['cpu']['usage_rate'])):\n showCPUUsageRate(i, doc['cpu']['usage_rate'][i]);\n \n showMemoryInfo(doc['memory']['total'], doc['memory']['free'], doc['memory']['usage_rate']);\n\n for diskInfo in doc['disk']:\n showDiskInfo(diskInfo['name'], diskInfo['total'], diskInfo['used'], diskInfo['free'], diskInfo['usage_rate']);\n \n showNetword(doc['network']['receive'], doc['network']['send'], doc['network']['usage']);\n\ndef showCPUInfo(core, threads):\n print ('CPU {0}核{1}線程'.format(core, threads));\n\ndef showCPUUsageRate(threads, usage_rate):\n print ('CPU {0}線程使用率: {1}%'.format(threads, usage_rate));\n\ndef showMemoryInfo(total, free, usage_rate):\n print ('記憶體 總共:{0}GB 剩餘:{1}GB 使用率:{2}%'.format(total, free, usage_rate));\n\ndef showDiskInfo(name, total, used, free, usage_rate):\n print ('硬碟{0} 總共:{1}GB 使用:{2}GB 剩餘:{3}GB 使用率:{4}%'.format(name, total, used, free, usage_rate));\n\ndef showNetword(receive, send, usage):\n print ('網路 接收:{0:.2f} 送出:{1:.2f} 使用:{2:.3f}'.format(receive/1024./1024, send/1024./1024, usage/1024./1024.*8));\n\ndef initFirebase():\n cred = credentials.Certificate('serviceAccount.json');\n firebase_admin.initialize_app(cred);\n\ndef sendDataToFirebase():\n db = firestore.client();\n doc_ref = db.collection(\"system-monitor\").document(get_host_ip())\n doc_ref.set(doc)\n\ninitFirebase();\ncpu();\nmemory();\ndisk();\nnetwork();\n\nprint (get_host_ip())\nshowData();\nsendDataToFirebase()","sub_path":"System Monitor/system-monitor.py","file_name":"system-monitor.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"343841801","text":"\"\"\"\nThis program demonstrates how Huffman codes can be used to efficiently\ncompress and uncompress files (text or binary).\n\"\"\"\nimport os\nfrom optparse import OptionParser\nfrom collections import Counter\n\nfrom bitarray import bitarray\nfrom bitarray.util import huffman_code\n\n\ndef encode(filename):\n with open(filename, 'rb') as fi:\n plain = bytearray(fi.read())\n\n code = huffman_code(Counter(plain))\n with open(filename + '.huff', 'wb') as fo:\n for sym in sorted(code):\n fo.write(b'%02x %s\\n' % (sym, 
code[sym].to01().encode()))\n a = bitarray(endian='little')\n a.encode(code, plain)\n # write unused bits\n fo.write(b'unused %d\\n' % a.buffer_info()[3])\n a.tofile(fo)\n print('Bits: %d / %d' % (len(a), 8 * len(plain)))\n print('Ratio =%6.2f%%' % (100.0 * a.buffer_info()[1] / len(plain)))\n\n\ndef decode(filename):\n assert filename.endswith('.huff')\n code = {}\n\n with open(filename, 'rb') as fi:\n while 1:\n line = fi.readline()\n sym, b = line.split()\n if sym == b'unused':\n u = int(b)\n break\n i = int(sym, 16)\n code[i] = bitarray(b.decode())\n a = bitarray(endian='little')\n a.fromfile(fi)\n\n if u:\n del a[-u:]\n\n with open(filename[:-5] + '.out', 'wb') as fo:\n fo.write(bytearray(a.iterdecode(code)))\n\n\ndef main():\n p = OptionParser(\"usage: %prog [options] FILE\")\n p.add_option(\n '-e', '--encode',\n action=\"store_true\",\n help=\"encode (compress) FILE using the Huffman code calculated for \"\n \"the frequency of characters in FILE itself. \"\n \"The output is FILE.huff which contains both the Huffman \"\n \"code and the bitarray resulting from the encoding.\")\n p.add_option(\n '-d', '--decode',\n action=\"store_true\",\n help=\"decode (decompress) FILE.huff and write the output to FILE.out\")\n p.add_option(\n '-t', '--test',\n action=\"store_true\",\n help=\"encode FILE, decode FILE.huff, compare FILE with FILE.out, \"\n \"and unlink created files.\")\n opts, args = p.parse_args()\n if len(args) != 1:\n p.error('exactly one argument required')\n filename = args[0]\n\n if opts.encode:\n encode(filename)\n\n elif opts.decode:\n decode(filename + '.huff')\n\n elif opts.test:\n huff = filename + '.huff'\n out = filename + '.out'\n encode(filename)\n decode(huff)\n assert open(filename, 'rb').read() == open(out, 'rb').read()\n os.unlink(huff)\n os.unlink(out)\n\n else:\n p.error(\"no option provided\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/huffman/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"122175813","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os.path\nimport logging\n\nimport pygame\n\nfrom npgameworld.world import World\nfrom test_world import get_hero_actions\n\n\ndef main():\n\n # Init game world\n world = World(conf_path=os.path.realpath('config-example.json'))\n world.logger.setLevel(logging.DEBUG)\n\n # pygame used for circles drawing only\n screen = pygame.display.set_mode((world.screen_width, world.screen_height))\n clock = pygame.time.Clock()\n fps = 120\n pygame.init()\n pygame.display.init()\n\n hero_color = (255, 255, 255)\n hero_bullet_color = (239, 0, 255)\n\n # Start world generator\n wg = world.world_gen()\n wg.send(None)\n\n # Run pygame loop with world generator\n while not world.game_over:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n world.gameover = True\n\n screen.fill((0, 0, 0))\n\n # Hero actions\n hero_actions = get_hero_actions(world.world_stat['enemies'],\n world.hero_x, world.hero_y)\n\n try:\n wg.send(hero_actions)\n except StopIteration:\n continue\n\n # Draw hero\n pygame.draw.circle(screen, hero_color,\n (int(world.hero_x), int(world.hero_y)),\n world.hero_radius)\n\n # Draw enemies\n for e in world.world_stat['enemies']:\n color = (e['power'] + e['hp'] + e['spd']**2 + e['radius']) * 4\n\n if color > 255:\n color = 255\n\n pygame.draw.circle(screen, (color, color, 255),\n (int(e['x']), int(e['y'])), e['radius'])\n\n # Draw 
bullets\n for b in world.world_stat['bullets']:\n pygame.draw.circle(screen, hero_bullet_color,\n (int(b['x']), int(b['y'])), b['radius'])\n\n pygame.display.flip()\n clock.tick(fps)\n\n print(world.world_stat)\n pygame.display.quit()\n pygame.quit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"play_world_advanced.py","file_name":"play_world_advanced.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452812345","text":"# 编写一个程序,找出第 n 个丑数。 \n# \n# 丑数就是质因数只包含 2, 3, 5 的正整数。 \n# \n# 示例: \n# \n# 输入: n = 10\n# 输出: 12\n# 解释: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 是前 10 个丑数。 \n# \n# 说明: \n# \n# \n# 1 是丑数。 \n# n 不超过1690。 \n# \n# Related Topics 堆 数学 动态规划\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def nthUglyNumber(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n dp = [1 for _ in range(n)]\n p2, p3, p5 = 0, 0, 0\n for i in range(1, n):\n dp[i] = min([dp[p2] * 2, dp[p3] * 3, dp[p5] * 5])\n if dp[i] == dp[p2] * 2:\n p2 += 1\n if dp[i] == dp[p3] * 3:\n p3 += 1\n if dp[i] == dp[p5] * 5:\n p5 += 1\n\n return dp[n-1]\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week02/Assignment/[264]丑数 II.py","file_name":"[264]丑数 II.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"443557285","text":"import numpy as np\nimport sklearn.utils\nimport torch\nimport torch.nn as nn\nfrom model import Net\nfrom torch.autograd import Variable\nfrom utils import load_data, write_for_latex\n\n\ndef train(inputs, labels):\n # we want to keep all values, not just mean in lists\n # so that we can answer the questions for assignment\n tr_loss = []\n inputs, labels = sklearn.utils.shuffle(inputs, labels)\n\n # Select data from numpy array and conv to tensor\n inputs, labels = torch.from_numpy(inputs), torch.from_numpy(labels)\n for i in range(0, labels.shape[0], batch_size):\n inputs_batch, labels_batch = inputs[i:i + batch_size], labels[i:i + batch_size]\n\n # Convert torch tensor to Variable\n inputs_batch, labels_batch = Variable(inputs_batch).float(), Variable(labels_batch.squeeze()).long()\n if torch.cuda.is_available() and use_cuda:\n inputs_batch, labels_batch = inputs_batch.cuda(), labels_batch.cuda().long()\n # Forward + Backward + Optimize\n optimizer.zero_grad() # zero the gradient buffer\n predictions = net(inputs_batch)\n\n loss = criterion(predictions, labels_batch)\n tr_loss.append(loss.data[0])\n\n loss.backward()\n optimizer.step()\n return tr_loss\n\n\ndef evaluate_accuracy(inputs, labels):\n inputs, labels = torch.from_numpy(inputs), torch.from_numpy(labels)\n\n # Convert torch tensor to Variable\n inputs, labels = Variable(inputs).float(), Variable(labels.squeeze()).long()\n if torch.cuda.is_available() and use_cuda:\n inputs, labels = inputs.cuda(), labels.cuda().long()\n # Forward\n predictions = net(inputs)\n _, predictions = torch.max(predictions, 1)\n correct = predictions.eq(labels).cpu().float().sum()\n return correct * 100.0 / labels.shape[0]\n\n\n# Data loading and pre-processing chosing\nx, y, vx, vy = load_data(True, False,\n True)\n\n# Hyper Parameters\ninput_size = x.shape[1]\nhidden_size = 100\noutput_size = 20\nnum_epochs = 100\nbatch_size = 100\nlearning_rate = 0.02\nuse_cuda = False\n\n# Different learning rates to go through\nlearning_rates = [0.1, 2, 1, 0.5]\n\nfor lr in learning_rates:\n 
print(\"Learning rate: \" + str(lr))\n # Model\n net = Net(input_size, hidden_size, output_size)\n if torch.cuda.is_available() and use_cuda:\n net = net.cuda()\n # Loss and Optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(net.parameters(), lr=lr)\n # Train the Model\n for epoch in range(num_epochs):\n training_loss = train(x, y)\n\n # Measure accuracy on training and validation set\n training_accuracy = evaluate_accuracy(x, y)\n validation_accuracy = evaluate_accuracy(vx, vy)\n\n # Write values in latex file\n write_for_latex(str(lr) + \"_training.txt\", epoch + 1, training_accuracy)\n write_for_latex(str(lr) + \"_validation.txt\", epoch + 1, validation_accuracy)\n print('Epoch : %02d Training Loss : %.3f | Accuracy : %.2f ||| Validation Accuracy : %.2f '\n % (epoch + 1, np.mean(training_loss), training_accuracy,\n validation_accuracy))\n","sub_path":"Assignment 1/Code/Newsgroups-Classifier/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"499096517","text":"import numpy as np\nimport soundfile as sf\nimport glob\nimport os\nfrom scipy import spatial\n\ndef ZCR(signal):\n end = len(signal)\n end3 = int(end/3)\n s1 = signal[0:end3]\n s2 = signal[end3:2*end3]\n s3 = signal[2*end3:end]\n s = [s1,s2,s3]\n ZCR = []\n for i in s:\n ZCR.append(np.mean(\n np.abs(\n np.diff(\n np.sign(i)\n )\n )\n )/2)\n ZCR.append(np.sum(signal**2))\n return ZCR\n\ndef fileZCR(dir):\n files = glob.glob(dir)\n totalZCR = []\n for path in files:\n data ,fs = sf.read(path)\n signalZCR = ZCR(data)\n totalZCR.append(signalZCR)\n return np.mean(totalZCR,axis=0)\n\ndef findSimilarity(one,two):\n return 1 - spatial.distance.cosine(one,two)\n\n\ndef main():\n yes_path = os.path.dirname(__file__) + \"/train/yes/*.wav\"\n ZCR_yes = fileZCR(yes_path)\n no_path = os.path.dirname(__file__) + \"/train/no/*.wav\"\n ZCR_no = fileZCR(no_path)\n\n yes_test = glob.glob(os.path.dirname(__file__) + \"/test/yes/*.wav\")\n no_test = glob.glob(os.path.dirname(__file__) + \"/test/no/*.wav\")\n\n for file in yes_test+no_test:\n data,fs = sf.read(file)\n test = ZCR(data)\n yes_sim = findSimilarity(test,ZCR_yes)\n no_sim = findSimilarity(test,ZCR_no)\n ans = False\n if(yes_sim > no_sim):\n ans=True\n print(\"is %s a yes? 
%r\" % (file.rsplit('\\\\')[-1],ans))\n\nif __name__ == \"__main__\":\n main()","sub_path":"phase1.py","file_name":"phase1.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"430581608","text":"#Murali Siddappa Badiger\n\nimport os\nimport tensorflow as tf\nfrom sklearn.metrics import f1_score\nimport time\nimport random\nimport logging\nimport preprocess_heart_disease_data\n\n\n# Turn off tensorflow warnings\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\n\n\ndef MyDNN(trainX_fully_preprocessed, trainY_binary, testX_fully_preprocessed, testY_binary, hidden_units = [10,5], dropout=0.5,activation_fn=tf.nn.elu, steps=1000 ):\n\n Model_01 = tf.contrib.learn.DNNClassifier(hidden_units = hidden_units,\n n_classes=2,\n dropout=dropout,\n feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(trainX_fully_preprocessed),\n activation_fn=activation_fn\n ,gradient_clip_norm=0.9\n ,optimizer = tf.train.AdamOptimizer(learning_rate=0.001)\n )\n #This line makes the tf model compatible with the sklearn way of codding\n Model_01 = tf.contrib.learn.SKCompat(Model_01)\n \n start_train = time.time()\n Model_01.fit(trainX_fully_preprocessed,\n trainY_binary,\n batch_size=100,\n steps=steps\n )\n\n end_train = time.time()\n \n train_time = end_train - start_train \n \n test_predict_01 = Model_01.predict(testX_fully_preprocessed)['classes']#The TensorFlow predict object is a bit different from the sklearn object\n\n start_test = time.time()\n print(start_test)\n f1 = f1_score(testY_binary, test_predict_01)\n end_test = time.time()\n print(end_test)\n \n test_time = end_test-start_test\n print(test_time)\n return f1, train_time, test_time, test_predict_01\n\n\n\ndef neural_network(split_size):\n logging.getLogger('tensorflow').disabled = True\n\n random.seed(1)\n\n trainX_fully_preprocessed, trainY_binary, testX_fully_preprocessed, testY_binary = preprocess_heart_disease_data.Preprocess_Heart_Disease_Data(split_size)\n net=[10, 5]\n F1, train_time, test_time, test_predict = MyDNN(trainX_fully_preprocessed,\n trainY_binary,\n testX_fully_preprocessed,\n testY_binary,\n hidden_units=net,\n dropout=0.5,\n activation_fn=tf.nn.elu,\n steps=10000)\n print(\"Results for split percentage: \")\n print(\"Time taken to train the network: {0}\".format(train_time))\n print(\"Time taken for prediction is :{0}\".format(test_time) )\n print(\"Accuracy of the system using neural network: {0}\".format(F1))\n return F1, train_time, test_time\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"TensorFlow_DNN_Heart_Disease.py","file_name":"TensorFlow_DNN_Heart_Disease.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"2697234","text":"from django import forms\nfrom customer.models import ShippingAddress\n\n\nclass CheckoutForm(forms.Form):\n address = forms.ModelChoiceField(\n widget=forms.Select(attrs={'class': 'form-control'}),\n label='Адрес доставки',\n queryset=None,\n empty_label=None,\n required=True,\n help_text='Укажите адрес достваки, как можно подробнее'\n '(улицу, дом, корпус, этаж, подъезд, код домофона), '\n 'чтобы курьеру было легче Вас найти.'\n )\n comment = forms.CharField(\n widget=forms.Textarea(attrs={'class': 'form-control', 'rows': 2}),\n label='Пожелание к заказу',\n required=False,\n help_text='Пожелание к заказу'\n )\n\n def __init__(self, *args, **kwargs):\n user = kwargs.pop('user', 
None)\n super(CheckoutForm, self).__init__(*args, **kwargs)\n self.fields['address'].queryset = ShippingAddress.objects.filter(user=user, is_deleted=False)\n\n\nclass CommentForm(forms.Form):\n text = forms.CharField(widget=forms.Textarea, label='', required=True)","sub_path":"project/store/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"534959443","text":"#! ../env/bin/python\n# -*- coding: utf-8 -*-\nfrom silverflask import create_app\nfrom silverflask import db\nfrom silverflask.models import Page, User, SiteTree\nimport unittest\nimport tempfile\nfrom flask.ext.testing import TestCase\n\nclass BaseTest(TestCase):\n def create_app(self):\n self.app = create_app(\"silverflask.settings.TestConfig\", env=\"dev\")\n return self.app\n\n def setUp(self):\n from silverflask.models import User\n from silverflask.models.User import Role\n if not len(Role.query.all()):\n admin_role = Role(\"admin\", \"Admin has all privileges\")\n db.session.add(admin_role)\n db.session.commit()\n if not len(User.query.all()):\n # create standard user\n u = User(\"admin\", \"admin\")\n u.email = \"admin\"\n db.session.add(u)\n admin_role = Role.query.filter(Role.name == \"admin\").first()\n u.roles.append(admin_role)\n db.session.commit()\n\n from silverflask.models import SiteConfig\n if not len(SiteConfig.query.all()):\n sc = SiteConfig()\n db.session.add(sc)\n db.session.commit()\n\n if not len(Page.query.all()):\n page = Page()\n page.content = \"
Please proceed to the admin interface at admin!
\"\n page.name = \"home\"\n page.urlsegment = \"home\"\n db.session.add(page)\n db.session.commit()\n page.mark_as_published()\n db.session.commit()\n","sub_path":"tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"135028450","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of CERN Document Server.\n# Copyright (C) 2015 CERN.\n#\n# Invenio is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# Invenio is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Invenio; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"CDS Image MARC 21 field definitions.\"\"\"\n\nfrom dojson import utils\n\nfrom cds_dojson.marc21.models.image import model as marc21\n\n\n@marc21.over('album_parent', '^774[_01][_8]', override=True)\n@utils.for_each_value\ndef album_parent(self, key, value):\n \"\"\"Album ID which contains this photo\"\"\"\n return {\n 'dump_album': value.get('a'),\n 'album_id': value.get('r')\n }\n\n\n@marc21.over('image_url', '^856[_012347][_0128]', override=True)\n@utils.for_each_value\n@utils.filter_values\ndef image_url(self, key, value):\n \"\"\"Image URL.\n\n Contains the URL to the concrete image file\n and information about the format.\n \"\"\"\n indicator_map1 = {\n \"#\": \"No information provided\",\n \"0\": \"Email\",\n \"1\": \"FTP\",\n \"2\": \"Remote login (Telnet)\",\n \"3\": \"Dial-up\",\n \"4\": \"HTTP\",\n \"7\": \"Method specified in subfield $2\"}\n indicator_map2 = {\n \"#\": \"No information provided\",\n \"0\": \"Resource\",\n \"1\": \"Version of resource\",\n \"2\": \"Related resource\",\n \"8\": \"No display constant generated\"}\n return {\n 'size': value.get('s'),\n 'path': value.get('d'),\n 'electronic_format_type': value.get('q'),\n 'uri': value.get('u'),\n 'link_text': value.get('y'),\n 'public_note': value.get('z'),\n 'subformat': value.get('x'),\n 'photo_id': value.get('8'),\n 'relationship': indicator_map2.get(key[4]),\n 'access_method':\n value.get('2') if key[3] == '7' else indicator_map1.get(key[3]),\n }\n","sub_path":"cds_dojson/marc21/fields/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98240154","text":"#!/usr/bin/env python3\n \n#\n# pygame (simple) template - by furas\n#\n# https://github.com/furas/my-python-codes/tree/master/pygame/__template__/\n#\n \n# ---------------------------------------------------------------------\n \n__author__ = 'Bartlomiej \"furas\" Burek'\n__webpage__ = 'http://blog.furas.pl'\n \n# ---------------------------------------------------------------------\n \nimport pygame\n \n# === CONSTANS === (UPPER_CASE names)\n \nBLACK = ( 0, 0, 0)\nWHITE = (255, 255, 255)\n \nRED = (255, 0, 0)\nGREEN = ( 0, 255, 0)\nBLUE = ( 0, 0, 255)\n \nSCREEN_WIDTH = 600\nSCREEN_HEIGHT = 400\n \nBLOCK_SIZE = 50\n \n# === CLASSES === (CamelCase names)\n \n'''\nclass 
Button():\n'''\n \n# === FUNCTIONS === (lower_case names)\n \n # empty\n \n# === MAIN === (lower_case names)\n \n# --- (global) variables ---\n \n# --- init ---\n \npygame.init()\n \nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nscreen_rect = screen.get_rect()\n \n# --- objects ---\n \n'''\nbutton = Button(...)\n'''\n \nrects = []\n \nfor x in range(10):\n rects.append( pygame.Rect(x*(BLOCK_SIZE+5), BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE) )\n \nselected = None\n \n# --- mainloop ---\n \nclock = pygame.time.Clock()\nis_running = True\n \nwhile is_running:\n \n # --- events ---\n \n for event in pygame.event.get():\n \n # --- global events ---\n \n if event.type == pygame.QUIT:\n is_running = False\n \n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n is_running = False\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n for i, r in enumerate(rects):\n if r.collidepoint(event.pos):\n selected = i\n selected_offset_x = r.x - event.pos[0]\n selected_offset_y = r.y - event.pos[1]\n \n elif event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n selected = None\n \n elif event.type == pygame.MOUSEMOTION:\n if selected is not None: # selected can be `0` so `is not None` is required\n # move object\n rects[selected].x = event.pos[0] + selected_offset_x\n rects[selected].y = event.pos[1] + selected_offset_y\n \n # --- objects events ---\n \n '''\n button.handle_event(event)\n '''\n \n # --- updates ---\n \n # empty\n \n # --- draws ---\n \n screen.fill(BLACK)\n \n '''\n button.draw(screen) \n '''\n \n # draw rect\n for r in rects:\n pygame.draw.rect(screen, RED, r)\n \n pygame.display.update()\n \n # --- FPS ---\n \n clock.tick(25)\n \n# --- the end ---\n \npygame.quit()\n","sub_path":"pygame/drag-rectangles-circles/drag_rectangles.py","file_name":"drag_rectangles.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421542273","text":"#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\ndef main():\n \"\"\"Run administrative tasks.\"\"\"\n\n pwd = os.getcwd()\n paths = [\n pwd,\n os.path.join(pwd, 'apps')\n ]\n\n for index, path in enumerate(paths):\n if path not in sys.path:\n sys.path.insert(index, path)\n\n if os.environ['DEBUG'] == '1':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')\n else:\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.production')\n\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"506477028","text":"import torch\nimport numpy as np\n\n# Various ways to initialize tensors\n# 1. from data\ndata = [[1, 2], [3, 4]]\nx_data = torch.tensor(data)\n# 2. from numpy array\nnp_array = np.array(data)\nx_np = torch.tensor(np_array)\n# 3. 
from another tensor\nx_ones = torch.ones_like(x_data)\nprint(f\"Ones Tensor: \\n {x_ones} \\n\")\n\nx_rand = torch.rand_like(x_data, dtype=torch.float)\nprint(f\"Random tensor: \\n {x_rand} \\n\")\n\n# Generate tensors from shape tuple\nshape = (2, 3,)\nrand_tensor = torch.rand(shape)\nones_tensor = torch.ones(shape)\nzeros_tensor = torch.zeros(shape)\n\nprint(f\"Random Tensor: \\n {rand_tensor} \\n\")\nprint(f\"Ones Tensor: \\n {ones_tensor} \\n\")\nprint(f\"Zeros Tensor: \\n {zeros_tensor} \\n\")\n\n\n# Attributes of a tensor: shape, data types, device\ntensor = torch.rand(3, 4)\nprint(f\"Shape: {tensor.shape} Data type: {tensor.dtype} Device: {tensor.device}\")\n\n# Move tensor to GPU if available\nif torch.cuda.is_available():\n    tensor = tensor.to('cuda')\n\n# Tensor indexing\ntensor = torch.rand(4, 4)\nprint('First row: ', tensor[0])\nprint('First column: ', tensor[:, 0])\nprint('Last column: ', tensor[..., -1])\nprint('Last column: ', tensor[:, -1])\ntensor[:, 1] = 0\nprint(tensor)\n\n\n# Joining tensors: torch.cat, torch.stack\nt1 = torch.cat([tensor, tensor, tensor], dim=1)\nprint(t1)\n\n# Arithmetic operations\n# Matrix multiplication (y1,y2,y3 will have the same value)\ny1 = tensor @ tensor.T\ny2 = tensor.matmul(tensor.T)\ny3 = torch.rand_like(tensor)\ntorch.matmul(tensor, tensor.T, out=y3)\n# print('y1: \\n', y1)\n# print('y2: \\n', y2)\n# print('y3: \\n', y3)\n\n# Element wise product( z1, z2, z3 will have the same value)\nz1 = tensor * tensor\nz2 = tensor.mul(tensor)\nz3 = torch.rand_like(tensor)\ntorch.mul(tensor, tensor, out=z3)\n# print('z1: \\n', z1)\n# print('z2: \\n', z2)\n# print('z3: \\n', z3)\n\n\n# Single element tensor\nagg = tensor.sum()\nagg_item = agg.item()\nprint(agg_item, type(agg), type(agg_item))\n\n# Inplace operations (using _ suffix)\nprint(tensor, \"\\n\")\ntensor.add_(5)\nprint(tensor)\n\n\n# Bridge with numpy\n# Tensor to NumPy array\nt = torch.ones(5)\nprint(f\"t: {t} type: {type(t)}\")\nn = t.numpy()\nprint(f\"n {n} type: {type(n)}\")\n\n# A change in the tensor reflects in the NumPy array.\nt.add_(1)\nprint(t, n)\n\n# numpy array to tensor\nnp.add(n, 1, out=n)\nprint(t, n)\n","sub_path":"introduction_to_pytorch/lesson_1_tensors/tensors.py","file_name":"tensors.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"}
+{"seq_id":"346650358","text":"import time\n\nimport networkx as nx\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom torch_geometric.datasets import TUDataset\nfrom torch_geometric.datasets import Planetoid\nfrom torch_geometric.data import DataLoader\n\nimport torch_geometric.nn as pyg_nn\n\nfrom models import GNNStack\nfrom utils import build_optimizer\n\ndef train(dataset, args):\n\n    # build model\n    model = GNNStack(dataset.num_node_features, args.hidden_dim, args.embed_dim, \n        args)\n    scheduler, opt = build_optimizer(args, model.parameters())\n\n    loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)\n\n    # train\n    train_loss = []\n    train_valid = []\n    for epoch in range(args.epochs):\n        total_loss = 0\n        total_valid = 0\n        model.train()\n        # for data in dataset: #this is bad, still need dataloader\n        # if True:\n        # data = dataset[0]\n        for data in loader:\n            train_mask = torch.FloatTensor(data.edge_attr.shape[0], 1).uniform_() < 0.8\n            # train_mask = torch.FloatTensor(data.edge_attr.shape[0], 1).uniform_() < 1.1\n            valid_mask = ~train_mask\n            \n            edge_attr = data.edge_attr.clone().detach()\n            edge_attr = 
edge_attr[:,0].view(-1,1)\n # edge_attr = edge_attr/torch.max(edge_attr)\n\n known_edge_attr = edge_attr.clone().detach()\n known_edge_attr[valid_mask] = 0.\n\n x = data.x.clone().detach()\n edge_index = data.edge_index.clone().detach()\n\n opt.zero_grad()\n pred = model(x, known_edge_attr, edge_index)\n label = edge_attr\n\n pred_valid = pred[valid_mask]\n label_valid = label[valid_mask]\n vald_score = model.loss(pred_valid, label_valid)\n total_valid += vald_score.item()\n\n pred_train = pred[train_mask]\n label_train = label[train_mask]\n loss = model.loss(pred_train, label_train)\n loss.backward()\n opt.step()\n total_loss += loss.item()\n # total_loss /= len(dataset)\n train_loss.append(total_loss)\n train_valid.append(total_valid)\n print('epoch: ',epoch)\n print('loss: ',total_loss)\n print('valid: ',total_valid)\n\n import matplotlib.pyplot as plt\n plt.figure()\n plot1, = plt.plot(train_loss)\n plot2, =plt.plot(train_valid)\n plt.legend([plot1,plot2],['trian','valid'])\n plt.title('train valid curve')\n plt.figure()\n plot1, = plt.plot(pred_train.detach().numpy())\n plot2, = plt.plot(label_train.detach().numpy())\n plt.legend([plot1,plot2],['pred','label'])\n plt.title('final train result')\n plt.figure()\n plot1, = plt.plot(pred_valid.detach().numpy()) \n plot2, = plt.plot(label_valid.detach().numpy())\n plt.legend([plot1,plot2],['pred','label'])\n plt.title('final valid result')\n plt.show()\n \nclass objectview(object):\n def __init__(self, d):\n self.__dict__ = d\n\ndef main():\n for args in [\n {'model_type': 'EGCN', 'num_layers': 3, 'batch_size': 32, 'hidden_dim': 1, 'embed_dim': 6, 'dropout': 0., 'epochs': 5000, 'opt': 'adam', 'opt_scheduler': 'none', 'opt_restart': 0, 'weight_decay': 0., 'lr': 0.001},\n ]:\n args = objectview(args)\n dataset = TUDataset(root='/tmp/COX2_MD', name='COX2_MD', use_node_attr=True, use_edge_attr=True)\n dataset = dataset.shuffle() # add this line!\n train(dataset, args) \n\nif __name__ == '__main__':\n main()\n\n","sub_path":"train_cox2_mid.py","file_name":"train_cox2_mid.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"377007904","text":"import unittest\nfrom onnx import load\nimport torch\nimport onnxruntime as _ort\nimport io\nimport numpy\n\nfrom torch.onnx import register_custom_op_symbolic\nfrom onnxruntime_customops import (\n onnx_op,\n get_library_path as _get_library_path)\n\n\ndef my_inverse(g, self):\n return g.op(\"ai.onnx.contrib::Inverse\", self)\n\n\nclass CustomInverse(torch.nn.Module):\n def forward(self, x):\n return torch.inverse(x) + x\n\n\nclass TestPyTorchCustomOp(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n\n @onnx_op(op_type=\"Inverse\")\n def inverse(x):\n # the user custom op implementation here:\n return numpy.linalg.inv(x)\n\n def test_custom_pythonop_pytorch(self):\n\n # register_custom_op_symbolic(\n # '::inverse', my_inverse, )\n register_custom_op_symbolic('::inverse', my_inverse, 1)\n\n x = torch.randn(3, 3)\n\n # Export model to ONNX\n f = io.BytesIO()\n torch.onnx.export(CustomInverse(), (x,), f)\n onnx_model = load(io.BytesIO(f.getvalue()))\n self.assertIn('domain: \"ai.onnx.contrib\"', str(onnx_model))\n\n model = CustomInverse()\n pt_outputs = model(x)\n\n so = _ort.SessionOptions()\n so.register_custom_ops_library(_get_library_path())\n\n # Run the exported model with ONNX Runtime\n ort_sess = _ort.InferenceSession(f.getvalue(), so)\n ort_inputs = dict((ort_sess.get_inputs()[i].name, 
input.cpu().numpy())\n for i, input in enumerate((x,)))\n ort_outputs = ort_sess.run(None, ort_inputs)\n\n # Validate PyTorch and ONNX Runtime results\n numpy.testing.assert_allclose(pt_outputs.cpu().numpy(),\n ort_outputs[0], rtol=1e-03, atol=1e-05)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_torch_ops.py","file_name":"test_torch_ops.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"476540738","text":"from simpledb.buffer.BufferMgr import BufferMgr\nfrom simpledb.log.LogMgr import LogMgr\nfrom simpledb.record.Layout import Layout\nfrom simpledb.record.Schema import Schema\nfrom simpledb.record.TableScan import TableScan\nfrom simpledb.tx.Transaction import Transaction\nfrom simpledb.util.File import File\nfrom simpledb.file.FileMgr import FileMgr\nimport random\n\n\nclass TableScanTest(object):\n @classmethod\n def main(cls, args):\n # db = SimpleDB(\"tabletest\", 400, 8)\n fm = FileMgr(File(\"tabletest\"), 400)\n lm = LogMgr(fm, \"simpledb.log\")\n bm = BufferMgr(fm, lm, 8)\n tx = Transaction(fm, lm, bm)\n\n sch = Schema()\n sch.addIntField(\"A\")\n sch.addStringField(\"B\", 9)\n layout = Layout(sch)\n for fldname in layout.schema().fields():\n offset = layout.offset(fldname)\n print(fldname + \" has offset \" + str(offset))\n\n print(\"Filling the table with 50 random records.\")\n ts = TableScan(tx, \"T\", layout)\n for i in range(50):\n ts.insert()\n n = random.randint(0, 50)\n ts.setInt(\"A\", n)\n ts.setString(\"B\", \"rec\" + str(n))\n print(\"inserting into slot \" + ts.getRid().__str__() + \": {\" + str(n) + \", \" + \"rec\" + str(n) + \"}\")\n\n print(\"Deleting these records, whose A-values are less than 25.\")\n count = 0\n ts.beforeFirst()\n while ts.next():\n a = ts.getInt(\"A\")\n b = ts.getString(\"B\")\n if a < 25:\n count += 1\n print(\"slot \" + ts.getRid().__str__() + \": {\" + str(a) + \", \" + b + \"}\")\n ts.delete()\n print(str(count) + \" values under 10 were deleted.\\n\")\n\n print(\"Here are the remaining records.\")\n ts.beforeFirst()\n while ts.next():\n a = ts.getInt(\"A\")\n b = ts.getString(\"B\")\n print(\"slot \" + ts.getRid().__str__() + \": {\" + str(a) + \", \" + b + \"}\")\n ts.close()\n tx.commit()\n\n\nif __name__ == '__main__':\n import sys\n TableScanTest.main(sys.argv)\n","sub_path":"simpledb/record/TableScanTest.py","file_name":"TableScanTest.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"370011442","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('CorpRoomApp', '0061_auto_20151210_1316'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserBookingStatisticTrack',\n fields=[\n ('user_booking_statistic_track_id', models.AutoField(serialize=False, primary_key=True)),\n ('booking_path', models.CharField(max_length=100, verbose_name=b'Booking Path')),\n ('count', models.PositiveIntegerField(default=1)),\n ('current_timestamp', models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 382495), verbose_name=b'Current Time Stamp')),\n ('booking_date', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='UserRequestStatisticTrack',\n fields=[\n ('user_request_statistic_track_id', models.AutoField(serialize=False, primary_key=True)),\n 
('booking_path', models.CharField(max_length=100, verbose_name=b'Booking Path')),\n ('count', models.PositiveIntegerField(default=1)),\n ('current_timestamp', models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 383050), verbose_name=b'Current Time Stamp')),\n ('request_date', models.DateField()),\n ],\n ),\n migrations.AlterField(\n model_name='booking',\n name='booking_creation_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 376389)),\n ),\n migrations.AlterField(\n model_name='booking',\n name='booking_datetime',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 375925)),\n ),\n migrations.AlterField(\n model_name='company',\n name='company_update_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 363039)),\n ),\n migrations.AlterField(\n model_name='corporatetransaction',\n name='transaction_date',\n field=models.DateField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 380293)),\n ),\n migrations.AlterField(\n model_name='customer',\n name='cust_updated_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 364787)),\n ),\n migrations.AlterField(\n model_name='guest',\n name='guest_update_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 373330)),\n ),\n migrations.AlterField(\n model_name='invoice',\n name='invoice_datetime',\n field=models.DateField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 379293)),\n ),\n migrations.AlterField(\n model_name='invoice',\n name='invoice_generated_datetime',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 379412), verbose_name=b'Invoice Generated DateTime'),\n ),\n migrations.AlterField(\n model_name='mailinglist',\n name='updated_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 381034), verbose_name=b'Requested Date Time'),\n ),\n migrations.AlterField(\n model_name='paymenttransaction',\n name='booking_transaction_update_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 375041), verbose_name=b'Update Date'),\n ),\n migrations.AlterField(\n model_name='promotion_code',\n name='created_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 377725)),\n ),\n migrations.AlterField(\n model_name='property',\n name='property_update_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 366557)),\n ),\n migrations.AlterField(\n model_name='propertyroom',\n name='room_update_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 368477)),\n ),\n migrations.AlterField(\n model_name='relationshipmanager',\n name='rm_update_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 363995)),\n ),\n migrations.AlterField(\n model_name='usertrackingactivity',\n name='session_out_time',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 11, 7, 11, 27, 381948), verbose_name=b'Session Out Time'),\n ),\n migrations.AddField(\n model_name='userrequeststatistictrack',\n name='user_id',\n field=models.ForeignKey(verbose_name=b'Customer Name', blank=True, to='CorpRoomApp.Customer', null=True),\n ),\n migrations.AddField(\n model_name='userbookingstatistictrack',\n name='user_id',\n field=models.ForeignKey(verbose_name=b'Customer Name', blank=True, to='CorpRoomApp.Customer', null=True),\n ),\n 
]\n","sub_path":"CorpRoomApp/migrations/0062_auto_20151211_0711.py","file_name":"0062_auto_20151211_0711.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"333029999","text":"import praw\nimport time\n\n##CONFIGS\n\nusername = 'captainmeta4'\nsubreddit = input('subreddit: /r/')\n\nprint('Input dates in format 01 Jan 2016')\nbegin_day = input('Begin Day: ')\nend_day = input('End Day: ')\n\n\n\nbegin_time = begin_day+\" 00:00:00\"\nend_time = end_day+\" 23:59:59\"\n\n##END CONFIGS\n\n#instanciate reddit and log in\nr=praw.Reddit('modmail counter by /u/captainmeta4, running under /u/'+username)\nr.login(username,input('password: '), disable_warning = True)\n\n#turn subreddit name into subreddit object\nsubreddit = r.get_subreddit(subreddit)\n\n#generator to return all message replies\n#yay recursion\ndef all_replies(message):\n yield message\n for reply in message.replies:\n for child in all_replies(reply):\n yield child\n\n#convert times to timestamps\n##step 1 - parse strings to strftime objects\nbegin_time = time.strptime(begin_time, \"%d %b %Y %H:%M:%S\")\nend_time = time.strptime(end_time, \"%d %b %Y %H:%M:%S\")\n\n##step 2 - turn strftime objects into epoch timestamps\nbegin_time = time.mktime(begin_time)\nend_time = time.mktime(end_time)\n\n#pre-load all current mods from modlist\nfirst_response = {}\ntotal_response = {}\nfor mod in r.get_moderators(subreddit):\n first_response[mod.name]=0\n total_response[mod.name]=0\n\n#load modmails\nfor modmail in r.get_mod_mail(subreddit, limit=5000):\n\n #ignore mod-created or admin-created threads\n if modmail.distinguished is not None:\n continue\n\n #check thread creation time - continue if thread too recent\n #not worrying about thread too old just yet\n if modmail.created_utc > end_time:\n continue\n\n #keep track of last-bumped time\n last_bumped = modmail.created_utc\n \n #keep track of first responder\n first_responder = True\n\n #keep track of who has points so far this thread\n thread_scored = []\n \n for reply in all_replies(modmail):\n\n #update last_bumped\n if reply.created_utc > last_bumped:\n last_bumped = reply.created_utc\n\n #check thread creation time again; skip over if too old\n #this is checked here (instead of above) so that we can get an accurate last_bumped\n if modmail.created_utc < begin_time:\n continue\n\n #ignore non-mod replies\n if reply.distinguished is None:\n continue\n\n #ignore replies whose authors already have points this thread\n if reply.author.name in thread_scored:\n continue\n thread_scored.append(reply.author.name)\n\n #check first responder; award point\n if first_responder == True:\n try:\n first_response[reply.author.name] += 1\n except KeyError:\n first_response[reply.author.name] = 1\n \n first_responder = False\n\n #award participation point\n try:\n total_response[reply.author.name] += 1\n except KeyError:\n total_response[reply.author.name] = 1\n\n #check last bumped - this will be an accurate termination criteria\n if last_bumped < begin_time:\n break\n\n#sort by first_response - modlist will contain the mod sort order\nmodlist = []\nfor entry in first_response:\n i=0\n for mod in modlist:\n if first_response[entry] < first_response[mod]:\n i+=1\n else:\n break\n modlist.insert(i,entry)\n \nprint('##Participation in user-initiated modmail threads')\nprint('')\nprint('Mod|First Response|Total Participation')\nprint('-|-|-')\n\n#now iterate through the sorted modlist to output the copy-pasteable 
text\nfor entry in modlist:\n\n if entry in first_response:\n score = {'name':entry, 'first':first_response[entry], 'total':total_response[entry]}\n elif entry not in first_response:\n score = {'name':entry, 'first':0, 'total':total_response[entry]}\n\n print(\"%(name)s | %(first)s | %(total)s\" % score)\n \n","sub_path":"modmail_counter.py","file_name":"modmail_counter.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"364391857","text":"#********************** NLP **********************#\n# H.W 1\n# Haim Shalelashvily 200832780\n# Zahi Kfir 200681476\n#********************** NLP **********************#\n\nimport sys, os, codecs, re, time\n\n# replace every linefid symbol with closeRowLineFid symbols (support multiple text editors)\ndef ReplaceLFwithCRLF(text):\n # most text have only line fid symbol - change it to closeRowLineFid symbol to support multiple text editors\n text = str(text).replace(\"\\n\",\"\\r\\n\")\n # if we had already a closeLine symbol we will no have two closeLines symbols togther. Remove one \n return str(text).replace(\"\\r\\r\",\"\\r\")\n\n# split a text to sentences using the given delimiters list\ndef SplitTextToSentences(text, delimiters):\n # replace all the line fid characters to CloseRowLineFid symbols\n text = ReplaceLFwithCRLF(text)\n\n # get an iterator to all the occurances of the delimiters and their concataneted regular expressions , also the one that ends with double quotes\n ReResult = re.finditer(\"[\" + ''.join(delimiters) + \"][\" + ''.join(delimiters) + \"]*(\\\")*\",text)\n\n # add new line fid after every occurance\n SentencesList = []\n LastIndex = 0;\n for iter in ReResult:\n # get the index of the end of the regular expression\n currentEndingIndex = iter.regs[0][1] \n # check if we found a fraction. format : digit*.digit*\n if (text[currentEndingIndex-1]==\".\" and currentEndingIndex>1 and text[currentEndingIndex].isdigit() and text[currentEndingIndex-2].isdigit()):\n # in this case we don't need to add a line fid , we just found a number\n continue\n # check if we found a fullstop mark inside a ward. format : alpha.alpha\n if (text[currentEndingIndex-1]==\".\" and currentEndingIndex>1 and text[currentEndingIndex].isalpha() and text[currentEndingIndex-2].isalpha()):\n # in this case we don't need to add a line fid\n continue\n # add the current sentences to the sentences list\n SentencesList.append(text[LastIndex:currentEndingIndex])\n # update the last handled index\n LastIndex = currentEndingIndex\n\n # add the last sentence if available\n SentencesList.append(text[LastIndex:])\n # concatenate all the sentences by adding the CloseRowLineFid between them\n text = \"\\r\\n\".join(SentencesList)\n # return an array of non empty sentences ( Remove also the old closeLineLineFid symbols)\n return [str(sentence) for sentence in text.split(\"\\r\\n\") if ((not str(sentence).isspace()) and len(sentence) > 0)]\n\n\ndef SeparateTokens(text): \n # Always add spaces before and after this tokens\n SimpleSeparators = [\".\" , \"!\" , \"?\" , \",\" , \":\" , \";\" , '<' , '>' , '@' , '#', '$' , '%' , '^' , '&' , '*' , '(' , ')' , '+', '=', '[' , ']' , '{' , '}' , \"/\" , \"\\\\\" , '_' , '~',\"-\",\"'\" ,'\"' ]\n\n # Add spaces before and after this tokens only when not between 2 letters (???\"? ?'?? 
??-?????)\n SpecialSeparators = [\"-\",\"'\" , '\"']\n \n # Add spaces before and after this tokens only when not between 2 Digits (1,000,000 5.2)\n SpecialDigitSeparators = [\".\",\",\",\"/\"]\n\n # Holds separated tokens\n TokonsList = []\n\n # The last char index that was checked\n LastCheckedIndex = 0;\n\n # Check the first character\n if (text[0] in SimpleSeparators):\n if text[1] != ' ': \n TokonsList.append(text[0]) \n LastCheckedIndex = 1\n\n # loop each character,Except the first and last character, searching for separator tokens\n Index = 1;\n StartTime = time.clock()\n while Index < (len(text)-1):\n if not( (text[Index] in SpecialSeparators) and (text[Index-1].isalpha()) and (text[Index+1].isalpha()) ): # leave ' \" - when in a word\n if not( (text[Index] in SpecialDigitSeparators) and (text[Index-1].isdigit()) and (text[Index+1].isdigit()) ): # leave . , when in a digit\n if text[Index] in SimpleSeparators:\n if (text[Index-1] != ' ') and (not(text[Index-1] in SimpleSeparators)):\n TokonsList.append(text[LastCheckedIndex:Index]) \n LastCheckedIndex = Index\n if text[Index+1] != ' ': \n TokonsList.append(text[LastCheckedIndex:Index+1]) \n LastCheckedIndex = Index+1\n Index = Index+1\n \n # Check the last character\n if (text[-1] in SimpleSeparators) and text[-2] != ' ':\n TokonsList.append(text[LastCheckedIndex:-1]) \n TokonsList.append(text[-1]) \n else:\n TokonsList.append(text[LastCheckedIndex:])\n \n print(\"\\t\\t Splited in \",time.clock() - StartTime,\" sec\")\n return \" \".join(TokonsList)\n\n\n\n#get the command line argument\nif len(sys.argv) < 2: sys.exit(\"Please enter a data directory path\")\ncurrentDir = sys.argv[1]\nprint(\"The data folder is: \" + currentDir)\n\n\n# Section 1:\n# get all the txt file paths from the given data directory\ntxtFilesList = [ os.path.join(currentDir, f) for f in os.listdir(currentDir) if (os.path.isfile(os.path.join(currentDir, f)) & str(f).endswith(\".txt\"))]\n\n# set the list of line ending delimiters\nlineEndingDelimiters = [\".\",\"!\",\"?\"]\n\n# seperate each text to sentences \nfor f in txtFilesList:\n print(\"Seperating \" + f + \" to sentences\")\n \n # open the input file and create an output file\n inputFileStream = codecs.open(f,\"r\",\"utf-8\")\n outputFileStream = codecs.open(f[0:(f.rfind(\".txt\"))] + \"_sentences.txt\" , \"w\", \"utf-8\")\n\n #read the text and split it by the line ending delimiters\n listOfSentences = SplitTextToSentences(inputFileStream.read(), lineEndingDelimiters)\n outputFileStream.writelines((\"%s\\r\\n\" % l for l in listOfSentences))\n outputFileStream.close()\n\n\n# section 2:\ntxtFilesList = [ os.path.join(currentDir, f) for f in os.listdir(currentDir) if (os.path.isfile(os.path.join(currentDir, f)) & str(f).endswith(\"_sentences.txt\"))]\n\nprint(\"Seperating tokens:\" ) \nfor f in txtFilesList:\n print(\"\\t Processing \" + f) \n StartTime = time.clock()\n\n # open the input file and create an output file\n inputFileStream = codecs.open(f,\"r\",\"utf-8\")\n outputFileStream = codecs.open(f[0:(f.rfind(\"_sentences.txt\"))] + \".out.txt\" , \"w\", \"utf-8\")\n\n #read the text and seperate tokens\n outputFileStream.writelines((SeparateTokens(inputFileStream.read())))\n outputFileStream.close()\n\n print(\"\\t\\t Done in \",time.clock() - StartTime,\" sec\")\n\n# section 3:\n# get all the txt file paths from the given data directory\ntxtFilesList = [ os.path.join(currentDir, f) for f in os.listdir(currentDir) if (os.path.isfile(os.path.join(currentDir, f)) & 
str(f).endswith(\".out.txt\"))]\n\n# class for creating a counted dictionary\nfrom collections import Counter\nglobalFreqs = Counter()\n\nfor f in txtFilesList:\n print(\"Creating a frequency list of file : \" + f)\n \n # open the input file\n inputFileStream = codecs.open(f,\"r\",\"utf-8\")\n \n # add the dictionary of words of the current text file to the global freq dictionary\n globalFreqs.update(Counter(inputFileStream.read().split()))\n\n# sort the dictionary from high freq to low\nglobalFreqs = globalFreqs.most_common()\n\n# output the freqs table to the output file\noutputFileStream = codecs.open(currentDir + \"\\\\freqlist.txt\" , \"w\", \"utf-8\")\noutputFileStream.writelines((\"%15d\\t%15s\\t%15d \\r\\n\" % (idx + 1, val[0], val[1]) for idx, val in enumerate(globalFreqs)))\noutputFileStream.close()\n\n","sub_path":"CorporaDrill/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"107290208","text":"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport csv\nimport logging\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import matthews_corrcoef, f1_score\n\nfrom pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME\nfrom pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule\nfrom pytorch_pretrained_bert import BertModel, BertConfig\nfrom torch.nn.init import xavier_uniform_\nfrom models.neural import MultiHeadedAttention, PositionwiseFeedForward\nimport glob\n\nlogger = logging.getLogger(__name__)\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\n#################################################\n#常量定义\n#################################################\nmodel_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']\n\n#################################################\n#数据参数\n#################################################\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, src, labels, segs, clss,src_txt,tgt_txt):\n \"\"\"Constructs a InputExample.\n \"\"\"\n\n self.src = src #indexed_tokens\n self.labels=labels #labels = labels[:len(cls_ids)],labels是句子下标\n self.segs = segs #segments_ids #句子段标记\n self.clss = clss #CLS标记index--cls_ids = [i for i, t in enumerate(src_subtoken_idxs) if t == self.cls_vid]\n self.src_txt = src_txt\n self.tgt_txt = tgt_txt\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, src,labels,segs ,clss,src_mask ,cls_mask):\n self.src = src\n self.labels = labels\n self.segs = segs\n self.clss = clss\n self.src_mask = src_mask\n self.mask_cls = 
cls_mask\n\n#################################################\n#数据加载\n#################################################\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n return self.load_dataset(data_dir,\"train\")\n\n def get_dev_examples(self, data_dir):\n return self.load_dataset(data_dir, \"valid\")\n\n def get_test_examples(self, data_dir):\n return self.load_dataset(data_dir, \"test\")\n\n def load_dataset(self,data_dir, corpus_type, shuffle= False):\n \"\"\"\n Dataset generator. Don't do extra stuff here, like printing,\n because they will be postponed to the first loading time.\n\n Args:\n corpus_type: 'train' or 'valid'\n Returns:\n A list of dataset, the dataset(s) are lazily loaded.\n \"\"\"\n assert corpus_type in [\"train\", \"valid\", \"test\"]\n examples = []\n\n # Sort the glob output by file name (by increasing indexes).\n pts = sorted(glob.glob(os.path.join(data_dir, f\"*{corpus_type}.[0-9]*.pt\")))\n if pts:\n if (shuffle):\n random.shuffle(pts)\n\n for pt in pts:\n dataset = torch.load(pt)\n logger.info('Loading %s dataset from %s, number of examples: %d' %\n (corpus_type, pt, len(dataset)))\n\n for ex in dataset:\n if (len(ex['src']) == 0):\n logger.warning('src is 0.')\n continue\n examples.append(\n InputExample(src= ex['src'],\n labels =ex['labels'],\n segs = ex['segs'],\n clss = ex['clss'],\n src_txt = ex['src_txt'],\n tgt_txt = ex['tgt_txt']))\n\n else:\n # Only one inputters.*Dataset, simple!\n\n pt = os.path.join(data_dir, f\".{corpus_type}.pt\")\n dataset = torch.load(pt)\n logger.info('Loading %s dataset from %s, number of examples: %d' % (corpus_type, pt, len(dataset)))\n\n for ex in dataset:\n if(len(ex['src']) == 0):\n logger.warning('src is 0.')\n continue\n examples.append(\n InputExample(src=ex['src'],\n labels=ex['labels'],\n segs=ex['segs'],\n clss=ex['clss'],\n src_txt=ex['src_txt'],\n tgt_txt=ex['tgt_txt']))\n logger.info('Loading %s dataset from %s, total of examples: %d' % (corpus_type, data_dir, len(examples)))\n return examples\n\n#######################################################################\n#转换数据格式\n#######################################################################\ndef _pad(self, data, pad_id, width=-1):\n if (width == -1):\n for d in data:\n width = max(len(d),width)\n\n rtn_data = [d + [pad_id] * (width - len(d)) for d in data]\n return rtn_data\n\n\ndef convert_examples_to_features(examples):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n width_src = 0\n width_labels = 0\n width_segs = 0\n width_clss = 0\n\n for x in examples:\n width_src = max(len(x.src),width_src)\n width_labels = max(len(x.labels),width_labels)\n width_segs = max(len(x.segs),width_segs)\n width_clss = max(len(x.clss),width_clss)\n\n logger.info(f\"width_src:{width_src},width_labels:{width_labels},width_segs:{width_segs},width_clss:{width_clss}\")\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n pre_src = example.src\n pre_labels = example.labels\n pre_segs = example.segs\n pre_clss = example.clss\n\n # Zero-pad up to the sequence length.\n src_padding = [0] * (width_src - len(pre_src))\n src = pre_src+src_padding\n src = torch.tensor(src)\n\n src_mask = [1] * len(pre_src)\n src_mask += src_padding\n\n labels_padding = [0] * (width_labels - len(pre_labels))\n labels = pre_labels +labels_padding\n labels = 
torch.tensor(labels)\n\n        segs_padding = [0] * (width_segs - len(pre_segs))\n        segs = pre_segs + segs_padding\n        segs = torch.tensor(segs)\n\n        clss_padding = [0] * (width_clss - len(pre_clss))\n        clss = pre_clss + clss_padding\n        clss = torch.tensor(clss)\n\n        cls_mask = [1] * len(pre_clss)\n        cls_mask +=clss_padding\n\n\n        if ex_index < 5:\n            logger.info(\"*** Example ***\")\n            logger.info(\"src: %s\" % \" \".join([str(x.item()) for x in src]))\n            logger.info(\"src_mask: %s\" % \" \".join([str(x) for x in src_mask]))\n            logger.info(\"labels: %s\" % \" \".join([str(x.item()) for x in labels]))\n            logger.info(\"segs: %s\" % \" \".join([str(x.item()) for x in segs]))\n            logger.info(\"clss: %s\" % \" \".join([str(x.item()) for x in clss]))\n            logger.info(\"cls_mask: %s\" % \" \".join([str(x) for x in cls_mask]))\n\n        features.append(\n                InputFeatures(\n                    src \t\t=src,\n                    labels\t =labels,\n                    segs \t\t=segs,\n                    clss\t\t=clss,\n                    src_mask =src_mask,\n                    cls_mask =cls_mask\n                    ))\n    return features\n\n\n#################################################\n# Model\n#################################################\n\nclass PositionalEncoding(nn.Module):\n\n    def __init__(self, dropout, dim, max_len=5000):\n        pe = torch.zeros(max_len, dim)\n        position = torch.arange(0, max_len).unsqueeze(1)\n        div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *\n                              -(math.log(10000.0) / dim)))\n        pe[:, 0::2] = torch.sin(position.float() * div_term)\n        pe[:, 1::2] = torch.cos(position.float() * div_term)\n        pe = pe.unsqueeze(0)\n        super(PositionalEncoding, self).__init__()\n        self.register_buffer('pe', pe)\n        self.dropout = nn.Dropout(p=dropout)\n        self.dim = dim\n\n    def forward(self, emb, step=None):\n        emb = emb * math.sqrt(self.dim)\n        if (step):\n            emb = emb + self.pe[:, step][:, None, :]\n\n        else:\n            emb = emb + self.pe[:, :emb.size(1)]\n        emb = self.dropout(emb)\n        return emb\n\n    def get_emb(self, emb):\n        return self.pe[:, :emb.size(1)]\n\nclass TransformerEncoderLayer(nn.Module):\n    def __init__(self, d_model, heads, d_ff, dropout):\n        super(TransformerEncoderLayer, self).__init__()\n\n        self.self_attn = MultiHeadedAttention(\n            heads, d_model, dropout=dropout)\n        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)\n        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, iter, query, inputs, mask):\n        if (iter != 0):\n            input_norm = self.layer_norm(inputs)\n        else:\n            input_norm = inputs\n\n        mask = mask.unsqueeze(1)\n        context = self.self_attn(input_norm, input_norm, input_norm,\n                                 mask=mask)\n        out = self.dropout(context) + inputs\n        return self.feed_forward(out)\n\n\nclass TransformerInterEncoder(nn.Module):\n    def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0):\n        super(TransformerInterEncoder, self).__init__()\n        self.d_model = d_model\n        self.num_inter_layers = num_inter_layers\n        self.pos_emb = PositionalEncoding(dropout, d_model)\n        self.transformer_inter = nn.ModuleList(\n            [TransformerEncoderLayer(d_model, heads, d_ff, dropout)\n             for _ in range(num_inter_layers)])\n        self.dropout = nn.Dropout(dropout)\n        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n        self.wo = nn.Linear(d_model, 1, bias=True)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, top_vecs, mask):\n        \"\"\" See :obj:`EncoderBase.forward()`\"\"\"\n\n        batch_size, n_sents = top_vecs.size(0), top_vecs.size(1)\n        pos_emb = self.pos_emb.pe[:, :n_sents]\n        x = top_vecs * mask[:, :, None].float()\n        x = x + pos_emb\n\n        for i in range(self.num_inter_layers):\n            x = self.transformer_inter[i](i, x, x, 1 - mask)  # 
all_sents * max_tokens * dim\n\n        x = self.layer_norm(x)\n        sent_scores = self.sigmoid(self.wo(x))\n        sent_scores = sent_scores.squeeze(-1) * mask.float()\n\n        return sent_scores\n\n\nclass Bert(nn.Module):\n    def __init__(self, temp_dir, load_pretrained_bert, bert_config,bert_model):\n        super(Bert, self).__init__()\n        if(load_pretrained_bert):\n            self.model = BertModel.from_pretrained(bert_model, cache_dir=temp_dir)\n        else:\n            self.model = BertModel(bert_config)\n\n    def forward(self, x, segs, mask):\n        encoded_layers, _ = self.model(x, segs, attention_mask =mask)\n        top_vec = encoded_layers[-1]\n        return top_vec\n\n\n\nclass Summarizer(nn.Module):\n    def __init__(self, args, device, load_pretrained_bert = False, bert_config = None, bert_model = \"bert-base-uncased\"):\n        print(\"Summarizer\")\n        super(Summarizer, self).__init__()\n        self.args = args\n        self.device = device\n        self.bert = Bert(args.cache_dir, load_pretrained_bert, bert_config, bert_model)\n\n        self.encoder = TransformerInterEncoder(self.bert.model.config.hidden_size, args.ff_size, args.heads,\n                                               args.dropout, args.inter_layers)\n\n        if args.param_init != 0.0:\n            for p in self.encoder.parameters():\n                p.data.uniform_(-args.param_init, args.param_init)\n        if args.param_init_glorot:\n            for p in self.encoder.parameters():\n                if p.dim() > 1:\n                    xavier_uniform_(p)\n\n        self.to(device)\n    def load_cp(self, pt):\n        self.load_state_dict(pt['model'], strict=True)\n\n    def forward(self, x, segs, clss, mask, mask_cls, sentence_range=None):\n\n        top_vec = self.bert(x, segs, mask)\n        sents_vec = top_vec[torch.arange(top_vec.size(0)).unsqueeze(1), clss]\n        sents_vec = sents_vec * mask_cls[:, :, None].float()\n        sent_scores = self.encoder(sents_vec, mask_cls).squeeze(-1)\n        return sent_scores, mask_cls\n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    ## Required parameters\n    parser.add_argument(\"--data_dir\",\n                        default=None,\n                        type=str,\n                        required=True,\n                        help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n    parser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n                        help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n                        \"bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, \"\n                        \"bert-base-multilingual-cased, bert-base-chinese.\")\n    parser.add_argument(\"--output_dir\",\n                        default=None,\n                        type=str,\n                        required=True,\n                        help=\"The output directory where the model predictions and checkpoints will be written.\")\n\n    ## Other parameters\n    parser.add_argument(\"--cache_dir\",\n                        default=\"\",\n                        type=str,\n                        help=\"Where do you want to store the pre-trained models downloaded from s3\")\n    parser.add_argument(\"--max_seq_length\",\n                        default=128,\n                        type=int,\n                        help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n                             \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n                             \"than this will be padded.\")\n    parser.add_argument(\"--do_train\",\n                        action='store_true',\n                        help=\"Whether to run training.\")\n    parser.add_argument(\"--do_eval\",\n                        action='store_true',\n                        help=\"Whether to run eval on the dev set.\")\n    parser.add_argument(\"--do_lower_case\",\n                        action='store_true',\n                        help=\"Set this flag if you are using an uncased model.\")\n    parser.add_argument(\"--train_batch_size\",\n                        default=32,\n                        type=int,\n                        help=\"Total batch size for training.\")\n    parser.add_argument(\"--eval_batch_size\",\n                        default=8,\n                        type=int,\n                        help=\"Total batch size for eval.\")\n    parser.add_argument(\"--learning_rate\",\n                        default=5e-5,\n                        type=float,\n                        help=\"The initial learning rate for Adam.\")\n    parser.add_argument(\"--num_train_epochs\",\n                        default=3.0,\n                        type=float,\n                        help=\"Total number of training epochs to perform.\")\n    parser.add_argument(\"--warmup_proportion\",\n                        default=0.1,\n                        type=float,\n                        help=\"Proportion of training to perform linear learning rate warmup for. \"\n                             \"E.g., 0.1 = 10%% of training.\")\n    parser.add_argument(\"--no_cuda\",\n                        action='store_true',\n                        help=\"Whether not to use CUDA when available\")\n    parser.add_argument(\"--local_rank\",\n                        type=int,\n                        default=-1,\n                        help=\"local_rank for distributed training on gpus\")\n    parser.add_argument('--seed',\n                        type=int,\n                        default=42,\n                        help=\"random seed for initialization\")\n    parser.add_argument('--gradient_accumulation_steps',\n                        type=int,\n                        default=1,\n                        help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n    parser.add_argument('--fp16',\n                        action='store_true',\n                        help=\"Whether to use 16-bit float precision instead of 32-bit\")\n    parser.add_argument('--loss_scale',\n                        type=float, default=0,\n                        help=\"Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n\n parser.add_argument(\"--bert_config_path\", default='',type=str)\n parser.add_argument(\"--train_from\",default=\"\",type=str,help=\"Loading checkpoint from\")\n parser.add_argument(\"-hidden_size\", default=128, type=int)\n parser.add_argument(\"-ff_size\", default=512, type=int)\n parser.add_argument(\"-heads\", default=4, type=int)\n parser.add_argument(\"-inter_layers\", default=2, type=int)\n parser.add_argument(\"-param_init\", default=0, type=float)\n parser.add_argument(\"-param_init_glorot\", type=str2bool, nargs='?',const=True,default=True)\n parser.add_argument(\"-dropout\", default=0.1, type=float)\n\n args = parser.parse_args()\n\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n\n logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n if args.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n args.gradient_accumulation_steps))\n\n args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n if not args.do_train and not args.do_eval:\n raise ValueError(\"At least one of `do_train` or `do_eval` must be True.\")\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:\n raise ValueError(\"Output directory ({}) already exists and is not empty.\".format(args.output_dir))\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n #tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n\n # Prepare model\n cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))\n\n model = Summarizer(args, device, load_pretrained_bert=True,bert_config=args.bert_config_path,bert_model=args.bert_model)\n\n model.to(device)\n\n if n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n #准备训练\n if args.do_train:\n #加载数据\n train_examples = DataProcessor().get_train_examples(args.data_dir)\n train_features = convert_examples_to_features(train_examples)\n\n 
all_src = torch.tensor([f.src for f in train_features], dtype=torch.long)\n all_labels = torch.tensor([f.labels for f in train_features], dtype=torch.long)\n all_segs = torch.tensor([f.segs for f in train_features], dtype=torch.long)\n all_clss = torch.tensor([f.clss for f in train_features], dtype=torch.long)\n all_src_mask = torch.tensor([f.src_mask for f in train_features], dtype=torch.long)\n all_mask_cls = torch.tensor([f.mask_cls for f in train_features], dtype=torch.long)\n\n train_data = TensorDataset(all_src, all_labels, all_segs, all_clss,all_src_mask,all_mask_cls)\n\n train_sampler = DistributedSampler(train_data)\n\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer\n\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n optimizer = BertAdam(optimizer_grouped_parameters,\n lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_examples))\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_optimization_steps)\n\n model.train()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/my_bertsum.py","file_name":"my_bertsum.py","file_ext":"py","file_size_in_byte":23308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"496943938","text":"import pygame\r\nimport glm\r\nfrom glm import vec2, vec3, vec4, mat4\r\n\r\nimport math\r\n\r\nfrom constants import *\r\n\r\nfrom typing import TYPE_CHECKING\r\nif TYPE_CHECKING:\r\n from main import App\r\n\r\nclass SpriteEntity(pygame.sprite.Sprite):\r\n \"\"\"\r\n Base class for objects that will be drawn to the screen and derive from pygame.sprite.Sprite\r\n \"\"\"\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.world_offset = vec3(0)\r\n\r\n def update(self, dt):\r\n self.rect.x += int(self.world_offset.x)\r\n self.rect.y += int(self.world_offset.y)\r\n\r\nclass CelestialObject(SpriteEntity):\r\n def __init__(self, center, **kwargs):\r\n super().__init__() \r\n self.__id = '0'\r\n\r\n self.neighbours = pygame.sprite.Group()\r\n\r\n radius = kwargs.pop(\"radius\", 0)\r\n self.density = kwargs.pop(\"density\", PLANET_DEFAULT_DENSITY)\r\n \r\n self.__radius = self.__correct_radius(radius)\r\n self.mass = self.density*(4/3*math.pi*(self.__radius**3)) \r\n \r\n self.F = vec3(0)\r\n self.acc = vec3(0)\r\n self.vel = vec3(0)\r\n self.pos = vec3(center[0], center[1], 0)\r\n\r\n size = 2*self.__radius+2\r\n surf = pygame.Surface([size]*2, pygame.SRCALPHA)\r\n pygame.draw.circle(surf, PLANET_COLOR, [self.__radius]*2, self.__radius)\r\n self.image = surf.convert_alpha()\r\n self.rect = self.image.get_rect(center=center)\r\n\r\n # Store the original image copy to prevent scale transform artifacts\r\n self.__zero_image = self.image.convert_alpha()\r\n\r\n self.force_just_calcd = False \r\n\r\n ###\r\n ### Properties\r\n ###\r\n\r\n @property\r\n def id(self):\r\n return self.__id\r\n\r\n @id.setter\r\n def 
id(self, id):\r\n self.__id = id\r\n\r\n @property\r\n def radius(self):\r\n \"\"\"\r\n Getter for radius\r\n \"\"\"\r\n return self.__radius\r\n\r\n @radius.setter\r\n def radius(self, r):\r\n \"\"\"\r\n Setter for radius\r\n - Resets image, rect, mass and radius\r\n \"\"\"\r\n r = self.__correct_radius(r)\r\n size = 2*r+2 \r\n surf = pygame.Surface([size]*2, pygame.SRCALPHA)\r\n pygame.draw.circle(surf, PLANET_COLOR, [r]*2, r)\r\n self.image = surf.convert_alpha()\r\n self.rect = self.image.get_rect(center=self.rect.center) \r\n self.mass = self.density*r**3\r\n self.__radius = r\r\n\r\n # Store original image copy to prevent scale transform artifacts\r\n self.__zero_image = self.image.convert_alpha()\r\n\r\n @property\r\n def acceleration(self):\r\n return self.acc\r\n\r\n @property\r\n def velocity(self):\r\n \"\"\"\r\n Getter for velocity\r\n \"\"\"\r\n return self.vel\r\n\r\n @velocity.setter\r\n def velocity(self, vel : vec3, use_actual_value = False):\r\n \"\"\"\r\n Setter for velocity\r\n - Takes the length of an arrow and sets the velocity proportional to it.\r\n \"\"\"\r\n if use_actual_value:\r\n self.vel.x = vel.x\r\n self.vel.y = vel.y\r\n self.vel.z = 0\r\n else:\r\n self.vel.x = vel.x * ARROW_TO_VEL_RATIO\r\n self.vel.y = vel.y * ARROW_TO_VEL_RATIO\r\n self.vel.z = 0\r\n\r\n @property\r\n def position(self):\r\n return self.pos\r\n\r\n @position.setter\r\n def position(self, pos):\r\n if isinstance(pos, tuple):\r\n self.pos = vec3(pos[0], pos[1], 0)\r\n self.rect.center = pos\r\n elif isinstance(pos, vec3):\r\n self.pos = pos\r\n self.rect.center = (pos.x, pos.y)\r\n\r\n ###\r\n ### Public functions\r\n ###\r\n\r\n def update(self, dt):\r\n \"\"\"\r\n Update function\r\n \"\"\"\r\n super().update(dt) # This is a waste right now but will leave\r\n \r\n # Euler function\r\n self.__integration_euler()\r\n\r\n # Update rect position from actual position\r\n self.rect.center = (int(self.pos.x), int(self.pos.y))\r\n\r\n # Update image + rect (but not radius?) 
for zoom\r\n zoom_factor = 1\r\n if self.world_offset.z == 0:\r\n self.image = self.__zero_image.convert_alpha()\r\n self.rect = self.image.get_rect(center = (self.rect.center))\r\n else:\r\n # Calc new drawing diameter\r\n zoom_factor = (self.world_offset.z+100)/100\r\n draw_diam = int(self.__radius*2*zoom_factor)\r\n \r\n # Check if we need to scale\r\n if draw_diam != self.rect.width:\r\n # Check if we even need to show the body anymore\r\n if draw_diam > 0:\r\n new_size = [draw_diam]*2\r\n print(f\"Zoom factor: {zoom_factor} Drawing to new size {new_size} from {self.rect.width},{self.rect.height}\")\r\n self.image = pygame.transform.scale(self.__zero_image, new_size)\r\n else:\r\n surf = pygame.Surface((1,1)).convert_alpha()\r\n surf.fill(PLANET_COLOR)\r\n self.image = surf\r\n\r\n self.rect = self.image.get_rect(center = (self.rect.center))\r\n\r\n # Update rect position based on world offset\r\n wx = zoom_factor*(self.rect.center[0] + int(self.world_offset.x))\r\n wy = zoom_factor*(self.rect.center[1] + int(self.world_offset.y))\r\n center_in_world = (wx, wy)\r\n self.rect.center = center_in_world\r\n\r\n\r\n ###\r\n ### Private functions \r\n ###\r\n\r\n def __correct_radius(self, radius):\r\n \"\"\"\r\n Private function to limit radius min and max\r\n (used by __init__ and in radius @setter)\r\n \"\"\"\r\n if radius > PLANET_MAX_RADIUS:\r\n return PLANET_MAX_RADIUS\r\n elif radius < PLANET_MIN_RADIUS:\r\n return PLANET_MIN_RADIUS\r\n\r\n return radius\r\n \r\n def __integration_euler(self):\r\n '''\r\n Calculates the new position and velocity using Euler integration method.\r\n \r\n The force is also added to the other object so it doesn't have to be \r\n calculated twice.\r\n '''\r\n for obj in self.neighbours:\r\n if obj != self:\r\n f = self.__get_force(obj)\r\n self.F.x += f.x\r\n self.F.y += f.y\r\n if not self.force_just_calcd: \r\n obj.F.x -= f.x\r\n obj.F.y -= f.y\r\n obj.force_just_calcd = True\r\n self.force_just_calcd = True\r\n \r\n self.acc.x = self.F.x / self.mass\r\n self.acc.y = self.F.y / self.mass\r\n \r\n self.pos.x += self.vel.x * DELTA_T + 0.5 * self.acc.x * DELTA_T\r\n self.pos.y += self.vel.y * DELTA_T + 0.5 * self.acc.y * DELTA_T\r\n \r\n self.vel.x += self.acc.x * DELTA_T\r\n self.vel.y += self.acc.y * DELTA_T\r\n \r\n self.F = vec3(0) #resets force for the next iteration\r\n \r\n def __get_force(self, obj) -> vec3:\r\n '''\r\n Return the force between self and obj.\r\n '''\r\n vect = vec3(obj.pos.x - self.pos.x, obj.pos.y - self.pos.y, 0)\r\n dist = glm.distance(self.pos, obj.pos)\r\n factor = self.mass * obj.mass / dist**3 #Power of 3 because the directional vector is not normalized\r\n return vec3(vect.x*factor, vect.y*factor, 0)\r\n \r\nclass TransientDrawEntity():\r\n \"\"\"\r\n Base class for Objects that will be drawn to screen but do not derive from pygame.sprite.Sprite\r\n \"\"\"\r\n def __init__(self):\r\n self.dead = False\r\n self.world_offset = vec3(0)\r\n\r\n def update(self, dt):\r\n pass\r\n\r\n def draw(self, surface : pygame.Surface):\r\n pass\r\n\r\nclass VelocityArrow(TransientDrawEntity):\r\n\r\n def __init__(self, start, parent : CelestialObject = None, **kwargs):\r\n super().__init__()\r\n\r\n self.parent : CelestialObject = parent\r\n \r\n if isinstance(start, tuple):\r\n self.start = vec3(start[0], start[1], 0)\r\n elif isinstance(start, vec3):\r\n self.start = start\r\n\r\n self.end = vec3(self.start.x+1, self.start.y+1, 0)\r\n\r\n self.color = kwargs.pop(\"color\", ARROW_COLOR_VEL)\r\n self.thickness = kwargs.pop(\"thickness\", 
ARROW_HALF_THICKNESS)\r\n        self.cap_angle = kwargs.pop(\"cap_angle\", ARROW_CAP_ANGLE)\r\n        self.cap_length = kwargs.pop(\"cap_length\", ARROW_CAP_LENGTH)\r\n        self.indicator_type = kwargs.pop(\"indicator_type\", None)\r\n\r\n        self.component = vec3(0)\r\n        self.length = 0.0\r\n        self.angle = 0.0\r\n\r\n    @property\r\n    def arrow_end(self):\r\n        return self.end\r\n\r\n    @arrow_end.setter\r\n    def arrow_end(self, e):\r\n        # print(f\"Updating arrow endpoint {e}\")\r\n        self.end = vec3(e[0], e[1], 0)\r\n        self.__limit_length()\r\n\r\n    def __limit_length(self):\r\n        length = glm.distance(self.start, self.end)\r\n        if length > ARROW_MAX_LENGTH:\r\n            self.end = vec3((self.end.x - self.start.x) / length * ARROW_MAX_LENGTH + self.start.x, \r\n                            (self.end.y - self.start.y) / length * ARROW_MAX_LENGTH + self.start.y,\r\n                            0)\r\n        self.length = glm.distance(self.start, self.end)\r\n\r\n    @property\r\n    def velocity_component(self) -> vec3:\r\n        self.angle = self.__calc_angle()\r\n        v = vec3(self.length * math.cos(self.angle), self.length * math.sin(self.angle), 0) \r\n        return v\r\n\r\n    def __calc_angle(self):\r\n        angle = math.atan2(self.start.y - self.end.y, self.end.x - self.start.x)\r\n        print(angle)\r\n        return angle\r\n\r\n    def __recalculate_for_celestial(self):\r\n        if self.parent:\r\n            if isinstance(self.parent, CelestialObject):\r\n                origin = self.parent.position\r\n                self.start = vec3(origin.x, origin.y, 0)\r\n\r\n                endx = 0\r\n                endy = 0\r\n                if self.indicator_type == TYPE_VEL:\r\n                    endx = origin.x+self.parent.velocity.x/ARROW_TO_VEL_RATIO\r\n                    endy = origin.y+self.parent.velocity.y/ARROW_TO_VEL_RATIO\r\n                elif self.indicator_type == TYPE_ACCEL:\r\n                    endx = origin.x+self.parent.acceleration.x/ARROW_TO_ACC_RATIO\r\n                    endy = origin.y+self.parent.acceleration.y/ARROW_TO_ACC_RATIO\r\n\r\n                self.end = vec3(endx, endy, 0)\r\n\r\n    def update(self, dt):\r\n        super().update(dt)\r\n        \r\n        # self.__update_angle() # TODO: This recalculation might not be necessary here\r\n        \r\n        self.__recalculate_for_celestial()\r\n\r\n        self.start += self.world_offset\r\n        self.end += self.world_offset\r\n\r\n    def draw(self, surface : pygame.Surface):\r\n        
super().draw(surface)\r\n pycol = pygame.Color(self.color[0], self.color[1], self.color[2])\r\n rtxt = self.font.render(self.text, False, pycol)\r\n rsiz = self.font.size(self.text)\r\n surface.blit(rtxt, (5, 5))\r\n\r\n","sub_path":"objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":12526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"383024178","text":"import user\n\n# or from user import User\nfrom post import Post\napp_user_one = user.User(\"rr@gg.com\", \"Riks R\", \"ppp1\", \"student\")\napp_user_one.get_user_info()\napp_user_one.change_status(\"in job market\")\napp_user_one.get_user_info()\n\n\napp_user_two = user.User(\"z43@gg.com\", \"Bobby L\", \"zz1\", \"student\")\napp_user_two.get_user_info()\n\nnew_post = Post(\"Going for it\", app_user_two.name)\nnew_post.get_post_info()","sub_path":"Classes_and_Objects.py","file_name":"Classes_and_Objects.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"96033003","text":"# -*- coding: utf-8 -*-\n#\nfrom .. import built_in\n\n\nclass VolumeBase(built_in.volume_base.VolumeBase):\n \"\"\"\n Increments the Volume ID every time a new volume object\n is created. Inherits from built_in VolumeBase.\n \"\"\"\n\n _ID = 0\n dimension = 3\n\n def __init__(self, is_list=False, id0=None):\n super(VolumeBase, self).__init__(id0=id0)\n\n self.is_list = is_list\n if is_list:\n self.id += \"[]\"\n return\n\n def char_length_code(self, char_length):\n if char_length is None:\n return []\n\n return [\n \"pts_{}[] = PointsOf{{Volume{{{}}};}};\".format(self.id, self.id),\n \"Characteristic Length{{pts_{}[]}} = {};\".format(self.id, char_length),\n ]\n","sub_path":"pygmsh/opencascade/volume_base.py","file_name":"volume_base.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"348255403","text":"\"\"\" ---------------------------------------------------------------------------\nWritten in Python 3.5\n\n C = breaks a program stuck in an infinite loop\n--------------------------------------------------------------------------- \"\"\"\nimport tkinter\n\n\nclass Window(tkinter.Frame):\n\n def __init__(self, master=None):\n tkinter.Frame.__init__(self, master)\n self.master = master\n self.init_window()\n\n def init_window(self):\n self.master.title(\"GUI\")\n self.pack(fill=tkinter.BOTH, expand=1)\n our_menu = tkinter.Menu(self.master)\n self.master.config(menu=our_menu)\n file = tkinter.Menu(our_menu)\n file.add_command(label='Exit', command=self.client_exit)\n our_menu.add_cascade(label='File', menu=file)\n new_menu_item = tkinter.Menu(our_menu)\n new_menu_item.add_command(label='New Dropdown Item')\n our_menu.add_cascade(label='New Menu Item', menu=new_menu_item)\n\n def client_exit(self):\n root.destroy()\n\nroot = tkinter.Tk()\nroot.geometry(\"400x300\")\napp = Window(root)\n\nroot.mainloop()\n\n\"\"\" ---------------------------------------------------------------------------\nMulti-line comments\n--------------------------------------------------------------------------- \"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python/RRTemplateDevelopment.py","file_name":"RRTemplateDevelopment.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} 
+{"seq_id":"419704689","text":"#!/usr/bin/env python\nfrom kafka import KafkaProducer\nfrom flask import Flask\napp = Flask(__name__)\nevent_logger = KafkaProducer(bootstrap_servers='kafka:29092')\nevents_topic = 'events'\n\n@app.route(\"/\")\ndef default_response():\n event_logger.send(events_topic, 'default'.encode())\n return \"\\nThis is the default response!\\n\"\n\n@app.route(\"/purchase_a_sword\")\ndef purchase_sword():\n # business logic to purchase sword\n # log event to kafka\n event_logger.send(events_topic, 'purchased_sword'.encode())\n return \"\\nSword Purchased!\\n\"\n\n@app.route(\"/purchase_a_knife\")\ndef purchase_knife():\n # business logic to purchase knife\n # log event to kafka\n event_logger.send(events_topic, 'purchased_knife'.encode())\n return \"\\nKnife Purchased!\\n\"\n\n@app.route(\"/join_a_guild\")\ndef join_guild():\n # business logic to join guild\n # log event to kafka\n event_logger.send(events_topic, 'guild_joined'.encode())\n return \"\\nGuild Joined!\\n\"\n\n\n\n\n\n\n","sub_path":"assignment-09/game_api.py","file_name":"game_api.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"98192184","text":"\"\"\"server URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# from django.contrib import admin\nfrom django.urls import path\nfrom apps.endpoints.views import decode, encode, homePage, hide,retrieve, statusMeter, statusId\n\nurlpatterns = [\n path(\"decode/\", decode, name=\"decode\"),\n path(\"encode/\", encode, name=\"encode\"),\n path(\"status/\", statusId, name=\"statusId\"),\n path(\"status/\", statusMeter, name=\"statusMeter\"),\n path(\"\", homePage, name=\"homePage\"),\n path(\"hide\", hide, name=\"hide\"),\n path(\"retrieve\", retrieve, name=\"retrieve\")\n]\n","sub_path":"backend/server/server/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"280280079","text":"\"\"\" This is a script to take each MIMII dataset\nfile from specified folder, make a spectrogram of it, and save it as a png\nin a destination folder.\n\nnote: The chunk error simply means unsupported metadata at the end of the \nwav files(possibly not present after librosa update?)\"\"\"\n\n# imports\nfrom scipy.io import wavfile\nimport scipy.io as sio\n\nfrom scipy import signal\nfrom scipy.fft import fftshift\nfrom PIL import Image\n# ^ old ----------------------------\n\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\n\nimport sys\nimport os\nimport re\n\n\ndef main():\n if len(sys.argv) < 3: # test cli args (script needs two arguments)\n print(\"use: python script.py <wav_folder> <dest_folder>\")\n return 0\n\n # extract the cli args\n wav_folder = sys.argv[1]\n dest_folder = sys.argv[2] \n \n # get file count\n _, _, files = next(os.walk(wav_folder))\n file_count = len(files)\n\n # check or make the destination folder\n if not os.path.exists(dest_folder):\n os.mkdir(dest_folder)\n print(\"Directory\", dest_folder, \"was created\")\n else:\n print(\"Directory\", dest_folder, \"already exists\")\n\n # get the pump id\n regex_pattern = r'id_\\d{2}'\n pump_id = re.search(regex_pattern, wav_folder).group(0)\n \n counter = 0\n # go through the folder and apply transform/save func\n for wav_file in files:\n image_fname = dest_folder + \"/\" + 'spectro_' + wav_file[:-4] + pump_id\n make_save_spectro(wav_folder + '/' + wav_file, image_fname)\n counter += 1\n print(f\"finished {counter}, file_count is {file_count}.\", end='\\r', flush=True)\n\n print(\"Finished spectrogram conversion.\")\n\ndef make_save_spectro(wav_fname, image_fname):\n \"\"\" inputs:\n wav_fname: string, wav filename\n image_fname: string, png filename\n spectro_func(TODO): string, options: normal, log, mfcc \n\n Makes a spectrogram from an incoming wav file, and \n saves it as a png named image_fname\n \"\"\"\n # extract info\n y, sr = librosa.load(wav_fname)\n\n # create spectro figure (currently mfcc)\n fig = plt.figure(figsize=(4,4))\n plt.axis('off')\n spectro_data = librosa.feature.mfcc(y=y, sr=sr)\n librosa.display.specshow(data=spectro_data, sr=sr, x_axis=\"time\",\n cmap=\"plasma\")\n\n # save\n fig.savefig(image_fname)\n plt.close()\n\ndef make_save_spectro_old(wav_fname, image_fname): \n \"\"\" \n DEPRECATED\n inputs:\n wav_fname: string, wav filename\n image_fname: string, png filename\n Makes a spectrogram from an incoming wav file, and \n saves it as a png named image_fname\n \"\"\"\n # extract info\n samplerate, data = sio.wavfile.read(wav_fname) # problem here?\n length = data.shape[0] / samplerate # sample/sample_rate = time\n\n # make spectrogram, needs to be square\n freqs, time_segs, spectro_array = signal.spectrogram(data[:, 
0], samplerate)\n # save plot no whitespace \n plt.figure(figsize=(6,6))\n fig = plt.pcolormesh(time_segs, freqs, spectro_array)\n plt.axis('off')\n fig.axes.get_xaxis().set_visible(False)\n fig.axes.get_yaxis().set_visible(False)\n plt.savefig(image_fname, bbox_inches='tight', pad_inches = 0)\n plt.close()\n\n # reopen and crop with PIL\n im = Image.open(image_fname + '.png')\n width, height = im.size\n # left, top, right, bottom, from top left\n im1 = im.crop((0, 0, width - 3, height))\n im1.save(image_fname + '.png')\n im.close()\n im1.close()\n\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"wav-to-png-script.py","file_name":"wav-to-png-script.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"281248298","text":"import numpy as np\nimport src.model as md\nimport src.train as t\nimport src.utils as ut\n\n\n# Support functions/classes\nclass HMM:\n def __init__(self, pi, t, mu, cov):\n self.pi = pi\n self.t = t\n self.mu = mu\n self.cov = cov\n\n def _simulate(self, N):\n def draw_from_multinomial(i):\n return np.where(np.random.multinomial(1, i) == 1)[0][0]\n\n states = np.zeros(N, dtype=np.int)\n states[0] = draw_from_multinomial(self.pi)\n\n for n in range(1, N):\n states[n] = draw_from_multinomial(self.t[states[n - 1], :])\n return states\n\n def generate(self, N):\n states = self._simulate(N)\n\n distributions = []\n for mu, cov in zip(self.mu, self.cov):\n distributions.append(np.random.multivariate_normal(mu, cov, N).T)\n\n data = np.zeros([N, self.mu[0].shape[0]])\n for n, s in enumerate(states):\n data[n] = distributions[s][:, n]\n\n return data, states\n\n\n# Unit tests\ndef main():\n\n k = 4\n n = 100000\n batch_size = 20\n seq_len = 100\n\n mu = np.array([[-10, -10, -10], [0, 0, 0], [5, 5, 5], [20, 0, 10]])\n cov = np.array([np.eye(3) for _ in range(k)])\n\n p = np.array([[0.97, 0.03, 0., 0.],\n [0., 0.97, 0.03, 0.],\n [0., 0.02, 0.98, 0.],\n [0.03, 0., 0., 0.97]])\n pi = np.array([1 / k for _ in range(k)])\n\n generating_hmm = HMM(pi, p, mu, cov)\n data, true_states = generating_hmm.generate(n)\n\n data_test = data[:int(n * 0.8)]\n true_test = data[:int(n * 0.8)]\n\n data_valid = data[int(n * 0.8):]\n true_valid = data[int(n * 0.8):]\n\n model = md.LatentRNN(3)\n test_loader = ut.DataLoader(data_test, batch_size, seq_len)\n valid_loader = ut.DataLoader(data_valid, batch_size, seq_len)\n\n losses, valid_losses = t.train(model, test_loader, valid_loader,\n num_epochs=10)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"RVAE/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"358529678","text":"import os\n\nimport gitlab\n\n\nif __name__ == '__main__':\n config_file = os.path.expanduser(\"~/.python-gitlab.cfg\")\n gl = gitlab.Gitlab.from_config('vauxoo', [config_file])\n project = gl.projects.get(\"vauxoo/bistro\")\n # print(gl.projects.list())\n # print(project)\n mrs = project.mergerequests.list(state=\"opened\")\n for mr in mrs:\n diffs = mr.diffs.list()\n # print(len(diffs))\n # print(mr)\n # print(diffs)\n # print(dir(mr))\n for change in mr.changes()['changes']:\n print(change['diff'])\n # for diff in diffs:\n # print(diff)\n # print(dir(diff))\n # print(diff.list())\n # print(diff.short_print())\n # print(diff.display(True))\n # break\n # print(mrs, 
len(mrs))\n","sub_path":"gitlab_api.py","file_name":"gitlab_api.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"182799155","text":"def run_timing():\n runs = 0\n time = 0\n\n while True:\n run_time = input(\"Enter 10 km run time: \")\n if not run_time:\n break\n runs += 1\n time += float(run_time)\n\n average = time / runs\n print(f\"Average: {average}, Runs: {runs}\")\n\n\nrun_timing()","sub_path":"ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"67268352","text":"import os\nfrom flask import Flask\nfrom flask_migrate import Migrate\nfrom flask_restful import Api\n\nfrom app import models\nfrom config import app_configuration\nfrom app.views.assets import AssetResource\n\n\ndef create_app(environment):\n \"\"\"Initialize Flask App\"\"\"\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_object(app_configuration[environment])\n models.db.init_app(app)\n \n migrate = Migrate(app,models.db)\n @app.route('/')\n def index():\n return \"Welcome to La Cisco\"\n\n api = Api(app)\n\n api.add_resource(AssetResource,\n '/api/v1/assets', '/api/v1/assets/',\n endpoint='assets')\n\n return app\n\n\napp = create_app(os.getenv(\"FLASK_CONFIG\"))\n\nif __name__ == \"__main__\":\n environment = os.getenv(\"FLASK_CONFIG\")\n app = create_app(environment)\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"258601093","text":"# -*- coding: UTF-8 -*-\n# File: noname.py\n# Author: Yuxin Wu \n\nfrom .base import ImageAugmentor\nimport numpy as np\nimport cv2\n\n__all__ = ['Flip', 'MapImage', 'Resize']\n\nclass Flip(ImageAugmentor):\n def __init__(self, horiz=False, vert=False, prob=0.5):\n \"\"\"\n Random flip.\n Args:\n horiz, vert: True/False\n \"\"\"\n if horiz and vert:\n raise ValueError(\"Please use two Flip, with both 0.5 prob\")\n elif horiz:\n self.code = 1\n elif vert:\n self.code = 0\n else:\n raise ValueError(\"Are you kidding?\")\n self.prob = prob\n self._init()\n\n def _augment(self, img):\n if self._rand_range() < self.prob:\n img.arr = cv2.flip(img.arr, self.code)\n if img.coords:\n raise NotImplementedError()\n\n\nclass MapImage(ImageAugmentor):\n def __init__(self, func):\n self.func = func\n\n def _augment(self, img):\n img.arr = self.func(img.arr)\n\n\nclass Resize(ImageAugmentor):\n \"\"\"Resize image to a target size\"\"\"\n def __init__(self, shape):\n \"\"\"\n Args:\n shape: (h, w)\n \"\"\"\n self._init(locals())\n\n def _augment(self, img):\n img.arr = cv2.resize(\n img.arr, self.shape[::-1],\n interpolation=cv2.INTER_CUBIC)\n","sub_path":"tensorpack/dataflow/imgaug/noname.py","file_name":"noname.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"407856901","text":"# Written by Eric Martin for COMP9021\n# 托雷斯祝您VAN♂事如意\n# 抄袭有风险,学到才是自己的\n\nimport sys\nfrom random import seed, randint, randrange\n\n\ntry:\n arg_for_seed, upper_bound, length =\\\n (int(x) for x in input('Enter three integers: ').split())\nexcept ValueError:\n print('Incorrect input, giving up.')\n sys.exit()\n\n\n\ndef length_of_longest_increasing_sequence(L):\n if len(L) == 0:\n return 0\n LL = L[:]\n LL.extend(L)\n a = b = n = 0\n for i 
in LL:\n if i >= n:\n a += 1\n else:\n b = max(a,b)\n a = 1\n n = i\n if max(a,b) > len(L):\n return False\n else:\n return max(a,b)\n\n\n\t\n\ndef max_int_jumping_in(L):\n if len(L) == 0:\n return 0\n l = []\n for i in range(len(L)):\n l.append(i)\n\n a = b = maxx = 0 \n ll = []\n maxz = ''\n for a in range(len(L)):\n b = a\n while b not in ll:\n ll.append(b)\n maxz += str(L[b])\n b = L[b]\n if maxz != '':\n maxx = max(maxx, int(maxz))\n maxz , ll = '', []\n return maxx\n\n \n\nseed(arg_for_seed)\nL_1 = [randint(0, upper_bound) for _ in range(length)]\nprint('L_1 is:', L_1)\nprint('The length of the longest increasing sequence\\n'\n ' of members of L_1, possibly wrapping around, is:',\n length_of_longest_increasing_sequence(L_1), end = '.\\n\\n'\n )\nL_2 = [randrange(length) for _ in range(length)]\nprint('L_2 is:', L_2)\nprint('The maximum integer built from L_2 by jumping\\n'\n ' as directed by its members, from some starting member\\n'\n ' and not using any member more than once, is:',\n max_int_jumping_in(L_2)\n )\n\n\n\n","sub_path":"Quiz/simple/quiz_3的参考.py","file_name":"quiz_3的参考.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"69610986","text":"from django.shortcuts import render, redirect\nfrom .models import Appointment\nfrom django.contrib import messages\nimport datetime\n\ndef index(request):\n return render(request, 'appointments/index.html')\n\ndef success(request):\n if 'appointment_id' in request.session:\n appointment_id = request.session['appointment_id']\n\n appointment = Appointment.objects.get(id=appointment_id)\n\n context = {\n 'appointment': appointment\n }\n\n return render(request, 'appointments/success.html', context)\n\ndef schedule(request):\n if request.method == 'POST':\n errors = Appointment.objects.validateDate(request.POST)\n\n appointments = Appointment.objects.all()\n date_try = request.POST['date']\n\n if not errors:\n for appointment in appointments:\n if Appointment.objects.filter(date__contains=date_try):\n errors.append(\"That time is not available\")\n\n # appointment = Appointment.objects.create(\n # date = request.POST['date'],\n # time = request.POST['time']\n # )\n\n if appointment:\n\n request.session['appointment_id'] = appointment.id\n\n return redirect('/success')\n\n else:\n if errors:\n for error in errors:\n messages.error(request, error)\n\n return redirect('/')\n\n\ndef logout(request):\n if 'appointment_id' in request.session:\n request.session.pop('appointment_id')\n return redirect('/')\n","sub_path":"lillian_klasen/appointments/apps/appointments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"62622178","text":"\"\"\"workspace URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,re_path\nfrom tellme import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n]\n\nurlpatterns += [\n path('', views.index),\n re_path(r'^login/$', views.login_view),\n re_path(r'^logout/$', views.logout_view),\n re_path(r'^changepass/$', views.change_pass),\n re_path(r'^search/', views.search),\n re_path(r'^delete/(?P\\w+)/(?P\\w+)/$', views.delete),\n re_path(r'^jtid/(\\d*)', views.jtid),\n re_path(r'^jtidadd/', views.jtid_add),\n re_path(r'^cid/(\\d+)/', views.cid),\n re_path(r'^cidadd/', views.cid_add),\n re_path(r'^cidedit/(\\d+)', views.cid_edit),\n re_path(r'^siteadd/(\\d+)', views.site_add),\n re_path(r'^siteedit/(\\d+\\w+)', views.site_edit),\n re_path(r'^add/(?P\\w+)/(?P\\w+)/$', views.svc_add),\n re_path(r'^edit/(?P\\w+)/(?P\\w+)/$', views.svc_edit),\n re_path(r'^tag/(?P\\w+)/(?P\\w+)/$', views.tag),\n re_path(r'^photo/(\\w+)', views.photo),\n re_path(r'^report/(\\w+)', views.report),\n re_path(r'^trashfile/(\\w+)/(\\d+)', views.trashfile),\n re_path(r'^history/', views.history),\n]\n","sub_path":"workspace/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"148630624","text":"# -*- coding: utf-8 -*-\r\n\r\nimport json\r\nfrom datetime import datetime as dt\r\n\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport requests\r\nfrom dash.dependencies import Input, Output, State\r\n\r\nfrom app import app\r\n\r\n\r\n# List of possible Wialon error responces\r\n#\r\ndef error_decode(code):\r\n errors = {\r\n 0: 'ошибка не определена',\r\n 1: 'недействительная сессия',\r\n 2: 'неверное имя сервиса',\r\n 3: 'неверный результат',\r\n 4: 'неверный формат или значение параметров',\r\n 6: 'несуществующий id создателя либо ошибка создания пользователя',\r\n 7: 'доступ запрещён',\r\n 1002: 'пользователь с таким уникальным свойством уже существует'\r\n }\r\n return errors[code] if code in errors.keys() else errors[0]\r\n\r\n\r\n# List of access mask templates\r\n#\r\naccess_templates = {\r\n 'g_base': 0x80C600C205, # Шаблон группы \"Основной\"\r\n 'g_view': 0x4223, # Шаблон группы \"Только чтение\"\r\n 'g_full': 0xCCF7F0FFFF, # Шаблон группы \"Полный доступ\"\r\n 'r_template': 0x15100001 # Шаблон ресурса \"TN_Шаблон\" ResourceID 18964209\r\n}\r\n\r\n\r\n# List of core server elements ID's\r\n# CCD means Customers Care Department, FDA means Fuel Drain Analyser\r\n#\r\nserver_elements = {\r\n 'hosting': {\r\n 'user_ccd': 13763012,\r\n 'user_fda': 13746666\r\n },\r\n 'local': {\r\n 'user_ccd': 2603,\r\n 'user_fda': 8575\r\n },\r\n 'pro': {\r\n 'user_ccd': 2976,\r\n 'user_fda': 13189\r\n }\r\n}\r\n\r\n\r\n# Список доступных серверов (новый)\r\n#\r\nserver_list = html.Div(\r\n id='server_list',\r\n children=[\r\n '{\"type\": \"hosting\", \"name\": \"Hosting\", \"url\": \"http://hst-api.wialon.com/wialon/ajax.html\", '\r\n '\"billing\": \"Тариф Основной\"}',\r\n '{\"type\": \"local\", \"name\": \"TN-Group Local\", \"url\": \"http://local.tn-group.net/wialon/ajax.html\", '\r\n '\"billing\": \"Тариф основной\"}',\r\n '{\"type\": \"pro\", \"name\": \"TN-Group Pro\", \"url\": \"http://144.76.79.144:8026/ajax.html\", '\r\n '\"billing\": \"Тариф основной\"}',\r\n '{\"type\": \"local\", \"name\": \"TN-Group Local2\", \"url\": \"http://gps.tn-group.net/wialon/ajax.html\", '\r\n '\"billing\": \"Тариф 
основной\"}',\r\n '{\"type\": \"add\", \"name\": \"Добавить новый сервер\", \"url\": \"\"}'\r\n ]\r\n)\r\n\r\n\r\n# Списк доступных для работы пользователей в формате {'label': Имя, 'value': ID}\r\nuser_list = html.Div(\r\n id='user_list'\r\n)\r\n\r\n\r\n# Обновление списка доступных для работы пользователей при авторизации/деавторизации\r\n@app.callback(\r\n Output('user_list', 'children'),\r\n [Input('session', 'children')],\r\n [State('server_list', 'children')]\r\n)\r\ndef update_user_list(json_session, servers):\r\n temp_user_list = [{'label': '', 'value': ''}]\r\n session = json.loads(json_session)\r\n if session['status'] == 'authorised':\r\n temp_user_list.append({'label': session['unm'], 'value': session['uid']})\r\n server = json.loads(servers[session['server']])\r\n params = '{}'\r\n if server['type'] == 'pro':\r\n params = json.dumps({\r\n 'spec': {\r\n 'itemsType': 'user',\r\n 'propName': 'sys_name',\r\n 'propValueMask': '*',\r\n 'sortType': 'sys_name',\r\n },\r\n 'force': 0,\r\n 'flags': 33,\r\n 'from': 0,\r\n 'to': 4294967295\r\n })\r\n elif (server['type'] == 'local') | (server['type'] == 'hosting'):\r\n params = json.dumps({\r\n 'spec': {\r\n 'itemsType': 'user',\r\n 'propName': 'sys_name',\r\n 'propValueMask': '*',\r\n 'sortType': 'sys_name',\r\n 'propType': 'property',\r\n 'or_logic': 1\r\n },\r\n 'force': 0,\r\n 'flags': 1,\r\n 'from': 0,\r\n 'to': 0\r\n })\r\n r = json.loads(wialon_request(server['type'], server['url'], session['sid'], 'core/search_items', params))\r\n if 'error' not in r:\r\n for user in r['items']:\r\n if server['type'] == 'pro':\r\n temp_user_list.append({'label': user['nm'], 'value': user['gd']})\r\n elif (server['type'] == 'local') | (server['type'] == 'hosting'):\r\n temp_user_list.append({'label': user['nm'], 'value': user['id']})\r\n json_user_list = json.dumps(temp_user_list)\r\n return json_user_list\r\n\r\n\r\n# Выпадающий список с выбором пользователя\r\nsingle_user_select = html.Div(\r\n id='single_user_select_block',\r\n style={'display': 'block'},\r\n children=[\r\n 'Выбор пользователя:',\r\n dcc.Dropdown(id='single_user_select_dropdown')\r\n ]\r\n)\r\n\r\n\r\n# Генерация выпадающего списка с выбором пользователя при обновлении списка доступных пользователей\r\n@app.callback(\r\n Output('single_user_select_dropdown', 'options'),\r\n [Input('user_list', 'children')]\r\n)\r\ndef generate_single_user_select_dropdown(json_user_list):\r\n if json_user_list is not None:\r\n return json.loads(json_user_list)\r\n else:\r\n return [{'label': '', 'value': ''}]\r\n\r\n\r\n# Список доступных для работы объектов в формате 'label': Имя, 'value': ID}\r\nobject_list = html.Div(\r\n id='object_list'\r\n)\r\n\r\n\r\n# Обновление списка доступных объектов при выборе пользоватедя\r\n#\r\n@app.callback(\r\n Output('object_list', 'children'),\r\n [Input('single_user_select_dropdown', 'value')],\r\n [State('server_list', 'children'), State('session', 'children')]\r\n)\r\ndef generate_object_list(user, servers, json_session):\r\n temp_object_list = [{'label': '', 'value': ''}]\r\n if (user != '') & (user is not None):\r\n session = json.loads(json_session)\r\n server = json.loads(servers[session['server']])\r\n params = '{}'\r\n if server['type'] == 'pro':\r\n params = json.dumps({\r\n 'spec': {\r\n 'itemsType': 'avl_unit',\r\n 'propName': 'sys_user_creator',\r\n 'propValueMask': str(user),\r\n 'sortType': 'sys_name',\r\n },\r\n 'force': 0,\r\n 'flags': 1,\r\n 'from': 0,\r\n 'to': 4294967295\r\n })\r\n elif (server['type'] == 'local') | (server['type'] == 
'hosting'):\r\n params = json.dumps({\r\n 'spec': {\r\n 'itemsType': 'avl_unit',\r\n 'propName': 'sys_user_creator',\r\n 'propValueMask': str(user),\r\n 'sortType': 'sys_name',\r\n 'propType': 'property',\r\n 'or_logic': 1\r\n },\r\n 'force': 0,\r\n 'flags': 1,\r\n 'from': 0,\r\n 'to': 0\r\n })\r\n r = json.loads(wialon_request(server['type'], server['url'], session['sid'], 'core/search_items', params))\r\n if 'error' not in r:\r\n for object_ in r['items']:\r\n temp_object_list.append({'label': object_['nm'], 'value': object_['id']})\r\n json_object_list = json.dumps(temp_object_list)\r\n return json_object_list\r\n\r\n\r\n# Выпадающий список с выбором объекта\r\nsingle_object_select = html.Div(\r\n id='single_object_select_block',\r\n style={'display': 'block'},\r\n children=[\r\n 'Выбор объекта:',\r\n dcc.Dropdown(id='single_object_select_dropdown')\r\n ]\r\n)\r\n\r\n\r\n# Генерация выпадающего списка с выбором объекта при обновлении списка доступных объектов\r\n@app.callback(\r\n Output('single_object_select_dropdown', 'options'),\r\n [Input('object_list', 'children')]\r\n)\r\ndef generate_single_user_select_list(json_object_list):\r\n if json_object_list is not None:\r\n return json.loads(json_object_list)\r\n else:\r\n return [{'label': '', 'value': ''}]\r\n\r\n\r\n# Список доступных для работы ресурсов в формате 'label': Имя, 'value': ID}\r\nresource_list = html.Div(\r\n id='resource_list'\r\n)\r\n\r\n\r\n# Обновление списка доступных ресурсов при выборе пользоватедя\r\n@app.callback(\r\n Output('resource_list', 'children'),\r\n [Input('single_user_select_dropdown', 'value')],\r\n [State('server_list', 'children'), State('session', 'children')]\r\n)\r\ndef generate_resource_list(user, servers, json_session):\r\n print('selected user has changed')\r\n temp_resource_list = [{'label': '', 'value': ''}]\r\n session = json.loads(json_session)\r\n if (user != '') & (user is not None):\r\n server = json.loads(servers[session['server']])\r\n params = '{}'\r\n if server['type'] == 'pro':\r\n params = json.dumps({\r\n 'spec': {\r\n 'itemsType': 'avl_resource',\r\n 'propName': 'sys_user_creator',\r\n 'propValueMask': str(user),\r\n 'sortType': 'sys_name',\r\n },\r\n 'force': 0,\r\n 'flags': 257,\r\n 'from': 0,\r\n 'to': 4294967295\r\n })\r\n elif (server['type'] == 'local') | (server['type'] == 'hosting'):\r\n params = json.dumps({\r\n 'spec': {\r\n 'itemsType': 'avl_resource',\r\n 'propName': 'sys_user_creator',\r\n 'propValueMask': str(user),\r\n 'sortType': 'sys_name',\r\n 'propType': 'property',\r\n 'or_logic': 1\r\n },\r\n 'force': 0,\r\n 'flags': 257,\r\n 'from': 0,\r\n 'to': 0\r\n })\r\n r = json.loads(wialon_request(server['type'], server['url'], session['sid'], 'core/search_items', params))\r\n if 'error' not in r:\r\n for resource in r['items']:\r\n temp_resource_list.append({'label': resource['nm'], 'value': resource['id']})\r\n json_resource_list = json.dumps(temp_resource_list)\r\n return json_resource_list\r\n\r\n\r\n# Выпадающий список с выбором ресурса\r\nsingle_resource_select = html.Div(\r\n id='single_resource_select_block',\r\n style={'display': 'block'},\r\n children=[\r\n 'Выбор ресурса:',\r\n dcc.Dropdown(id='single_resource_select_dropdown')\r\n ]\r\n)\r\n\r\n\r\n# Генерация выпадающего списка с выбором ресурса при обновлении списка доступных ресурсов\r\n@app.callback(\r\n Output('single_resource_select_dropdown', 'options'),\r\n [Input('resource_list', 'children')]\r\n)\r\ndef generate_single_user_select_list(temp_resource_list):\r\n if temp_resource_list is not 
None:\r\n return json.loads(temp_resource_list)\r\n else:\r\n return [{'label': '', 'value': ''}]\r\n\r\n\r\n# Список доступных параметров\r\nmessage_param_list = html.Div(\r\n id='message_param_list'\r\n)\r\n\r\n\r\n# Генерация списка доступных параметров при выборе объекта(на основании последних 10 сообщений)\r\n@app.callback(\r\n Output('message_param_list', 'children'),\r\n [Input('single_object_select_dropdown', 'value')],\r\n [State('server_list', 'children'), State('session', 'children')]\r\n)\r\ndef get_message_data_from_object(object_, servers, json_session):\r\n data_list = [{'label': '', 'value': ''}]\r\n session = json.loads(json_session)\r\n if (object_ != '') & (object_ is not None):\r\n server = json.loads(servers[session['server']])\r\n params = '{}'\r\n r = {'error': 'get_message_data_from_object: server is unknown'}\r\n if server['type'] == 'pro':\r\n params = json.dumps({\r\n 'itemId': int(object_),\r\n 'type': 3,\r\n 'ival1': 10,\r\n 'ival2': int(dt.utcnow().timestamp()),\r\n 'flags': 0,\r\n 'flagsMask': 8388608,\r\n 'loadLocations': 0,\r\n 'loadCount': 50\r\n })\r\n r = json.loads(wialon_request(\r\n server['type'], server['url'], session['sid'], 'messages/load_interval', params\r\n ))\r\n elif (server['type'] == 'local') | (server['type'] == 'hosting'):\r\n params = json.dumps({\r\n 'itemId': int(object_),\r\n 'lastTime': int(dt.utcnow().timestamp()),\r\n 'lastCount': 10,\r\n 'flags': 0,\r\n 'flagsMask': 8388608,\r\n 'loadCount': 10\r\n })\r\n r = json.loads(wialon_request(\r\n server['type'], server['url'], session['sid'], 'messages/load_last', params\r\n ))\r\n if 'error' not in r:\r\n message_params = []\r\n for message in r['messages']:\r\n for param in message['p']:\r\n if param not in message_params:\r\n message_params.append(param)\r\n for param in message_params:\r\n data_list.append({'label': param, 'value': param})\r\n return json.dumps(data_list)\r\n\r\n\r\n# Выпадающий список с выбором параметра\r\nsingle_message_param_select = html.Div(\r\n id='single_message_param_select_block',\r\n style={'display': 'block'},\r\n children=[\r\n 'Выбор параметра:',\r\n dcc.Dropdown(id='single_message_param_select_dropdown')\r\n ]\r\n)\r\n\r\n\r\n# Генерация выпадающего списка с выбором параметра при обновлении списка доступных параметров\r\n@app.callback(\r\n Output('single_message_param_select_dropdown', 'options'),\r\n [Input('message_param_list', 'children')]\r\n)\r\ndef generate_single_message_param_select_list(temp_message_param_list):\r\n if temp_message_param_list is not None:\r\n return json.loads(temp_message_param_list)\r\n else:\r\n return [{'label': '', 'value': ''}]\r\n\r\n\r\n# Универсальный запрос к WialonAPI\r\ndef wialon_request(server_type, server_url, sid, request, params):\r\n \"\"\"\r\n Sending request to Wialon server\r\n\r\n :param str server_type: [pro|local|hosting], server_list[session['server']]['type']\r\n :param str server_url: address to ajax.html, server_list[session['server']]['url']\r\n :param int sid: current session id, session['sid']\r\n :param str request: 'svc' parameter from docs.wialon.com\r\n :param str params: json parsed list of 'params' from docs.wialon.com\r\n :return str: json parsed list from request.text\r\n \"\"\"\r\n if server_url is not None:\r\n if server_type == 'pro':\r\n r = requests.post(\r\n url=server_url,\r\n params='svc=' + request +\r\n '&ssid=' + sid +\r\n '¶ms=' + params,\r\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\r\n )\r\n if 'messages' in request:\r\n requests.post(\r\n 
url=server_url,\r\n data='svc=messages/unload¶ms={}&sid=' + sid,\r\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\r\n )\r\n if 'error' in r.text:\r\n print('Request: {0}'.format(request))\r\n print('Params: {0}'.format(params))\r\n print('Response: {0}'.format(r.text))\r\n return r.text\r\n elif (server_type == 'hosting') | (server_type == 'local'):\r\n r = requests.post(\r\n url=server_url,\r\n data=('svc=' + request +\r\n '&sid=' + sid +\r\n '¶ms=' + params).encode('utf-8'),\r\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\r\n )\r\n if 'messages' in request:\r\n requests.post(\r\n url=server_url,\r\n data='svc=messages/unload¶ms={}&sid=' + sid,\r\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\r\n )\r\n if 'error' in r.text:\r\n print('Request: {0}'.format(request))\r\n print('Params: {0}'.format(params))\r\n print('Response: {0}'.format(r.text))\r\n return r.text\r\n else:\r\n print('{0} error: The server is not recognized'.format(request))\r\n return '{\"error\": \"The server is not recognized\"}'\r\n else:\r\n print('{0} error: The server URL is empty'.format(request))\r\n return '{\"error\": \"The server URL is empty\"}'\r\n","sub_path":"wms/global_elements.py","file_name":"global_elements.py","file_ext":"py","file_size_in_byte":17612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"133825007","text":"\nimport unittest\nimport pandas as pd\nimport io\n\nfrom pydlennon.extensions.pandas.ext_categorical import ExtCategoricalDtype, ExtCategorical\n\n\n_testdata = \"\"\"\n,gender,ideo,partyid3,race\n19641,1.0,,1.0,1.0\n19642,2.0,,3.0,1.0\n19643,2.0,,3.0,\n19644,2.0,,3.0,1.0\n19645,2.0,,2.0,2.0\n19646,2.0,,1.0,2.0\n19647,1.0,,1.0,1.0\n19648,2.0,,1.0,1.0\n19649,1.0,,1.0,1.0\n19650,1.0,,3.0,1.0\n19651,2.0,,2.0,2.0\n19652,2.0,,1.0,1.0\n\"\"\".strip()\n\n\nclass ExtCategoricalTestCase(unittest.TestCase):\n\n def setUp(self):\n def _read_csv(**_kw):\n fp = io.StringIO( _testdata )\n kw = {\n 'index_col' : 'record_id',\n 'header' : 0,\n 'names' : [\n 'record_id',\n 'gender',\n 'ideo',\n 'partyid3',\n 'race'\n ],\n }\n\n kw.update(**_kw)\n df = pd.read_csv(fp, **kw)\n\n return df\n\n self.loader = _read_csv\n\n self.xcdtype_gender = ExtCategoricalDtype( [\n (1, 'male'),\n (2, 'female')\n ])\n\n self.xcdtype_race = ExtCategoricalDtype( [\n (1, 'white'),\n (2, 'black'),\n (3, 'other')\n ])\n\n self.xcdtype_educ = ExtCategoricalDtype([\n (1, 'grade school'),\n (2, 'high school'),\n (3, 'some college'),\n (4, 'college or advanced degree')\n ], ordered=True)\n\n\n self.gender_data_s = [\"1\",\"1\",\"2\",\"2\",\"1\",\"2\"]\n self.gender_data_i = [1,1,2,2,1,2]\n self.educ_data_i = [2,3,3,1,2,4,3,2]\n\n\n # ----\n\n @unittest.skip\n def test_loader(self):\n df = self.loader()\n raise Exception(repr(df))\n\n # ----\n\n def test_xcdtype_init(self):\n self.assertEqual(self.xcdtype_gender._ext, [(1,2),('male','female')] )\n self.assertEqual(repr(self.xcdtype_gender), \"ExtCategoricalDtype(categories=[1, 2], ordered=False)\")\n\n # ----\n\n def test_xcdtype_init_ordered(self):\n self.assertEqual(repr(self.xcdtype_educ), \"ExtCategoricalDtype(categories=[1, 2, 3, 4], ordered=True)\")\n\n # ----\n\n def test_series_from_sequence(self):\n xc = ExtCategorical._from_sequence_of_strings(self.gender_data_s, dtype=self.xcdtype_gender)\n s = pd.Series(xc)\n self.assertEqual(repr(s), u\"0 1\\n1 1\\n2 2\\n3 2\\n4 1\\n5 2\\ndtype: ext_category\")\n\n\n xc = 
ExtCategorical._from_sequence_of_strings(self.gender_data_i, dtype=self.xcdtype_gender)\n s = pd.Series(xc)\n self.assertEqual(repr(s), u\"0 1\\n1 1\\n2 2\\n3 2\\n4 1\\n5 2\\ndtype: ext_category\")\n\n # ----\n\n def test_create_from_series(self):\n s = pd.Series(self.gender_data_i, dtype=self.xcdtype_gender)\n self.assertEqual(repr(s), u\"0 1\\n1 1\\n2 2\\n3 2\\n4 1\\n5 2\\ndtype: ext_category\")\n\n # ----\n\n def test_ordered(self):\n sex = pd.Series(self.gender_data_i, dtype=self.xcdtype_gender)\n educ = pd.Series(self.educ_data_i, dtype=self.xcdtype_educ)\n\n with self.assertRaises(TypeError):\n sex.min()\n\n self.assertEqual(educ.min(), 1)\n self.assertEqual(educ.max(), 4)\n\n # ----\n\n def test_xcat_relevel(self):\n educ = pd.Series(self.educ_data_i, dtype=self.xcdtype_educ)\n s = educ.xcat.relevel(1)\n\n self.assertEqual(type(s.dtype), ExtCategoricalDtype)\n\n self.assertEqual(repr(s), u'0 high school\\n' \\\n '1 some college\\n' \\\n '2 some college\\n' \\\n '3 grade school\\n' \\\n '4 high school\\n' \\\n '5 college or advanced degree\\n' \\\n '6 some college\\n' \\\n '7 high school\\n' \\\n 'dtype: ext_category' )\n\n # ----\n\n def test_csv_import(self):\n kw = {\n 'dtype' : {\n 'gender' : self.xcdtype_gender\n }\n }\n df = self.loader(**kw)\n\n self.assertEqual(type(df.gender.dtype), ExtCategoricalDtype)\n\n s = df.gender.xcat.relevel(1)\n self.assertEqual(repr(s), u\"record_id\\n\" \\\n \"19641 male\\n\" \\\n \"19642 female\\n\" \\\n \"19643 female\\n\" \\\n \"19644 female\\n\" \\\n \"19645 female\\n\" \\\n \"19646 female\\n\" \\\n \"19647 male\\n\" \\\n \"19648 female\\n\" \\\n \"19649 male\\n\" \\\n \"19650 male\\n\" \\\n \"19651 female\\n\" \\\n \"19652 female\\n\" \\\n \"dtype: ext_category\")\n # ----\n\n def test_dropna(self):\n kw = {\n 'dtype' : {\n 'gender' : self.xcdtype_gender,\n 'race' : self.xcdtype_race\n },\n 'usecols' : ['record_id', 'gender', 'race']\n }\n df = self.loader(**kw)\n df_clean = df.dropna()\n dtypes = df_clean.dtypes\n\n gender_coded = df_clean.gender\n gender = gender_coded.xcat.relevel(1)\n\n race_coded = df_clean.race\n race = race_coded.xcat.relevel(1)\n\n\n self.assertTrue( (dtypes == 'ext_category').all() )\n self.assertListEqual( gender_coded.unique().to_list(), [1,2] ) \n self.assertListEqual( gender.unique().to_list(), ['male', 'female'])\n self.assertListEqual( race.unique().to_list(), ['white', 'black'])\n self.assertListEqual( race.dtype.categories.to_list(), ['white', 'black','other'])\n\n\n # ----\n\n def test_assign_to_dataframe(self):\n kw = {\n 'dtype' : {\n 'gender' : self.xcdtype_gender,\n 'race' : self.xcdtype_race\n },\n 'usecols' : ['record_id', 'gender', 'race']\n }\n df = self.loader(**kw)\n df.loc[:,'xx'] = df.race.xcat.relevel(1)\n\n self.assertEqual( \n repr(df.xx.head()),\n u\"record_id\\n\" \\\n \"19641 white\\n\" \\\n \"19642 white\\n\" \\\n \"19643 NaN\\n\" \\\n \"19644 white\\n\" \\\n \"19645 black\\n\" \\\n \"Name: xx, dtype: ext_category\"\n )\n\n\n\n # ----\n\n def test_zero_count_behavior(self):\n kw = {\n 'dtype' : {\n 'gender' : self.xcdtype_gender,\n 'race' : self.xcdtype_race\n },\n 'usecols' : ['record_id', 'gender', 'race']\n }\n df = self.loader(**kw)\n\n race = df.race.xcat.relevel(1)\n zcv = race.value_counts().reset_index().values.tolist()\n\n self.assertListEqual( zcv, [['white', 8], ['black', 3], ['other', 0]] )\n\n\n\n# -----------------------------------------------------------------------------\n\nif __name__ == '__main__':\n \"\"\"\n # Run from tests subdirectory\n $ python3 -m unittest 
discover -b -s ..\n\n # OR, from package root directory\n $ python3 -m unittest discover\n \"\"\"\n unittest.main()","sub_path":"pydlennon/tests/extensions/test_ext_categorical.py","file_name":"test_ext_categorical.py","file_ext":"py","file_size_in_byte":7913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"421998903","text":"from rest_framework.pagination import PageNumberPagination\nfrom rest_framework.response import Response\nfrom collections import OrderedDict\nimport math\nfrom data.global_enums import GetPaginationConfig\n\n\n\n# *******************************************************************************\n# * *\n# * @标题 : 自定义分页\n# * @功能 : DRF提供的自定义分页\n# * @备注 : None\n# * *\n# *******************************************************************************\nclass MyPagination(PageNumberPagination):\n\n msg_list = \"ok\"\n page_size = GetPaginationConfig.PAGE_SIZE.value # 每页最多显示数量\n page_size_query_param = GetPaginationConfig.PAGE_SIZE_QUERY_PARAM.value # 可以通过传入 /api/xxx/?page=2,改变默认每页显示的个数\n max_page_size = GetPaginationConfig.MAX_PAGE_SIZE.value # 最大页数不超过500\n page_query_param = GetPaginationConfig.PAGE_QUERY_PARAM.value # 可以通过传入 /api/xxx/?page=2, 获取指定页数据\n\n def get_total_pages(self):\n \"\"\"总页数\"\"\"\n return math.ceil(self.page.paginator.count / self.page_size) # 向上取整\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n # ('success', True), # 成功标志\n ('msg', self.msg_list), # 消息\n ('count', self.page.paginator.count), # 数量\n ('size', self.page_size), # 每页大小\n ('totalpages', self.get_total_pages()), # 总页数\n # ('next', self.get_next_link()), # 下一页地址\n # ('previous', self.get_previous_link()), # 上一页地址\n ('results', data) # 返回数据\n ]))\n\n","sub_path":"utils/common/paginations/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"483752625","text":"# exercise 2.1.1\nimport numpy as np\nimport csv\nfrom scipy.linalg import svd\nimport matplotlib.pyplot as plt\n\ncount = 214\n\nclasses = [\"Building float\", \"Building\", \"Vehicle float\", \"Vehicle\", \"Container\", \"Tableware\", \"Headlamp\"]\n\n# Preallocate memory, then extract excel data to matrix X\nX = np.mat(np.empty((count, 11)))\n\n# Load xls sheet with data\nwith open('glass_data.csv') as csvfile:\n\treader = csv.reader(csvfile)\n\n\t# Populate X\n\tfor i, row in enumerate(reader):\n\t\tX[i, :] = np.array(row)\n\n\ntable = X[:,1:-1]\nXmean = table - np.ones((table.shape[0],1))*table.mean(0)\n\ndata = Xmean / np.std(table, axis=0)\n\nU,S,V = svd(Xmean,full_matrices=False)\nU2,S2,V2 = svd(data, full_matrices=False)\n# Project the centered data onto principal component space\nZ = Xmean * (V.T)\nZ2 = data *(V2.T)\n\n# Indices of the principal components to be plotted\ni = 0\nj = 1\n\nC = len(set(np.array(X[:,-1]).T[0]))\n\nrho = (S*S) / (S*S).sum() \nrho2 = (S2*S2) / (S2*S2).sum() \n\nrhoakk = np.zeros((rho.shape[0],1))\nrhoakk2 = np.zeros((rho.shape[0],1))\n\nsum = 0\nfor i,v in enumerate(rho):\n\tsum += v\n\trhoakk[i] = sum\n\nsum = 0\nfor i,v in enumerate(rho2):\n\tsum += v\n\trhoakk2[i] = sum\n\n# print(rhoakk[1].sum())\n# exit(0)\nplt.figure(figsize=(7,3))\nplt.subplots_adjust(left=0.1, bottom=0.2, right=0.95, top=0.9)\nplt.rc('axes', axisbelow=True)\nplt.grid('on')\nplt.plot(range(1,rhoakk2.shape[0]+1), rhoakk2, 'o', color='green')\nplt.plot(range(1,rhoakk.shape[0]+1), rhoakk, 'o', 
color='blue')\n\nplt.axis((0.5,9.5,0,1.1))\nplt.xlabel(\"Number of principal components\")\nplt.ylabel(\"Variance explained\")\nplt.legend([\"std normed\", \"Not std normed\"], loc=4)\n\n# Output result to screen\nplt.savefig(\"figures/RhoAkk.eps\")\n\nplt.show()\n\n\n\n\n","sub_path":"Python/MachineLearning/Project1/RhoPlotBoth.py","file_name":"RhoPlotBoth.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"482268250","text":"\"\"\"\nCopyright (c) 2020 COTOBA DESIGN, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\nand to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\nTHE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport unittest\n\nfrom programy.bot import Bot\nfrom programy.config.bot.bot import BotConfiguration\nfrom programy.spelling.extension import SpellingExtension\nfrom programy.spelling.norvig import NorvigSpellingChecker\n\nfrom programytest.client import TestClient\n\n\nclass NorvigSpellingExtensionTests(unittest.TestCase):\n\n def setUp(self):\n self._client = TestClient()\n\n config = BotConfiguration()\n\n self.client_context = self._client.create_client_context(\"testuser\")\n\n self.client_context._bot = Bot(config=config, client=self._client)\n self.client_context._bot._spell_checker = NorvigSpellingChecker()\n self.client_context._bot._spell_checker.add_corpus(\"THIS IS HAVE WORDS SPELLED CORRECTLY\")\n\n def test_invalid_command(self):\n\n extension = SpellingExtension()\n self.assertIsNotNone(extension)\n\n result = extension.execute(self.client_context, \"XXX\")\n self.assertIsNotNone(result)\n self.assertEqual(\"SPELLING CORRECT INVALID COMMAND\", result)\n\n result = extension.execute(self.client_context, \"SPELLING\")\n self.assertIsNotNone(result)\n self.assertEqual(\"SPELLING CORRECT INVALID COMMAND\", result)\n\n result = extension.execute(self.client_context, \"SPELLING CORRECT\")\n self.assertIsNotNone(result)\n self.assertEqual(\"SPELLING CORRECT INVALID COMMAND\", result)\n\n def test_valid_scores_command(self):\n\n extension = SpellingExtension()\n self.assertIsNotNone(extension)\n\n result = extension.execute(self.client_context, \"SPELLING ENABLED\")\n self.assertIsNotNone(result)\n self.assertEqual(\"SPELLING ENABLED\", result)\n\n result = extension.execute(self.client_context, \"SPELLING CORRECT HAVVE\")\n self.assertIsNotNone(result)\n self.assertEqual(\"SPELLING CORRECTED HAVE\", 
result)\n","sub_path":"dialogue-engine/test/programytest/spelling/test_extension_norvig.py","file_name":"test_extension_norvig.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"135641605","text":"import json\nimport csv\nfrom itertools import zip_longest\n\nfoldername1to9='mintcand0'\nfoldername10to13='mintcand'\nanswer_type=[]\nSession=[]\nuser_statement=[]\nchatbot_ans=[]\nuName=[]\nuID=[]\nqueryTime=[]\nskillsfound=[]\nfname=[]\nfilename = \"chatbotlogs.log\"\nfor x in range(1,14):\n if x<10:\n infile=foldername1to9+str(x)+'/'+filename\n elif (x>=10 and x<14 ):\n infile = foldername10to13 + str(x) + '/' + filename\n with open(infile) as logs_Data:\n logs_Data = logs_Data.readlines()\n for i in logs_Data:\n try:\n if (eval(i.split(\"- INFO -\")[1]))['AnswerType']:\n answer_type.append((eval(i.split(\"- INFO -\")[1]))['AnswerType'])\n Session.append((eval(i.split(\"- INFO -\")[1]))['userSession'])\n user_statement.append((eval(i.split(\"- INFO -\")[1]))['statement'])\n chatbot_ans.append((eval(i.split(\"- INFO -\")[1]))['response'][0])\n uName.append((eval(i.split(\"- INFO -\")[1]))['uName'])\n queryTime.append((eval(i.split(\"- INFO -\")[1]))['queryTime'])\n skillsfound.append((eval(i.split(\"- INFO -\")[1]))['skills found'])\n fname.append((eval(i.split(\"- INFO -\")[1]))['uFname'])\n except:\n pass\n\nfinal_list_toCSV = [queryTime,uName,Session,answer_type, user_statement,chatbot_ans,skillsfound,fname]\nexport_data = zip_longest(*final_list_toCSV, fillvalue = '')\nwith open('final_chatbotwithSession_21_08_2018.csv', 'w', encoding='utf-8', newline='') as myfile:\n wr = csv.writer(myfile)\n wr.writerow((\"queryTime\",\"uName\",\"Session\",\"answer_type\",\"user_statement\",\"chatbot_ans\",\"skillsfound\",\"First Name\"))\n wr.writerows(export_data)\nmyfile.close()\n\n","sub_path":"data_clean_lgs.py","file_name":"data_clean_lgs.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"53926544","text":"#!/usr/bin/env python3\n\nimport os\nimport pytest\nimport unittest\nfrom inventory import *\nfrom pathlib import Path\n\nDATA_PATH = Path.cwd().with_name('data')\n\nTEST_DATA = [('Elisa Miles', 'LR04', 'Leather Sofa', 25.00),\n ('Susan Wong', 'FB31', 'Home Haircut Machine', 21.00),\n ('Edward Data', 'KT78', 'Kitchen Table', 10.00),\n ('Susan Wong', 'BT31', 'Flat-Screen TV', 200.00),\n ('Alex Gonzales', 'BR02', 'Queen Mattress', 17.00),\n ('Susan Wong', 'KT15', '5-Burner Stove', 175.00)]\n\n\nclass PerformanceTesting(unittest.TestCase):\n\n def test_add_furniture(self):\n \"\"\"\n test that inventory csv does not exist and then tests\n that it does exist after running adding records.\n :return: tuple\n \"\"\"\n self.assertFalse(False, Path.exists(DATA_PATH / 'test_items.csv'))\n add_furniture('test_items.csv', 'Elisa Miles', 'LR04', 'Leather Sofa', 25.00)\n add_furniture('test_items.csv', 'Edward Data', 'KT78', 'Kitchen Table', 10.00)\n add_furniture('test_items.csv', 'Alex Gonzales', 'BR02', 'Queen Mattress', 17.00)\n add_furniture('test_items.csv', 'Susan Wong', 'FB31', 'Home Haircut Machine', 21.00)\n add_furniture('test_items.csv', 'Susan Wong', 'BT31', 'Flat-Screen TV', 200.00)\n add_furniture('test_items.csv', 'Susan Wong', 'KT15', '5-Burner Stove', 175.00)\n self.assertTrue(True, Path.exists(DATA_PATH / 'test_items.csv'))\n os.remove(DATA_PATH / 'test_items.csv')\n\n def 
test_correct_number_of_records_added(self):\n \"\"\"\n tests that the correct number of records are\n added to the inventory csv\n :return: tuple\n \"\"\"\n add_furniture('test_items.csv', 'Elisa Miles', 'LR04', 'Leather Sofa', 25.00)\n add_furniture('test_items.csv', 'Edward Data', 'KT78', 'Kitchen Table', 10.00)\n add_furniture('test_items.csv', 'Alex Gonzales', 'BR02', 'Queen Mattress', 17.00)\n add_furniture('test_items.csv', 'Susan Wong', 'FB31', 'Home Haircut Machine', 21.00)\n add_furniture('test_items.csv', 'Susan Wong', 'BT31', 'Flat-Screen TV', 200.00)\n add_furniture('test_items.csv', 'Susan Wong', 'KT15', '5-Burner Stove', 175.00)\n test = pd.read_csv(DATA_PATH / 'test_items.csv')\n len_csv = int(test.iloc[:, 0].count())\n self.assertEqual(6, len_csv)\n\n def test_invoice(self):\n \"\"\"\n Tests creation of new rented_items csv including only records with the\n name entered as an argument utilizing currying.\n :return: csv file\n \"\"\"\n create_invoice = single_customer('Susan Wong', 'rented_items.csv')\n create_invoice('test_items.csv')\n test = pd.read_csv(DATA_PATH / 'rented_items.csv')\n test = test[test['customer_name'] == 'Susan Wong']\n len_csv = int(test.iloc[:, 0].count())\n self.assertTrue(True, Path.exists(DATA_PATH / 'rented_items.csv'))\n self.assertEqual(3, len_csv)\n os.remove(DATA_PATH / 'test_items.csv')\n os.remove(DATA_PATH / 'rented_items.csv')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"students/jeff_shabani/lesson08/assignment/tests/test_inventory.py","file_name":"test_inventory.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"452331665","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef mediaMovel(vetor):\n vet = np.copy(vetor)\n somatorio = 0\n diferenca = 0\n amostra = 0\n thresholdDaDiferenca = 2\n for i in range(len(vet)):\n if len(vet) <2: return vet[0]\n if i == 0:\n diferenca = vet[i] - vet[i+1]\n if diferenca > thresholdDaDiferenca:\n vet[i] = (vet[i+1] + vet[i+2])/2\n elif (0 < i) and (i < (len(vet)-1)):\n diferenca = vet[i] - vet[i-1]\n if diferenca > thresholdDaDiferenca:\n vet[i] = (vet[i-1] + vet[i+1])/2\n elif i == (len(vet) - 1):\n diferenca = vet[i] - vet[i-1]\n if diferenca > thresholdDaDiferenca:\n vet[i] = (vet[i-2] + vet[i-1])/2\n somatorio = somatorio + vet[i]\n return somatorio/len(vet)\n\ndef filtro(vetor, pos, amostras):\n vetorFiltrado = 0.\n if pos < amostras:\n vetorFiltrado = mediaMovel(vetor[0:pos+1])\n else:\n vetorFiltrado = mediaMovel(vetor[pos-amostras+1:pos+1])\n return vetorFiltrado\n\ndef tratamento(img):\n k1 = np.ones((3,3),dtype=np.uint8)/9\n k2 = np.ones((4,4),dtype=np.uint8)\n k3 = np.ones((5,5),dtype=np.uint8)\n imgOrig = img\n img = cv2.filter2D(img, -1, k1)\n img = cv2.morphologyEx(img, cv2.MORPH_OPEN, k2)\n img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)[1]\n img = cv2.dilate(img, k3, iterations = 3)\n img = cv2.bitwise_and(imgOrig, img)\n img = cv2.dilate(img, k3)\n return img\n\ndef criaBoundingBox(img):\n THRESHOLD_DA_AREA = 500\n contornos = \\\n cv2.findContours(tratado, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]\n for contorno in contornos:\n (x, y, w, h) = cv2.boundingRect(contorno)\n if cv2.contourArea(contorno) > THRESHOLD_DA_AREA:\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)\n return img\n \n \nN_AMOSTRAS_MM = 10 #numero de amostras da media movel\nTHRESHOLD = 2 #limiar do movimento para salvar os frames\nvideo = 
cv2.VideoCapture(\"motion_detection_test/0HDJPJO1.mp4\")\nlargura = video.get(cv2.CAP_PROP_FRAME_WIDTH)\naltura = video.get(cv2.CAP_PROP_FRAME_HEIGHT)\npixeisVert = int(altura * 0.9)\n\nnumeroDeFrames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n\nmediaDaDif = np.zeros(numeroDeFrames)\nDifFiltrada = np.zeros(numeroDeFrames)\n\nbackGND = cv2.bgsegm.createBackgroundSubtractorMOG()\n\nfor i in range(numeroDeFrames):\n frame = video.read()[1][0:pixeisVert]\n mog = backGND.apply(frame)\n tratado = tratamento(mog)\n mediaDaDif[i] = np.log10(np.mean(tratado) + 1)\n DifFiltrada[i] = filtro(mediaDaDif, i, N_AMOSTRAS_MM)\n criaBoundingBox(frame)\n cv2.imshow('VIDEO', frame)\n if cv2.waitKey(1) & 0xFF == ord('p'): \n while True:\n if cv2.waitKey(1) & 0xFF == ord('d'): \n break\n if cv2.waitKey(1) & 0xFF == ord('q'): break\n # if i > (N_AMOSTRAS_MM - 1):\n # media[i - (N_AMOSTRAS_MM - 1)] = np.mean(mediaMovel)\n # if media[i - (N_AMOSTRAS_MM - 1)] > THRESHOLD:\n # print(str(i))\n # cv2.imwrite('frames/'+str(i)+'.jpg', frameNovo)\n\nvideo.release()\ncv2.destroyAllWindows()\nplt.ylim(0, 1)\nplt.plot(DifFiltrada)","sub_path":"motion_detection/motionDetect5.py","file_name":"motionDetect5.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"236405388","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 23 14:26:41 2018\n\n@author: yoelr\n\"\"\"\nfrom ... import Stream\nfrom ..._unit import metaUnit\n\n__all__ = ('mixer', 'run_mixer')\n\nclass mixer(metaUnit):\n \"\"\"Create a mixer Unit class which behaves like a mixer regarding mass and energy balances.\"\"\"\n def __new__(mcl, name, bases, dct):\n if '_run' in dct:\n raise TypeError(f\"cannot use {mcl.__name__} metaclass with an implemented '_run' method\")\n dct['_run'] = run_mixer\n dct['_N_ins'] = 2\n dct['_N_outs'] = 1\n return super().__new__(mcl, name, bases, dct)\n\nrun_mixer = lambda self: Stream.sum(self.outs[0], self.ins)\n \n \n","sub_path":"build/lib/biosteam/units/metaclasses/_mixer.py","file_name":"_mixer.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"13"} +{"seq_id":"298977068","text":"\"\"\"\n\nMilestone Project 2 - Blackjack Game\n\nIn this milestone project you will be creating a Complete BlackJack Card Game in Python.\n\nHere are the requirements:\nYou need to create a simple text-based BlackJack game\nThe game needs to have one player versus an automated dealer.\nThe player can stand or hit.\nThe player must be able to pick their betting amount.\nYou need to keep track of the player's total money.\nYou need to alert the player of wins, losses, or busts, etc...\n\nAnd most importantly:\nYou must use OOP and classes in some portion of your game.\nYou can not just use functions in your game.\nUse classes to help you define the Deck and the Player's hand. There are many right ways to do this, so explore it well!\nFeel free to expand this game. Try including multiple players.\nTry adding in Double-Down and card splits! 
Remember that you are free to use any resources you want and as always: have fun!\n\nDeck: Consists of 52 cards\n    Ace can be counted as 1 or 11 as the player chooses\n    King/Queen/Jack are each worth 10 points\n    The rest are numbered cards from 2 to 10, each worth its face value\n\n\nActions:\nHit: Receive a card from the deck\nStay: Do not take any more cards from the deck\n\n\"\"\"\n\n# Import Statements\n\nimport random\n\n# Global variables\n\nsuits = ('Hearts', 'Clubs', 'Spades', 'Diamonds')\nranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')\nvalues = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10,\n          'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}\nplaying = True\n\n# Class Descriptions\n\n# Each card object will have a suit and a corresponding rank\n\n\nclass Card:\n\n    def __init__(self, suit, rank):\n        self.suit = suit\n        self.rank = rank\n\n    # This will return a card as \"Two of Hearts\", i.e., Rank of Suit\n    def __str__(self):\n        return self.rank + ' of ' + self.suit\n\n# A deck object will have 52 card objects (4 suits times 13 ranks)\n\n\nclass Deck:\n    # Create 52 card objects by looping through 4 suits and 13 ranks\n    def __init__(self):\n        self.deck = []\n        for suit in suits:\n            for rank in ranks:\n                self.deck.append(Card(suit, rank))\n\n    # This magic method will return a list of all cards in the deck.\n    def __str__(self):\n        deck_composition = ''\n        for card in self.deck:\n            deck_composition += card.__str__() + '\\n'\n        return deck_composition\n\n    # This will shuffle the cards in the deck into a random order\n    def shuffle(self):\n        random.shuffle(self.deck)\n\n    # This will deal a single card from the deck to the player or the dealer\n    def deal(self):\n        single_card = self.deck.pop()\n        return single_card\n\n\n# This class is used to maintain balance details of a player\nclass Player:\n\n    def __init__(self, player, balance):\n        self.player = player\n        self.balance = balance\n\n    def deposit(self, deposit_amount):\n        self.balance += deposit_amount\n        print(f'{deposit_amount} has been deposited and the new balance is {self.balance}')\n\n    def withdraw(self, bet_amount):\n        if self.balance == 0:\n            print('Funds Unavailable')\n        elif self.balance < bet_amount:\n            print('Bet amount greater than existing account balance')\n        else:\n            self.balance -= bet_amount\n            print(f'{bet_amount} has been withdrawn to bet and the new balance is {self.balance}')\n\n    # This is a dunder/magic method which returns a string.\n    # It is called by default when print() or str() is used on the object.\n    def __str__(self):\n        return f'Player {self.player} has a balance of {self.balance}'\n\n    # This dunder/magic method is called when an instance is about to be destroyed\n    def __del__(self):\n        print('Player account has been deleted')\n\n# This class is used to maintain balance details of a player's chips\n\n\nclass Chips:\n\n    def __init__(self, total=100):\n        self.total = total\n        self.bet = 0\n\n    def add_winning(self):\n        self.total += self.bet\n        print(f'{self.bet} has been added and the new total is {self.total}')\n\n    def lose_bet(self):\n        self.total -= self.bet\n        print(f'{self.bet} has been deducted and the new total is {self.total}')\n\n\nclass Hand:\n    def __init__(self):\n        self.cards = []\n        self.value = 0\n        self.aces = 0\n\n    def add_card(self, card):\n        self.cards.append(card)\n        self.value += values[card.rank]\n        if card.rank == 'Ace':\n            self.aces += 1\n\n    # Count an ace as 1 instead of 11 when the hand would otherwise bust\n    def adjust_ace_value(self):\n        if self.value > 21 and self.aces:\n            self.value -= 10
\n            self.aces -= 1\n\n\n'''\n1. Ask the player if he wants to start first\n2. If not, start with the dealer\n3. Deal 2 cards from the deck to the player and to the dealer\n4. Display both of the player's cards and only one of the dealer's cards, keeping the other card hidden\n5. Have the player add a balance amount and place the bet\n6. On every turn, keep asking whether the player or the dealer, whoever goes first, wants to hit or stay\n7. If a stay is ordered and the value of all the cards in hand is less than 21, move to the opposite player/dealer\n8. Keep checking whether the value of the player's or the dealer's hand is a bust\n'''\n\n# Functions\n\n\n# Display the starting hands (only one of the dealer's cards is shown)\ndef display_start(player, dealer):\n    print('Player Hand: ', *player.cards, sep='\\n')\n    print('\\nDealer Hand: ')\n    print('